Merge "Simplify LoadNativeLibrary()"
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 422d82f..11d0c9a 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1140,6 +1140,12 @@
RegLocation rl_offset = info->args[0];
RegLocation rl_count = info->args[1];
RegLocation rl_data = info->args[2];
+ // No need to emit code checking whether `rl_data` is a null
+ // pointer, as callers of the native method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // all include a null check on `data` before calling that method.
CallRuntimeHelperRegLocationRegLocationRegLocation(
kQuickAllocStringFromChars, rl_offset, rl_count, rl_data, true);
RegLocation rl_return = GetReturn(kRefReg);
@@ -1357,6 +1363,7 @@
LoadValueDirectFixed(rl_start, reg_start);
}
RegStorage r_tgt = LoadHelper(kQuickIndexOf);
+ CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
GenExplicitNullCheck(reg_ptr, info->opt_flags);
LIR* high_code_point_branch =
rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
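
Note: the CheckEntrypointTypes<...>() call added in the hunk above is a compile-time assertion that the signature the generated call expects matches the runtime entrypoint. A minimal, self-contained sketch of the idea (hypothetical names; the real ART helper is declared elsewhere and may differ) looks like:

    #include <cstdint>
    #include <type_traits>

    // Hypothetical registry mapping an entrypoint tag to its C signature.
    enum QuickEntrypointEnum { kQuickIndexOf };

    template <QuickEntrypointEnum E> struct EntrypointSignature;
    template <> struct EntrypointSignature<kQuickIndexOf> {
      // int32_t indexof(string, char, start) -- matches the check in the hunk above.
      using type = int32_t(void*, uint32_t, uint32_t);
    };

    // Compilation fails if the caller's expectation disagrees with the registry.
    template <QuickEntrypointEnum E, typename Ret, typename... Args>
    void CheckEntrypointTypes() {
      static_assert(std::is_same<typename EntrypointSignature<E>::type, Ret (Args...)>::value,
                    "entrypoint signature mismatch");
    }

    // Usage, as in the hunk: CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
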
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a9fec30..3100b6d 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -384,7 +384,9 @@
compiler_->Init();
- CHECK_EQ(boot_image_, image_classes_.get() != nullptr);
+ if (boot_image_) {
+ CHECK(image_classes_.get() != nullptr) << "Expected image classes for boot image";
+ }
}
CompilerDriver::~CompilerDriver() {
@@ -868,12 +870,13 @@
}
bool CompilerDriver::IsImageClass(const char* descriptor) const {
- if (!IsBootImage()) {
- // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
- return true;
- } else {
+ if (image_classes_ != nullptr) {
+ // If we have a set of image classes, use those.
return image_classes_->find(descriptor) != image_classes_->end();
}
+ // No set of image classes, assume we include all the classes.
+ // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
+ return !IsBootImage();
}
bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
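
Read as a decision table, the rewritten predicate says: an explicit image-class set always wins; without one, every class counts as an image class for an app image and none does for the boot image. A standalone simplification of that logic (a hypothetical free function, not the real CompilerDriver member):

    #include <string>
    #include <unordered_set>

    bool IsImageClass(const std::unordered_set<std::string>* image_classes,
                      bool is_boot_image,
                      const std::string& descriptor) {
      if (image_classes != nullptr) {
        // If we have a set of image classes, use those.
        return image_classes->count(descriptor) != 0;
      }
      // No set of image classes: include everything for app images, nothing for the boot image.
      return !is_boot_image;
    }
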
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 4785885..0037564 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -250,8 +250,8 @@
ProfileCompilationInfo info;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
std::string key = ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation());
- profile_info_.AddData(key, dex_file->GetLocationChecksum(), 1);
- profile_info_.AddData(key, dex_file->GetLocationChecksum(), 2);
+ profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 1);
+ profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 2);
}
return &profile_info_;
}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 23601c3..79a6d38 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -230,10 +230,10 @@
}
// Trim maps to reduce memory usage.
- // TODO: measure how much this increases compile time.
+ // TODO: move this to an idle phase.
{
TimingLogger::ScopedTiming t2("TrimMaps", &logger);
- runtime->GetArenaPool()->TrimMaps();
+ runtime->GetJitArenaPool()->TrimMaps();
}
total_time_ += NanoTime() - start_time;
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index a7a1c0f..f2929bc 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -533,6 +533,8 @@
first_index_bounds_check_map_(
std::less<int>(),
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ dynamic_bce_standby_(
+ graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
early_exit_loop_(
std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
@@ -553,6 +555,13 @@
}
void Finish() {
+ // Retry dynamic bce candidates on standby that are still in the graph.
+ for (HBoundsCheck* bounds_check : dynamic_bce_standby_) {
+ if (bounds_check->IsInBlock()) {
+ TryDynamicBCE(bounds_check);
+ }
+ }
+
// Preserve SSA structure which may have been broken by adding one or more
// new taken-test structures (see TransformLoopForDeoptimizationIfNeeded()).
InsertPhiNodes();
@@ -561,6 +570,7 @@
early_exit_loop_.clear();
taken_test_loop_.clear();
finite_loop_.clear();
+ dynamic_bce_standby_.clear();
}
private:
@@ -1301,7 +1311,7 @@
if (DynamicBCESeemsProfitable(loop, instruction->GetBlock()) &&
induction_range_.CanGenerateCode(
instruction, index, &needs_finite_test, &needs_taken_test) &&
- CanHandleInfiniteLoop(loop, index, needs_finite_test) &&
+ CanHandleInfiniteLoop(loop, instruction, index, needs_finite_test) &&
CanHandleLength(loop, length, needs_taken_test)) { // do this test last (may code gen)
HInstruction* lower = nullptr;
HInstruction* upper = nullptr;
@@ -1433,7 +1443,7 @@
* ensure the loop is finite.
*/
bool CanHandleInfiniteLoop(
- HLoopInformation* loop, HInstruction* index, bool needs_infinite_test) {
+ HLoopInformation* loop, HBoundsCheck* check, HInstruction* index, bool needs_infinite_test) {
if (needs_infinite_test) {
// If we already forced the loop to be finite, allow directly.
const uint32_t loop_id = loop->GetHeader()->GetBlockId();
@@ -1455,6 +1465,9 @@
}
}
}
+ // If bounds check made it this far, it is worthwhile to check later if
+ // the loop was forced finite by another candidate.
+ dynamic_bce_standby_.push_back(check);
return false;
}
return true;
@@ -1676,6 +1689,9 @@
// in a block that checks an index against that HArrayLength.
ArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
+ // Standby list for dynamic bce.
+ ArenaVector<HBoundsCheck*> dynamic_bce_standby_;
+
// Early-exit loop bookkeeping.
ArenaSafeMap<uint32_t, bool> early_exit_loop_;
@@ -1711,21 +1727,18 @@
// that value dominated by that instruction fits in that range. Range of that
// value can be narrowed further down in the dominator tree.
BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
- HBasicBlock* last_visited_block = nullptr;
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
- if (current == last_visited_block) {
- // We may insert blocks into the reverse post order list when processing
- // a loop header. Don't process it again.
- DCHECK(current->IsLoopHeader());
- continue;
- }
if (visitor.IsAddedBlock(current)) {
// Skip added blocks. Their effects are already taken care of.
continue;
}
visitor.VisitBasicBlock(current);
- last_visited_block = current;
+ // Skip forward to the current block in case new basic blocks were inserted
+ // (which always appear earlier in reverse post order) to avoid visiting the
+ // same basic block twice.
+ for ( ; !it.Done() && it.Current() != current; it.Advance()) {
+ }
}
// Perform cleanup.
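
The rewritten traversal above replaces the `last_visited_block` bookkeeping with a skip-forward: after visiting a block, the iterator is advanced until it is back at that block, because newly inserted blocks always appear earlier in the reverse post order. A small standalone sketch of the same idiom over a plain vector (an assumed container, not the real HGraph iterator):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> order = {1, 2, 3};
      for (size_t i = 0; i < order.size(); ++i) {
        int current = order[i];
        std::printf("visit %d\n", current);
        if (current == 2) {
          order.insert(order.begin() + i, 99);  // a pass inserted a new element before `current`
        }
        // Skip forward to `current` so the loop resumes after it and nothing is visited twice.
        while (i < order.size() && order[i] != current) {
          ++i;
        }
      }
      return 0;  // prints: visit 1, visit 2, visit 3
    }
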
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d550095..3e3719e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -562,31 +562,16 @@
graph_->reverse_post_order_[++index] = otherwise;
graph_->reverse_post_order_[++index] = merge;
- // Set the loop information of the newly created blocks.
- HLoopInformation* loop_info = cursor_block->GetLoopInformation();
- if (loop_info != nullptr) {
- then->SetLoopInformation(cursor_block->GetLoopInformation());
- merge->SetLoopInformation(cursor_block->GetLoopInformation());
- otherwise->SetLoopInformation(cursor_block->GetLoopInformation());
- for (HLoopInformationOutwardIterator loop_it(*cursor_block);
- !loop_it.Done();
- loop_it.Advance()) {
- loop_it.Current()->Add(then);
- loop_it.Current()->Add(merge);
- loop_it.Current()->Add(otherwise);
- }
- // In case the original invoke location was a back edge, we need to update
- // the loop to now have the merge block as a back edge.
- if (loop_info->IsBackEdge(*original_invoke_block)) {
- loop_info->RemoveBackEdge(original_invoke_block);
- loop_info->AddBackEdge(merge);
- }
- }
- // Set the try/catch information of the newly created blocks.
- then->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
- merge->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
- otherwise->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
+ graph_->UpdateLoopAndTryInformationOfNewBlock(
+ then, original_invoke_block, /* replace_if_back_edge */ false);
+ graph_->UpdateLoopAndTryInformationOfNewBlock(
+ otherwise, original_invoke_block, /* replace_if_back_edge */ false);
+
+ // In case the original invoke location was a back edge, we need to update
+ // the loop to now have the merge block as a back edge.
+ graph_->UpdateLoopAndTryInformationOfNewBlock(
+ merge, original_invoke_block, /* replace_if_back_edge */ true);
}
bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
@@ -603,6 +588,10 @@
DCHECK(resolved_method != nullptr);
ArtMethod* actual_method = nullptr;
+ size_t method_index = invoke_instruction->IsInvokeVirtual()
+ ? invoke_instruction->AsInvokeVirtual()->GetVTableIndex()
+ : invoke_instruction->AsInvokeInterface()->GetImtIndex();
+
// Check whether we are actually calling the same method among
// the different types seen.
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
@@ -611,13 +600,18 @@
}
ArtMethod* new_method = nullptr;
if (invoke_instruction->IsInvokeInterface()) {
- new_method = ic.GetTypeAt(i)->FindVirtualMethodForInterface(
- resolved_method, pointer_size);
+ new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
+ method_index % mirror::Class::kImtSize, pointer_size);
+ if (new_method->IsRuntimeMethod()) {
+ // Bail out as soon as we see a conflict trampoline in one of the target's
+ // interface table.
+ return false;
+ }
} else {
DCHECK(invoke_instruction->IsInvokeVirtual());
- new_method = ic.GetTypeAt(i)->FindVirtualMethodForVirtual(
- resolved_method, pointer_size);
+ new_method = ic.GetTypeAt(i)->GetEmbeddedVTableEntry(method_index, pointer_size);
}
+ DCHECK(new_method != nullptr);
if (actual_method == nullptr) {
actual_method = new_method;
} else if (actual_method != new_method) {
@@ -641,10 +635,6 @@
HInstanceFieldGet* receiver_class = BuildGetReceiverClass(
class_linker, receiver, invoke_instruction->GetDexPc());
- size_t method_offset = invoke_instruction->IsInvokeVirtual()
- ? actual_method->GetVtableIndex()
- : invoke_instruction->AsInvokeInterface()->GetImtIndex();
-
Primitive::Type type = Is64BitInstructionSet(graph_->GetInstructionSet())
? Primitive::kPrimLong
: Primitive::kPrimInt;
@@ -653,7 +643,7 @@
type,
invoke_instruction->IsInvokeVirtual() ? HClassTableGet::TableKind::kVTable
: HClassTableGet::TableKind::kIMTable,
- method_offset,
+ method_index,
invoke_instruction->GetDexPc());
HConstant* constant;
@@ -1020,6 +1010,8 @@
// at runtime, we change this call as if it was a virtual call.
invoke_type = kVirtual;
}
+
+ const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
HGraph* callee_graph = new (graph_->GetArena()) HGraph(
graph_->GetArena(),
callee_dex_file,
@@ -1029,7 +1021,7 @@
invoke_type,
graph_->IsDebuggable(),
/* osr */ false,
- graph_->GetCurrentInstructionId());
+ caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
OptimizingCompilerStats inline_stats;
@@ -1229,7 +1221,16 @@
}
number_of_inlined_instructions_ += number_of_instructions;
+ DCHECK_EQ(caller_instruction_counter, graph_->GetCurrentInstructionId())
+ << "No instructions can be added to the outer graph while inner graph is being built";
+
+ const int32_t callee_instruction_counter = callee_graph->GetCurrentInstructionId();
+ graph_->SetCurrentInstructionId(callee_instruction_counter);
*return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+
+ DCHECK_EQ(callee_instruction_counter, callee_graph->GetCurrentInstructionId())
+ << "No instructions can be added to the inner graph during inlining into the outer graph";
+
return true;
}
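
The counter handoff added in the hunk above keeps instruction ids disjoint between the two graphs: the callee graph starts numbering at the caller's current counter, the caller allocates nothing while the callee is built, and afterwards the caller resumes from the callee's counter. A toy stand-in showing the invariant (a hypothetical Graph struct, not HGraph):

    #include <cassert>
    #include <cstdint>

    struct Graph {
      int32_t current_id = 0;
      int32_t NextId() { return current_id++; }
    };

    int main() {
      Graph outer;
      outer.NextId();                                 // the caller allocates some ids
      const int32_t caller_counter = outer.current_id;

      Graph inner;
      inner.current_id = caller_counter;              // callee ids start after the caller's
      inner.NextId();
      inner.NextId();

      assert(caller_counter == outer.current_id);     // outer untouched while the callee was built
      outer.current_id = inner.current_id;            // the caller resumes after the callee
      assert(outer.NextId() == 3);                    // the next outer id does not collide
      return 0;
    }
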
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 2ab50bb..0cec5cc 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -193,6 +193,46 @@
#undef INTRISIC_OPTIMIZATION
+//
+// Macros for use in the intrinsics code generators.
+//
+
+// Defines an unimplemented intrinsic: that is, a method call that is recognized as an
+// intrinsic to exploit e.g. no side-effects or exceptions, but otherwise not handled
+// by this architecture-specific intrinsics code generator. Eventually it is implemented
+// as a true method call.
+#define UNIMPLEMENTED_INTRINSIC(Arch, Name) \
+void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
+} \
+void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
+}
+
+// Defines a list of unreached intrinsics: that is, method calls that are recognized as
+// an intrinsic, and then always converted into HIR instructions before they reach any
+// architecture-specific intrinsics code generator.
+#define UNREACHABLE_INTRINSIC(Arch, Name) \
+void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
+ LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
+ << " should have been converted to HIR"; \
+} \
+void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \
+ LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
+ << " should have been converted to HIR"; \
+}
+#define UNREACHABLE_INTRINSICS(Arch) \
+UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \
+UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
+UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
+UNREACHABLE_INTRINSIC(Arch, LongCompare) \
+UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
+UNREACHABLE_INTRINSIC(Arch, LongSignum)
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_INTRINSICS_H_
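
For reference, an invocation such as UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount) expands, after token pasting, to the same empty visitor pair the per-architecture files previously spelled out by hand:

    void IntrinsicLocationsBuilderARM::VisitIntegerBitCount(HInvoke* invoke ATTRIBUTE_UNUSED) {
    }
    void IntrinsicCodeGeneratorARM::VisitIntegerBitCount(HInvoke* invoke ATTRIBUTE_UNUSED) {
    }

UNREACHABLE_INTRINSIC(Arch, Name) expands to the same pair of visitors with LOG(FATAL) bodies, so a list entry that should have been lowered to HIR fails loudly instead of silently falling back to a call.
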
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 4ce919e..69c9708 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1151,6 +1151,7 @@
__ LoadFromOffset(kLoadWord, LR, TR,
QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pIndexOf).Int32Value());
+ CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ blx(LR);
if (slow_path != nullptr) {
@@ -1242,6 +1243,12 @@
void IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) {
ArmAssembler* assembler = GetAssembler();
+ // No need to emit code checking whether `locations->InAt(2)` is a null
+ // pointer, as callers of the native method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // all include a null check on `data` before calling that method.
__ LoadFromOffset(
kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1972,54 +1979,30 @@
__ Bind(&done);
}
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM, LongBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMaxFloatFloat)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMinLongLong)
+UNIMPLEMENTED_INTRINSIC(ARM, MathMaxLongLong)
+UNIMPLEMENTED_INTRINSIC(ARM, MathCeil) // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, MathFloor) // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, MathRint)
+UNIMPLEMENTED_INTRINSIC(ARM, MathRoundDouble) // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat) // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure.
+UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARM, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit)
-#define UNIMPLEMENTED_INTRINSIC(Name) \
-void IntrinsicLocationsBuilderARM::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-} \
-void IntrinsicCodeGeneratorARM::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}
-
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
-UNIMPLEMENTED_INTRINSIC(MathMinDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(MathMinFloatFloat)
-UNIMPLEMENTED_INTRINSIC(MathMaxDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(MathMaxFloatFloat)
-UNIMPLEMENTED_INTRINSIC(MathMinLongLong)
-UNIMPLEMENTED_INTRINSIC(MathMaxLongLong)
-UNIMPLEMENTED_INTRINSIC(MathCeil) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(MathFloor) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(MathRint)
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(MathRoundFloat) // Could be done by changing rounding mode, maybe?
-UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure.
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits)
-UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(ARM)
#undef __
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4be1695..7a4a6ef 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1301,6 +1301,7 @@
}
__ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pIndexOf).Int32Value()));
+ CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Blr(lr);
if (slow_path != nullptr) {
@@ -1392,6 +1393,12 @@
void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
vixl::MacroAssembler* masm = GetVIXLAssembler();
+ // No need to emit code checking whether `locations->InAt(2)` is a null
+ // pointer, as callers of the native method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // all include a null check on `data` before calling that method.
__ Ldr(lr,
MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1665,43 +1672,19 @@
__ Bind(&done);
}
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(ARM64, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM64, LongBitCount)
+UNIMPLEMENTED_INTRINSIC(ARM64, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(ARM64, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit)
-#define UNIMPLEMENTED_INTRINSIC(Name) \
-void IntrinsicLocationsBuilderARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-} \
-void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}
-
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits)
-UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(ARM64)
#undef __
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index e1aea92..b8933e1 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -19,7 +19,7 @@
// All intrinsics supported by the optimizing compiler. Format is name, then whether it is expected
// to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an
-// environment.
+// environment, may have side effects, or may throw exceptions.
#define INTRINSICS_LIST(V) \
V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
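
Each INTRINSICS_LIST entry therefore carries five columns: name, invoke kind, environment/dex-cache requirement, side-effect summary, and exception behavior. As a sketch, a consumer could instantiate the X-macro like this (the macro and enum names here are illustrative, not necessarily the ones used elsewhere in the tree):

    // Expands each list entry into an enumerator, e.g. kDoubleDoubleToRawLongBits.
    #define MAKE_INTRINSIC_ENUM(Name, InvokeKind, NeedsEnvOrCache, SideEffects, Exceptions) \
      k ## Name,
    enum class Intrinsics {
      kNone,
      INTRINSICS_LIST(MAKE_INTRINSIC_ENUM)
    };
    #undef MAKE_INTRINSIC_ENUM
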
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index a737d81..5a35dd5 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -607,202 +607,6 @@
GetAssembler());
}
-enum RotationDirection {
- kRotateRight,
- kRotateLeft,
-};
-
-static void GenRotate(HInvoke* invoke,
- Primitive::Type type,
- bool isR2OrNewer,
- RotationDirection direction,
- MipsAssembler* assembler) {
- DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
-
- LocationSummary* locations = invoke->GetLocations();
- if (invoke->InputAt(1)->IsIntConstant()) {
- int32_t shift = static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
- if (type == Primitive::kPrimInt) {
- Register in = locations->InAt(0).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
-
- shift &= 0x1f;
- if (direction == kRotateLeft) {
- shift = (32 - shift) & 0x1F;
- }
-
- if (isR2OrNewer) {
- if ((shift != 0) || (out != in)) {
- __ Rotr(out, in, shift);
- }
- } else {
- if (shift == 0) {
- if (out != in) {
- __ Move(out, in);
- }
- } else {
- __ Srl(AT, in, shift);
- __ Sll(out, in, 32 - shift);
- __ Or(out, out, AT);
- }
- }
- } else { // Primitive::kPrimLong
- Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
- Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
- Register out_lo = locations->Out().AsRegisterPairLow<Register>();
- Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
- shift &= 0x3f;
- if (direction == kRotateLeft) {
- shift = (64 - shift) & 0x3F;
- }
-
- if (shift == 0) {
- __ Move(out_lo, in_lo);
- __ Move(out_hi, in_hi);
- } else if (shift == 32) {
- __ Move(out_lo, in_hi);
- __ Move(out_hi, in_lo);
- } else if (shift < 32) {
- __ Srl(AT, in_lo, shift);
- __ Sll(out_lo, in_hi, 32 - shift);
- __ Or(out_lo, out_lo, AT);
- __ Srl(AT, in_hi, shift);
- __ Sll(out_hi, in_lo, 32 - shift);
- __ Or(out_hi, out_hi, AT);
- } else {
- __ Sll(AT, in_lo, 64 - shift);
- __ Srl(out_lo, in_hi, shift - 32);
- __ Or(out_lo, out_lo, AT);
- __ Sll(AT, in_hi, 64 - shift);
- __ Srl(out_hi, in_lo, shift - 32);
- __ Or(out_hi, out_hi, AT);
- }
- }
- } else { // !invoke->InputAt(1)->IsIntConstant()
- Register shamt = locations->InAt(1).AsRegister<Register>();
- if (type == Primitive::kPrimInt) {
- Register in = locations->InAt(0).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
-
- if (isR2OrNewer) {
- if (direction == kRotateRight) {
- __ Rotrv(out, in, shamt);
- } else {
- // negu tmp, shamt
- __ Subu(TMP, ZERO, shamt);
- __ Rotrv(out, in, TMP);
- }
- } else {
- if (direction == kRotateRight) {
- __ Srlv(AT, in, shamt);
- __ Subu(TMP, ZERO, shamt);
- __ Sllv(out, in, TMP);
- __ Or(out, out, AT);
- } else {
- __ Sllv(AT, in, shamt);
- __ Subu(TMP, ZERO, shamt);
- __ Srlv(out, in, TMP);
- __ Or(out, out, AT);
- }
- }
- } else { // Primitive::kPrimLong
- Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
- Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
- Register out_lo = locations->Out().AsRegisterPairLow<Register>();
- Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
- MipsLabel done;
-
- if (direction == kRotateRight) {
- __ Nor(TMP, ZERO, shamt);
- __ Srlv(AT, in_lo, shamt);
- __ Sll(out_lo, in_hi, 1);
- __ Sllv(out_lo, out_lo, TMP);
- __ Or(out_lo, out_lo, AT);
- __ Srlv(AT, in_hi, shamt);
- __ Sll(out_hi, in_lo, 1);
- __ Sllv(out_hi, out_hi, TMP);
- __ Or(out_hi, out_hi, AT);
- } else {
- __ Nor(TMP, ZERO, shamt);
- __ Sllv(AT, in_lo, shamt);
- __ Srl(out_lo, in_hi, 1);
- __ Srlv(out_lo, out_lo, TMP);
- __ Or(out_lo, out_lo, AT);
- __ Sllv(AT, in_hi, shamt);
- __ Srl(out_hi, in_lo, 1);
- __ Srlv(out_hi, out_hi, TMP);
- __ Or(out_hi, out_hi, AT);
- }
-
- __ Andi(TMP, shamt, 32);
- __ Beqz(TMP, &done);
- __ Move(TMP, out_hi);
- __ Move(out_hi, out_lo);
- __ Move(out_lo, TMP);
-
- __ Bind(&done);
- }
- }
-}
-
-// int java.lang.Integer.rotateRight(int i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerRotateRight(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerRotateRight(HInvoke* invoke) {
- GenRotate(invoke, Primitive::kPrimInt, IsR2OrNewer(), kRotateRight, GetAssembler());
-}
-
-// long java.lang.Long.rotateRight(long i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitLongRotateRight(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongRotateRight(HInvoke* invoke) {
- GenRotate(invoke, Primitive::kPrimLong, IsR2OrNewer(), kRotateRight, GetAssembler());
-}
-
-// int java.lang.Integer.rotateLeft(int i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerRotateLeft(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerRotateLeft(HInvoke* invoke) {
- GenRotate(invoke, Primitive::kPrimInt, IsR2OrNewer(), kRotateLeft, GetAssembler());
-}
-
-// long java.lang.Long.rotateLeft(long i, int distance)
-void IntrinsicLocationsBuilderMIPS::VisitLongRotateLeft(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongRotateLeft(HInvoke* invoke) {
- GenRotate(invoke, Primitive::kPrimLong, IsR2OrNewer(), kRotateLeft, GetAssembler());
-}
-
// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) {
CreateIntToIntLocations(arena_, invoke);
@@ -1698,90 +1502,72 @@
__ Bind(&end);
}
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS, LongBitCount)
-#define UNIMPLEMENTED_INTRINSIC(Name) \
-void IntrinsicLocationsBuilderMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-} \
-void IntrinsicCodeGeneratorMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundFloat)
+UNIMPLEMENTED_INTRINSIC(MIPS, ThreadCurrentThread)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGet)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLong)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObjectVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePut)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutOrdered)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectOrdered)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLong)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongOrdered)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASInt)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
+UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringCompareTo)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringIndexOf)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringIndexOfAfter)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromChars)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromString)
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringGetCharsNoCheck)
+UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
-UNIMPLEMENTED_INTRINSIC(MathCeil)
-UNIMPLEMENTED_INTRINSIC(MathFloor)
-UNIMPLEMENTED_INTRINSIC(MathRint)
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(ThreadCurrentThread)
-UNIMPLEMENTED_INTRINSIC(UnsafeGet)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObject)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePut)
-UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObject)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLong)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromString)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCos)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathSin)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAcos)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAsin)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAtan)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathAtan2)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCbrt)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathCosh)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathExp)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathExpm1)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathHypot)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathLog)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathLog10)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathNextAfter)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathSinh)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathTan)
+UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(MIPS, DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(MathCos)
-UNIMPLEMENTED_INTRINSIC(MathSin)
-UNIMPLEMENTED_INTRINSIC(MathAcos)
-UNIMPLEMENTED_INTRINSIC(MathAsin)
-UNIMPLEMENTED_INTRINSIC(MathAtan)
-UNIMPLEMENTED_INTRINSIC(MathAtan2)
-UNIMPLEMENTED_INTRINSIC(MathCbrt)
-UNIMPLEMENTED_INTRINSIC(MathCosh)
-UNIMPLEMENTED_INTRINSIC(MathExp)
-UNIMPLEMENTED_INTRINSIC(MathExpm1)
-UNIMPLEMENTED_INTRINSIC(MathHypot)
-UNIMPLEMENTED_INTRINSIC(MathLog)
-UNIMPLEMENTED_INTRINSIC(MathLog10)
-UNIMPLEMENTED_INTRINSIC(MathNextAfter)
-UNIMPLEMENTED_INTRINSIC(MathSinh)
-UNIMPLEMENTED_INTRINSIC(MathTan)
-UNIMPLEMENTED_INTRINSIC(MathTanh)
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS, LongLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits)
-UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(MIPS)
#undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index ca2652b..45611f0 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -340,130 +340,6 @@
GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}
-static void GenRotateRight(HInvoke* invoke,
- Primitive::Type type,
- Mips64Assembler* assembler) {
- DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
-
- LocationSummary* locations = invoke->GetLocations();
- GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
- if (invoke->InputAt(1)->IsIntConstant()) {
- uint32_t shift = static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
- if (type == Primitive::kPrimInt) {
- shift &= 0x1f;
- __ Rotr(out, in, shift);
- } else {
- shift &= 0x3f;
- if (shift < 32) {
- __ Drotr(out, in, shift);
- } else {
- shift &= 0x1f;
- __ Drotr32(out, in, shift);
- }
- }
- } else {
- GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
- if (type == Primitive::kPrimInt) {
- __ Rotrv(out, in, shamt);
- } else {
- __ Drotrv(out, in, shamt);
- }
- }
-}
-
-// int java.lang.Integer.rotateRight(int i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
- GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
-}
-
-// long java.lang.Long.rotateRight(long i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongRotateRight(HInvoke* invoke) {
- GenRotateRight(invoke, Primitive::kPrimLong, GetAssembler());
-}
-
-static void GenRotateLeft(HInvoke* invoke,
- Primitive::Type type,
- Mips64Assembler* assembler) {
- DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
-
- LocationSummary* locations = invoke->GetLocations();
- GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
- if (invoke->InputAt(1)->IsIntConstant()) {
- int32_t shift = -static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
- if (type == Primitive::kPrimInt) {
- shift &= 0x1f;
- __ Rotr(out, in, shift);
- } else {
- shift &= 0x3f;
- if (shift < 32) {
- __ Drotr(out, in, shift);
- } else {
- shift &= 0x1f;
- __ Drotr32(out, in, shift);
- }
- }
- } else {
- GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
- if (type == Primitive::kPrimInt) {
- __ Subu(TMP, ZERO, shamt);
- __ Rotrv(out, in, TMP);
- } else {
- __ Dsubu(TMP, ZERO, shamt);
- __ Drotrv(out, in, TMP);
- }
- }
-}
-
-// int java.lang.Integer.rotateLeft(int i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
- GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler());
-}
-
-// long java.lang.Long.rotateLeft(long i, int distance)
-void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
- GenRotateLeft(invoke, Primitive::kPrimLong, GetAssembler());
-}
-
static void GenReverse(LocationSummary* locations,
Primitive::Type type,
Mips64Assembler* assembler) {
@@ -1636,6 +1512,7 @@
TMP,
TR,
QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pIndexOf).Int32Value());
+ CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Jalr(TMP);
__ Nop();
@@ -1685,7 +1562,7 @@
invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
}
-// java.lang.String.String(byte[] bytes)
+// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
@@ -1719,7 +1596,7 @@
__ Bind(slow_path->GetExitLabel());
}
-// java.lang.String.String(char[] value)
+// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
@@ -1735,6 +1612,12 @@
void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
Mips64Assembler* assembler = GetAssembler();
+ // No need to emit code checking whether `locations->InAt(2)` is a null
+ // pointer, as callers of the native method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // all include a null check on `data` before calling that method.
__ LoadFromOffset(kLoadDoubleword,
TMP,
TR,
@@ -1778,62 +1661,44 @@
__ Bind(slow_path->GetExitLabel());
}
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS64, LongBitCount)
-#define UNIMPLEMENTED_INTRINSIC(Name) \
-void IntrinsicLocationsBuilderMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-} \
-void IntrinsicCodeGeneratorMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(IntegerBitCount)
-UNIMPLEMENTED_INTRINSIC(LongBitCount)
+UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringGetCharsNoCheck)
+UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathCos)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathSin)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAcos)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAsin)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAtan)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAtan2)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathCbrt)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathCosh)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathExp)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathExpm1)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathHypot)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathLog)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathLog10)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathNextAfter)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathSinh)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathTan)
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS64, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(MIPS64, DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(MathCos)
-UNIMPLEMENTED_INTRINSIC(MathSin)
-UNIMPLEMENTED_INTRINSIC(MathAcos)
-UNIMPLEMENTED_INTRINSIC(MathAsin)
-UNIMPLEMENTED_INTRINSIC(MathAtan)
-UNIMPLEMENTED_INTRINSIC(MathAtan2)
-UNIMPLEMENTED_INTRINSIC(MathCbrt)
-UNIMPLEMENTED_INTRINSIC(MathCosh)
-UNIMPLEMENTED_INTRINSIC(MathExp)
-UNIMPLEMENTED_INTRINSIC(MathExpm1)
-UNIMPLEMENTED_INTRINSIC(MathHypot)
-UNIMPLEMENTED_INTRINSIC(MathLog)
-UNIMPLEMENTED_INTRINSIC(MathLog10)
-UNIMPLEMENTED_INTRINSIC(MathNextAfter)
-UNIMPLEMENTED_INTRINSIC(MathSinh)
-UNIMPLEMENTED_INTRINSIC(MathTan)
-UNIMPLEMENTED_INTRINSIC(MathTanh)
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS64, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(MIPS64, LongLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits)
-UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(MIPS64)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 0df4553..9a2dc41 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1564,6 +1564,12 @@
void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
X86Assembler* assembler = GetAssembler();
+ // No need to emit code checking whether `locations->InAt(2)` is a null
+ // pointer, as callers of the native method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // all include a null check on `data` before calling that method.
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -2621,41 +2627,17 @@
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(X86, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
-#define UNIMPLEMENTED_INTRINSIC(Name) \
-void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-} \
-void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}
-
-UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
-
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits)
-UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(X86)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 2a9e684..75204b4 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1659,6 +1659,12 @@
void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
X86_64Assembler* assembler = GetAssembler();
+ // No need to emit code checking whether `locations->InAt(2)` is a null
+ // pointer, as callers of the native method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // all include a null check on `data` before calling that method.
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars),
/* no_rip */ true));
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2705,34 +2711,11 @@
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-// Unimplemented intrinsics.
+UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
-#define UNIMPLEMENTED_INTRINSIC(Name) \
-void IntrinsicLocationsBuilderX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-} \
-void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
-}
-
-UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-
-UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-
-// Handled as HIR instructions.
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits)
-UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
-
-#undef UNIMPLEMENTED_INTRINSIC
+UNREACHABLE_INTRINSICS(X86_64)
#undef __
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 27a5b97..77ded29 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -127,6 +127,9 @@
// Remove the block from the list of blocks, so that further analyses
// never see it.
blocks_[i] = nullptr;
+ if (block->IsExitBlock()) {
+ SetExitBlock(nullptr);
+ }
}
}
}
@@ -1870,13 +1873,49 @@
DCHECK(block->GetPhis().IsEmpty());
if (block->IsExitBlock()) {
- exit_block_ = nullptr;
+ SetExitBlock(nullptr);
}
RemoveElement(reverse_post_order_, block);
blocks_[block->GetBlockId()] = nullptr;
}
+void HGraph::UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
+ HBasicBlock* reference,
+ bool replace_if_back_edge) {
+ if (block->IsLoopHeader()) {
+ // Clear the information of which blocks are contained in that loop. Since the
+ // information is stored as a bit vector based on block ids, we have to update
+ // it, as those block ids were specific to the callee graph and we are now adding
+ // these blocks to the caller graph.
+ block->GetLoopInformation()->ClearAllBlocks();
+ }
+
+ // If not already in a loop, update the loop information.
+ if (!block->IsInLoop()) {
+ block->SetLoopInformation(reference->GetLoopInformation());
+ }
+
+ // If the block is in a loop, update all its outward loops.
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ if (loop_info != nullptr) {
+ for (HLoopInformationOutwardIterator loop_it(*block);
+ !loop_it.Done();
+ loop_it.Advance()) {
+ loop_it.Current()->Add(block);
+ }
+ if (replace_if_back_edge && loop_info->IsBackEdge(*reference)) {
+ loop_info->ReplaceBackEdge(reference, block);
+ }
+ }
+
+ // Copy TryCatchInformation if `reference` is a try block, not if it is a catch block.
+ TryCatchInformation* try_catch_info = reference->IsTryBlock()
+ ? reference->GetTryCatchInformation()
+ : nullptr;
+ block->SetTryCatchInformation(try_catch_info);
+}
+
HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
DCHECK(HasExitBlock()) << "Unimplemented scenario";
// Update the environments in this graph to have the invoke's environment
@@ -1940,34 +1979,6 @@
at->MergeWithInlined(first);
exit_block_->ReplaceWith(to);
- // Update all predecessors of the exit block (now the `to` block)
- // to not `HReturn` but `HGoto` instead.
- bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
- if (to->GetPredecessors().size() == 1) {
- HBasicBlock* predecessor = to->GetPredecessors()[0];
- HInstruction* last = predecessor->GetLastInstruction();
- if (!returns_void) {
- return_value = last->InputAt(0);
- }
- predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
- predecessor->RemoveInstruction(last);
- } else {
- if (!returns_void) {
- // There will be multiple returns.
- return_value = new (allocator) HPhi(
- allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
- to->AddPhi(return_value->AsPhi());
- }
- for (HBasicBlock* predecessor : to->GetPredecessors()) {
- HInstruction* last = predecessor->GetLastInstruction();
- if (!returns_void) {
- return_value->AsPhi()->AddInput(last->InputAt(0));
- }
- predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
- predecessor->RemoveInstruction(last);
- }
- }
-
// Update the meta information surrounding blocks:
// (1) the graph they are now in,
// (2) the reverse post order of that graph,
@@ -1991,10 +2002,6 @@
size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at);
MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at);
- HLoopInformation* loop_info = at->GetLoopInformation();
- // Copy TryCatchInformation if `at` is a try block, not if it is a catch block.
- TryCatchInformation* try_catch_info = at->IsTryBlock() ? at->GetTryCatchInformation() : nullptr;
-
// Do a reverse post order of the blocks in the callee and do (1), (2), (3)
// and (4) to the blocks that apply.
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
@@ -2005,23 +2012,7 @@
current->SetGraph(outer_graph);
outer_graph->AddBlock(current);
outer_graph->reverse_post_order_[++index_of_at] = current;
- if (!current->IsInLoop()) {
- current->SetLoopInformation(loop_info);
- } else if (current->IsLoopHeader()) {
- // Clear the information of which blocks are contained in that loop. Since the
- // information is stored as a bit vector based on block ids, we have to update
- // it, as those block ids were specific to the callee graph and we are now adding
- // these blocks to the caller graph.
- current->GetLoopInformation()->ClearAllBlocks();
- }
- if (current->IsInLoop()) {
- for (HLoopInformationOutwardIterator loop_it(*current);
- !loop_it.Done();
- loop_it.Advance()) {
- loop_it.Current()->Add(current);
- }
- }
- current->SetTryCatchInformation(try_catch_info);
+ UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge */ false);
}
}
@@ -2029,26 +2020,40 @@
to->SetGraph(outer_graph);
outer_graph->AddBlock(to);
outer_graph->reverse_post_order_[++index_of_at] = to;
- if (loop_info != nullptr) {
- if (!to->IsInLoop()) {
- to->SetLoopInformation(loop_info);
+ // Only `to` can become a back edge, as the inlined blocks
+ // are predecessors of `to`.
+ UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+
+ // Update all predecessors of the exit block (now the `to` block)
+ // to not `HReturn` but `HGoto` instead.
+ bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
+ if (to->GetPredecessors().size() == 1) {
+ HBasicBlock* predecessor = to->GetPredecessors()[0];
+ HInstruction* last = predecessor->GetLastInstruction();
+ if (!returns_void) {
+ return_value = last->InputAt(0);
}
- for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
- loop_it.Current()->Add(to);
+ predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
+ predecessor->RemoveInstruction(last);
+ } else {
+ if (!returns_void) {
+ // There will be multiple returns.
+ return_value = new (allocator) HPhi(
+ allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
+ to->AddPhi(return_value->AsPhi());
}
- if (loop_info->IsBackEdge(*at)) {
- // Only `to` can become a back edge, as the inlined blocks
- // are predecessors of `to`.
- loop_info->ReplaceBackEdge(at, to);
+ for (HBasicBlock* predecessor : to->GetPredecessors()) {
+ HInstruction* last = predecessor->GetLastInstruction();
+ if (!returns_void) {
+ DCHECK(last->IsReturn());
+ return_value->AsPhi()->AddInput(last->InputAt(0));
+ }
+ predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
+ predecessor->RemoveInstruction(last);
}
}
- to->SetTryCatchInformation(try_catch_info);
}
- // Update the next instruction id of the outer graph, so that instructions
- // added later get bigger ids than those in the inner graph.
- outer_graph->SetCurrentInstructionId(GetNextInstructionId());
-
// Walk over the entry block and:
// - Move constants from the entry block to the outer_graph's entry block,
// - Replace HParameterValue instructions with their real value.
@@ -2157,32 +2162,17 @@
reverse_post_order_[index_of_header++] = false_block;
reverse_post_order_[index_of_header++] = new_pre_header;
- // Fix loop information.
- HLoopInformation* loop_info = old_pre_header->GetLoopInformation();
- if (loop_info != nullptr) {
- if_block->SetLoopInformation(loop_info);
- true_block->SetLoopInformation(loop_info);
- false_block->SetLoopInformation(loop_info);
- new_pre_header->SetLoopInformation(loop_info);
- // Add blocks to all enveloping loops.
- for (HLoopInformationOutwardIterator loop_it(*old_pre_header);
- !loop_it.Done();
- loop_it.Advance()) {
- loop_it.Current()->Add(if_block);
- loop_it.Current()->Add(true_block);
- loop_it.Current()->Add(false_block);
- loop_it.Current()->Add(new_pre_header);
- }
- }
-
- // Fix try/catch information.
- TryCatchInformation* try_catch_info = old_pre_header->IsTryBlock()
- ? old_pre_header->GetTryCatchInformation()
- : nullptr;
- if_block->SetTryCatchInformation(try_catch_info);
- true_block->SetTryCatchInformation(try_catch_info);
- false_block->SetTryCatchInformation(try_catch_info);
- new_pre_header->SetTryCatchInformation(try_catch_info);
+ // The pre_header can never be a back edge of a loop.
+ DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
+ !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
+ UpdateLoopAndTryInformationOfNewBlock(
+ if_block, old_pre_header, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ true_block, old_pre_header, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ false_block, old_pre_header, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
}
static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
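The hunks above replace three hand-rolled copies of the loop/try bookkeeping with a single helper. A minimal sketch of what UpdateLoopAndTryInformationOfNewBlock has to do, reconstructed from the code removed above (illustrative only, not the actual nodes.cc body; the special handling that clears the block bit vectors of inlined loop headers is omitted here):

void HGraph::UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
                                                   HBasicBlock* reference,
                                                   bool replace_if_back_edge) {
  // Copy loop membership from the reference block.
  HLoopInformation* loop_info = reference->GetLoopInformation();
  if (loop_info != nullptr) {
    if (!block->IsInLoop()) {
      block->SetLoopInformation(loop_info);
    }
    // Register `block` in every enveloping loop.
    for (HLoopInformationOutwardIterator it(*reference); !it.Done(); it.Advance()) {
      it.Current()->Add(block);
    }
    // Optionally take over the back-edge role of `reference`.
    if (replace_if_back_edge && loop_info->IsBackEdge(*reference)) {
      loop_info->ReplaceBackEdge(reference, block);
    }
  }
  // Copy try/catch membership as well.
  block->SetTryCatchInformation(
      reference->IsTryBlock() ? reference->GetTryCatchInformation() : nullptr);
}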
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9eddfc7..b684cc6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -353,6 +353,13 @@
// and removing the invoke instruction.
HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke);
+ // Update the loop and try membership of `block`, which was spawned from `reference`.
+ // In case `reference` is a back edge, `replace_if_back_edge` notifies whether `block`
+ // should be the new back edge.
+ void UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
+ HBasicBlock* reference,
+ bool replace_if_back_edge);
+
// Need to add a couple of blocks to test if the loop body is entered and
// put deoptimization instructions, etc.
void TransformLoopHeaderForBCE(HBasicBlock* header);
@@ -380,6 +387,7 @@
}
void SetCurrentInstructionId(int32_t id) {
+ DCHECK_GE(id, current_instruction_id_);
current_instruction_id_ = id;
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5a9f258..13d6d62 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -861,7 +861,7 @@
const uint32_t access_flags = method->GetAccessFlags();
const InvokeType invoke_type = method->GetInvokeType();
- ArenaAllocator arena(Runtime::Current()->GetArenaPool());
+ ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
CodeVectorAllocator code_allocator(&arena);
std::unique_ptr<CodeGenerator> codegen;
{
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index dfcb4bc..d9a2f30 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1263,6 +1263,24 @@
dex_caches_.clear();
}
+ void LoadClassProfileDescriptors() {
+ if (profile_compilation_info_ != nullptr && app_image_) {
+ Runtime* runtime = Runtime::Current();
+ CHECK(runtime != nullptr);
+ std::set<DexCacheResolvedClasses> resolved_classes(
+ profile_compilation_info_->GetResolvedClasses());
+ image_classes_.reset(new std::unordered_set<std::string>(
+ runtime->GetClassLinker()->GetClassDescriptorsForProfileKeys(resolved_classes)));
+ VLOG(compiler) << "Loaded " << image_classes_->size()
+ << " image class descriptors from profile";
+ if (VLOG_IS_ON(compiler)) {
+ for (const std::string& s : *image_classes_) {
+ LOG(INFO) << "Image class " << s;
+ }
+ }
+ }
+ }
+
// Set up the environment for compilation. Includes starting the runtime and loading/opening the
// boot class path.
bool Setup() {
@@ -1610,7 +1628,10 @@
// The non moving space is right after the oat file. Put the preferred app image location
// right after the non moving space so that we ideally get a continuous immune region for
// the GC.
- const size_t non_moving_space_capacity = heap->GetNonMovingSpace()->Capacity();
+ // Use the default non moving space capacity since dex2oat does not have a separate non-
+ // moving space. This means the runtime's non moving space size will be as large
+ // as the growth limit for dex2oat, but smaller in the zygote.
+ const size_t non_moving_space_capacity = gc::Heap::kDefaultNonMovingSpaceCapacity;
image_base_ += non_moving_space_capacity;
VLOG(compiler) << "App image base=" << reinterpret_cast<void*>(image_base_);
}
@@ -2475,6 +2496,7 @@
}
static int CompileImage(Dex2Oat& dex2oat) {
+ dex2oat.LoadClassProfileDescriptors();
dex2oat.Compile();
if (!dex2oat.WriteOatFiles()) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e30b968..c187536 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -361,6 +361,7 @@
const char* method_filter,
bool list_classes,
bool list_methods,
+ bool dump_header_only,
const char* export_dex_location,
uint32_t addr2instr)
: dump_raw_mapping_table_(dump_raw_mapping_table),
@@ -373,6 +374,7 @@
method_filter_(method_filter),
list_classes_(list_classes),
list_methods_(list_methods),
+ dump_header_only_(dump_header_only),
export_dex_location_(export_dex_location),
addr2instr_(addr2instr),
class_loader_(nullptr) {}
@@ -387,6 +389,7 @@
const char* const method_filter_;
const bool list_classes_;
const bool list_methods_;
+ const bool dump_header_only_;
const char* const export_dex_location_;
uint32_t addr2instr_;
Handle<mirror::ClassLoader>* class_loader_;
@@ -514,21 +517,24 @@
os << StringPrintf("0x%08x\n\n", resolved_addr2instr_);
}
- for (size_t i = 0; i < oat_dex_files_.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
- CHECK(oat_dex_file != nullptr);
+ if (!options_.dump_header_only_) {
+ for (size_t i = 0; i < oat_dex_files_.size(); i++) {
+ const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ CHECK(oat_dex_file != nullptr);
- // If file export selected skip file analysis
- if (options_.export_dex_location_) {
- if (!ExportDexFile(os, *oat_dex_file)) {
- success = false;
- }
- } else {
- if (!DumpOatDexFile(os, *oat_dex_file)) {
- success = false;
+ // If file export is selected, skip the file analysis.
+ if (options_.export_dex_location_) {
+ if (!ExportDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
+ } else {
+ if (!DumpOatDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
}
}
}
+
os << std::flush;
return success;
}
@@ -2572,6 +2578,8 @@
dump_code_info_stack_maps_ = true;
} else if (option == "--no-disassemble") {
disassemble_code_ = false;
+ } else if (option == "--header-only") {
+ dump_header_only_ = true;
} else if (option.starts_with("--symbolize=")) {
oat_filename_ = option.substr(strlen("--symbolize=")).data();
symbolize_ = true;
@@ -2655,6 +2663,9 @@
" --no-disassemble may be used to disable disassembly.\n"
" Example: --no-disassemble\n"
"\n"
+ " --header-only may be used to print only the oat header.\n"
+ " Example: --header-only\n"
+ "\n"
" --list-classes may be used to list target file classes (can be used with filters).\n"
" Example: --list-classes\n"
" Example: --list-classes --class-filter=com.example.foo\n"
@@ -2697,6 +2708,7 @@
bool symbolize_ = false;
bool list_classes_ = false;
bool list_methods_ = false;
+ bool dump_header_only_ = false;
uint32_t addr2instr_ = 0;
const char* export_dex_location_ = nullptr;
};
@@ -2719,6 +2731,7 @@
args_->method_filter_,
args_->list_classes_,
args_->list_methods_,
+ args_->dump_header_only_,
args_->export_dex_location_,
args_->addr2instr_));
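With the new flag, checking an oat file's header no longer requires walking every dex file it contains: passing --header-only alongside oatdump's existing input argument (for example an --oat-file=<file.oat> option, which predates this change and is only assumed here) skips the per-dex-file analysis loop entirely, so only the information printed before that loop is emitted.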
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 3faa8eb..b0d5df2 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -37,8 +37,8 @@
std::string dex_location2 = "location2" + id;
uint32_t dex_location_checksum2 = 10 * checksum;
for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
- ASSERT_TRUE(info->AddData(dex_location1, dex_location_checksum1, i));
- ASSERT_TRUE(info->AddData(dex_location2, dex_location_checksum2, i));
+ ASSERT_TRUE(info->AddMethodIndex(dex_location1, dex_location_checksum1, i));
+ ASSERT_TRUE(info->AddMethodIndex(dex_location2, dex_location_checksum2, i));
}
ASSERT_TRUE(info->Save(GetFd(profile)));
ASSERT_EQ(0, profile.GetFile()->Flush());
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 947de8a..500fa14 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -292,7 +292,8 @@
# Note that the fault_handler_x86.cc is not a mistake. This file is
# shared between the x86 and x86_64 architectures.
LIBART_SRC_FILES_x86_64 := \
- interpreter/mterp/mterp_stub.cc \
+ interpreter/mterp/mterp.cc \
+ interpreter/mterp/out/mterp_x86_64.S \
arch/x86_64/context_x86_64.cc \
arch/x86_64/entrypoints_init_x86_64.cc \
arch/x86_64/jni_entrypoints_x86_64.S \
@@ -306,7 +307,8 @@
$(LIBART_SRC_FILES_x86_64) \
LIBART_TARGET_SRC_FILES_mips := \
- interpreter/mterp/mterp_stub.cc \
+ interpreter/mterp/mterp.cc \
+ interpreter/mterp/out/mterp_mips.S \
arch/mips/context_mips.cc \
arch/mips/entrypoints_init_mips.cc \
arch/mips/jni_entrypoints_mips.S \
@@ -316,7 +318,8 @@
arch/mips/fault_handler_mips.cc
LIBART_TARGET_SRC_FILES_mips64 := \
- interpreter/mterp/mterp_stub.cc \
+ interpreter/mterp/mterp.cc \
+ interpreter/mterp/out/mterp_mips64.S \
arch/mips64/context_mips64.cc \
arch/mips64/entrypoints_init_mips64.cc \
arch/mips64/jni_entrypoints_mips64.S \
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index c4e314b..cfcef49 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -942,7 +942,86 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
+ENTRY art_quick_alloc_object_tlab
+ // Fast path tlab allocation.
+ // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
+ // r2, r3, r12: free.
+#if defined(USE_READ_BARRIER)
+ eor r0, r0, r0 // Read barrier not supported here.
+ sub r0, r0, #1 // Return -1.
+ bx lr
+#endif
+ ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
+ // Load the class (r2)
+ ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ cbz r2, .Lart_quick_alloc_object_tlab_slow_path // Check null class
+ // Check class status.
+ ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
+ cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
+ bne .Lart_quick_alloc_object_tlab_slow_path
+ // Add a fake dependence from the
+ // following access flag and size
+ // loads to the status load.
+ // This is to prevent those loads
+ // from being reordered above the
+ // status load and reading wrong
+ // values (an alternative is to use
+ // a load-acquire for the status).
+ eor r3, r3, r3
+ add r2, r2, r3
+ // Check access flags has
+ // kAccClassIsFinalizable.
+ ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
+ tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
+ bne .Lart_quick_alloc_object_tlab_slow_path
+ // Load thread_local_pos (r12) and
+ // thread_local_end (r3) with ldrd.
+ // Check constraints for ldrd.
+#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
+#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
+#endif
+ ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
+ sub r12, r3, r12 // Compute the remaining buf size.
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3).
+ cmp r3, r12 // Check if it fits. OK to do this
+ // before rounding up the object size
+ // assuming the buf size alignment.
+ bhi .Lart_quick_alloc_object_tlab_slow_path
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+ // Round up the object size by the
+ // object alignment. (addr + 7) & ~7.
+ add r3, r3, #OBJECT_ALIGNMENT_MASK
+ and r3, r3, #OBJECT_ALIGNMENT_MASK_TOGGLED
+ // Reload old thread_local_pos (r0)
+ // for the return value.
+ ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET]
+ add r1, r0, r3
+ str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ add r1, r1, #1
+ str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+ POISON_HEAP_REF r2
+ str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+ // (Alternatively we could use "ishst"
+ // if we use load-acquire for the
+ // class status load.)
+ dmb ish
+ bx lr
+.Lart_quick_alloc_object_tlab_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC.
+ mov r2, r9 // Pass Thread::Current.
+ bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_tlab
+
+
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
ENTRY art_quick_alloc_object_rosalloc
// Fast path rosalloc allocation.
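Restated in C-like pseudocode, the hand-written TLAB entrypoint above performs roughly the following (field and helper names are illustrative; the real code works off the asm_support.h offsets and the registers listed in its comments):

// Sketch of art_quick_alloc_object_tlab's fast path; SlowPath stands for artAllocObjectFromCodeTLAB.
Object* AllocObjectTLAB(uint32_t type_idx, ArtMethod* method, Thread* self) {
  Class* klass = method->GetDexCacheResolvedTypes()[type_idx];   // may be null
  if (klass == nullptr || klass->GetStatus() != kStatusInitialized) {
    return SlowPath(type_idx, method, self);
  }
  if ((klass->GetAccessFlags() & kAccClassIsFinalizable) != 0) {
    return SlowPath(type_idx, method, self);
  }
  size_t remaining = self->thread_local_end - self->thread_local_pos;
  size_t size = klass->GetObjectSize();
  if (size > remaining) {                        // checked before rounding, see the comment above
    return SlowPath(type_idx, method, self);
  }
  size = (size + kObjectAlignmentMask) & ~kObjectAlignmentMask;  // (addr + 7) & ~7
  Object* obj = reinterpret_cast<Object*>(self->thread_local_pos);
  self->thread_local_pos += size;
  self->thread_local_objects += 1;
  obj->SetClass(klass);                          // followed by a dmb ish fence
  return obj;
}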
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index cd38e16..a60f31e 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -411,7 +411,12 @@
DCHECK(method_header->Contains(pc));
return method_header;
} else {
- DCHECK(!code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) << std::hex << pc;
+ DCHECK(!code_cache->ContainsPc(reinterpret_cast<const void*>(pc)))
+ << PrettyMethod(this)
+ << ", pc=" << std::hex << pc
+ << ", entry_point=" << std::hex << reinterpret_cast<uintptr_t>(existing_entry_point)
+ << ", copy=" << std::boolalpha << IsCopied()
+ << ", proxy=" << std::boolalpha << IsProxyMethod();
}
}
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index eb3b7f3..879364e 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -121,32 +121,32 @@
ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_objects.
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 168 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
+ art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 169 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
- art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
-#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
-#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 4 * __SIZEOF_POINTER__)
+#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
art::Thread::MterpDefaultIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
-#define THREAD_ALT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 5 * __SIZEOF_POINTER__)
+#define THREAD_ALT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 4 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
art::Thread::MterpAltIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.rosalloc_runs.
-#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 6 * __SIZEOF_POINTER__)
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 5 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
diff --git a/runtime/atomic.cc b/runtime/atomic.cc
index e766a8d..d5ae570 100644
--- a/runtime/atomic.cc
+++ b/runtime/atomic.cc
@@ -28,7 +28,7 @@
}
void QuasiAtomic::Startup() {
- if (kNeedSwapMutexes) {
+ if (NeedSwapMutexes(kRuntimeISA)) {
gSwapMutexes = new std::vector<Mutex*>;
for (size_t i = 0; i < kSwapMutexCount; ++i) {
gSwapMutexes->push_back(new Mutex("QuasiAtomic stripe", kSwapMutexesLock));
@@ -37,7 +37,7 @@
}
void QuasiAtomic::Shutdown() {
- if (kNeedSwapMutexes) {
+ if (NeedSwapMutexes(kRuntimeISA)) {
STLDeleteElements(gSwapMutexes);
delete gSwapMutexes;
}
diff --git a/runtime/atomic.h b/runtime/atomic.h
index d4a7f37..e2a7259 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -22,6 +22,7 @@
#include <limits>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -44,14 +45,10 @@
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
-#if defined(__mips__) && !defined(__LP64__)
- static constexpr bool kNeedSwapMutexes = true;
-#elif defined(__mips__) && defined(__LP64__)
- // TODO - mips64 still need this for Cas64 ???
- static constexpr bool kNeedSwapMutexes = true;
-#else
- static constexpr bool kNeedSwapMutexes = false;
-#endif
+ static constexpr bool NeedSwapMutexes(InstructionSet isa) {
+ // TODO: does mips64 still need this for Cas64?
+ return (isa == kMips) || (isa == kMips64);
+ }
public:
static void Startup();
@@ -60,7 +57,7 @@
// Reads the 64-bit value at "addr" without tearing.
static int64_t Read64(volatile const int64_t* addr) {
- if (!kNeedSwapMutexes) {
+ if (!NeedSwapMutexes(kRuntimeISA)) {
int64_t value;
#if defined(__LP64__)
value = *addr;
@@ -96,7 +93,7 @@
// Writes to the 64-bit value at "addr" without tearing.
static void Write64(volatile int64_t* addr, int64_t value) {
- if (!kNeedSwapMutexes) {
+ if (!NeedSwapMutexes(kRuntimeISA)) {
#if defined(__LP64__)
*addr = value;
#else
@@ -142,7 +139,7 @@
// at some point during the execution of Cas64, *addr was not equal to
// old_value.
static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
- if (!kNeedSwapMutexes) {
+ if (!NeedSwapMutexes(kRuntimeISA)) {
return __sync_bool_compare_and_swap(addr, old_value, new_value);
} else {
return SwapMutexCas64(old_value, new_value, addr);
@@ -150,8 +147,8 @@
}
// Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
- static bool LongAtomicsUseMutexes() {
- return kNeedSwapMutexes;
+ static bool LongAtomicsUseMutexes(InstructionSet isa) {
+ return NeedSwapMutexes(isa);
}
static void ThreadFenceAcquire() {
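Turning kNeedSwapMutexes into a constexpr predicate over an InstructionSet keeps the runtime behaviour identical (every call site now passes kRuntimeISA) while letting host-side code ask the question for a different target. A small illustrative use:

// Illustrative only: query whether 64-bit atomics need the swap-mutex fallback on a given ISA.
bool SupportsLockFree64BitAtomics(InstructionSet isa) {
  return !QuasiAtomic::LongAtomicsUseMutexes(isa);
}
// SupportsLockFree64BitAtomics(kMips) -> false (falls back to SwapMutexCas64 and friends);
// any non-MIPS ISA -> true.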
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index a4b38ea..44af3f7 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -183,10 +183,10 @@
free(reinterpret_cast<void*>(memory_));
}
-MemMapArena::MemMapArena(size_t size, bool low_4gb) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
std::string error_msg;
map_.reset(MemMap::MapAnonymous(
- "LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
+ name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
CHECK(map_.get() != nullptr) << error_msg;
memory_ = map_->Begin();
size_ = map_->Size();
@@ -210,9 +210,12 @@
}
}
-ArenaPool::ArenaPool(bool use_malloc, bool low_4gb)
- : use_malloc_(use_malloc), lock_("Arena pool lock", kArenaPoolLock), free_arenas_(nullptr),
- low_4gb_(low_4gb) {
+ArenaPool::ArenaPool(bool use_malloc, bool low_4gb, const char* name)
+ : use_malloc_(use_malloc),
+ lock_("Arena pool lock", kArenaPoolLock),
+ free_arenas_(nullptr),
+ low_4gb_(low_4gb),
+ name_(name) {
if (low_4gb) {
CHECK(!use_malloc) << "low4gb must use map implementation";
}
@@ -250,7 +253,7 @@
}
if (ret == nullptr) {
ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) :
- new MemMapArena(size, low_4gb_);
+ new MemMapArena(size, low_4gb_, name_);
}
ret->Reset();
return ret;
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 8a96571..728f897 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -261,7 +261,7 @@
class MemMapArena FINAL : public Arena {
public:
- MemMapArena(size_t size, bool low_4gb);
+ MemMapArena(size_t size, bool low_4gb, const char* name);
virtual ~MemMapArena();
void Release() OVERRIDE;
@@ -271,7 +271,9 @@
class ArenaPool {
public:
- explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
+ ArenaPool(bool use_malloc = true,
+ bool low_4gb = false,
+ const char* name = "LinearAlloc");
~ArenaPool();
Arena* AllocArena(size_t size) REQUIRES(!lock_);
void FreeArenaChain(Arena* first) REQUIRES(!lock_);
@@ -287,6 +289,7 @@
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
const bool low_4gb_;
+ const char* name_;
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
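The added name only changes the label given to the anonymous MemMap behind each MemMapArena, so different pools (for example the JIT's, referenced via GetJitArenaPool() elsewhere in this change) show up under distinct names in /proc/<pid>/maps. A sketch of constructing such a pool (the name string here is illustrative, not necessarily the one the runtime uses):

// Illustrative: a map-backed pool whose arenas are labelled distinctly from "LinearAlloc".
ArenaPool jit_arena_pool(/* use_malloc */ false, /* low_4gb */ false, "JitArenaPool");
ArenaAllocator arena(&jit_arena_pool);  // as in optimizing_compiler.cc / jit_compiler.cc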
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e72f2a2..293451c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -66,8 +66,8 @@
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
- kTransactionLogLock,
kMarkSweepMarkStackLock,
+ kTransactionLogLock,
kJniWeakGlobalsLock,
kReferenceQueueSoftReferencesLock,
kReferenceQueuePhantomReferencesLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cd4daeb..5f26b5d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -58,6 +58,7 @@
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jit/offline_profiling_info.h"
#include "leb128.h"
#include "linear_alloc.h"
#include "mirror/class.h"
@@ -1615,7 +1616,8 @@
VLOG(image) << name->ToModifiedUtf8();
}
*error_msg = "Rejecting application image due to class loader mismatch";
- return false;
+ // Ignore the class loader mismatch for now, since such classes would just use possibly
+ // incorrect oat code anyway. The structural class check should be done in the parent.
}
}
}
@@ -2853,8 +2855,9 @@
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
LinearAlloc* allocator = class_loader->GetAllocator();
if (allocator == nullptr) {
- allocator = Runtime::Current()->CreateLinearAlloc();
- class_loader->SetAllocator(allocator);
+ RegisterClassLoader(class_loader);
+ allocator = class_loader->GetAllocator();
+ CHECK(allocator != nullptr);
}
return allocator;
}
@@ -4815,24 +4818,31 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class);
}
+void ClassLinker::RegisterClassLoader(mirror::ClassLoader* class_loader) {
+ CHECK(class_loader->GetAllocator() == nullptr);
+ CHECK(class_loader->GetClassTable() == nullptr);
+ Thread* const self = Thread::Current();
+ ClassLoaderData data;
+ data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
+ // Create and set the class table.
+ data.class_table = new ClassTable;
+ class_loader->SetClassTable(data.class_table);
+ // Create and set the linear allocator.
+ data.allocator = Runtime::Current()->CreateLinearAlloc();
+ class_loader->SetAllocator(data.allocator);
+ // Add to the list so that we know to free the data later.
+ class_loaders_.push_back(data);
+}
+
ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) {
if (class_loader == nullptr) {
return &boot_class_table_;
}
ClassTable* class_table = class_loader->GetClassTable();
if (class_table == nullptr) {
- class_table = new ClassTable;
- Thread* const self = Thread::Current();
- ClassLoaderData data;
- data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
- data.class_table = class_table;
- // Don't already have a class table, add it to the class loader.
- CHECK(class_loader->GetClassTable() == nullptr);
- class_loader->SetClassTable(data.class_table);
- // Should have been set when we registered the dex file.
- data.allocator = class_loader->GetAllocator();
- CHECK(data.allocator != nullptr);
- class_loaders_.push_back(data);
+ RegisterClassLoader(class_loader);
+ class_table = class_loader->GetClassTable();
+ DCHECK(class_table != nullptr);
}
return class_table;
}
@@ -7628,6 +7638,116 @@
}
}
+std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) {
+ ScopedObjectAccess soa(Thread::Current());
+ ScopedAssertNoThreadSuspension ants(soa.Self(), __FUNCTION__);
+ std::set<DexCacheResolvedClasses> ret;
+ VLOG(class_linker) << "Collecting resolved classes";
+ const uint64_t start_time = NanoTime();
+ ReaderMutexLock mu(soa.Self(), *DexLock());
+ // Loop through all the dex caches and inspect resolved classes.
+ for (const ClassLinker::DexCacheData& data : GetDexCachesData()) {
+ if (soa.Self()->IsJWeakCleared(data.weak_root)) {
+ continue;
+ }
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(soa.Self()->DecodeJObject(data.weak_root));
+ if (dex_cache == nullptr) {
+ continue;
+ }
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ const std::string& location = dex_file->GetLocation();
+ const size_t num_class_defs = dex_file->NumClassDefs();
+ // Use the resolved types; this will miss array classes.
+ const size_t num_types = dex_file->NumTypeIds();
+ VLOG(class_linker) << "Collecting class profile for dex file " << location
+ << " types=" << num_types << " class_defs=" << num_class_defs;
+ DexCacheResolvedClasses resolved_classes(dex_file->GetLocation(),
+ dex_file->GetLocationChecksum());
+ size_t num_resolved = 0;
+ std::unordered_set<uint16_t> class_set;
+ CHECK_EQ(num_types, dex_cache->NumResolvedTypes());
+ for (size_t i = 0; i < num_types; ++i) {
+ mirror::Class* klass = dex_cache->GetResolvedType(i);
+ // Filter out null class loader since that is the boot class loader.
+ if (klass == nullptr || (ignore_boot_classes && klass->GetClassLoader() == nullptr)) {
+ continue;
+ }
+ ++num_resolved;
+ DCHECK(!klass->IsProxyClass());
+ DCHECK(klass->IsResolved());
+ mirror::DexCache* klass_dex_cache = klass->GetDexCache();
+ if (klass_dex_cache == dex_cache) {
+ const size_t class_def_idx = klass->GetDexClassDefIndex();
+ DCHECK(klass->IsResolved());
+ CHECK_LT(class_def_idx, num_class_defs);
+ class_set.insert(class_def_idx);
+ }
+ }
+
+ if (!class_set.empty()) {
+ auto it = ret.find(resolved_classes);
+ if (it != ret.end()) {
+ // Already have the key, union the class def idxs.
+ it->AddClasses(class_set.begin(), class_set.end());
+ } else {
+ resolved_classes.AddClasses(class_set.begin(), class_set.end());
+ ret.insert(resolved_classes);
+ }
+ }
+
+ VLOG(class_linker) << "Dex location " << location << " has " << num_resolved << " / "
+ << num_class_defs << " resolved classes";
+ }
+ VLOG(class_linker) << "Collecting class profile took " << PrettyDuration(NanoTime() - start_time);
+ return ret;
+}
+
+std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
+ const std::set<DexCacheResolvedClasses>& classes) {
+ std::unordered_set<std::string> ret;
+ Thread* const self = Thread::Current();
+ std::unordered_map<std::string, const DexFile*> location_to_dex_file;
+ ScopedObjectAccess soa(self);
+ ScopedAssertNoThreadSuspension ants(soa.Self(), __FUNCTION__);
+ ReaderMutexLock mu(self, *DexLock());
+ for (const ClassLinker::DexCacheData& data : GetDexCachesData()) {
+ if (!self->IsJWeakCleared(data.weak_root)) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(soa.Self()->DecodeJObject(data.weak_root));
+ if (dex_cache != nullptr) {
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ // There could be duplicates if two dex files with the same location are mapped.
+ location_to_dex_file.emplace(
+ ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation()), dex_file);
+ }
+ }
+ }
+ for (const DexCacheResolvedClasses& info : classes) {
+ const std::string& profile_key = info.GetDexLocation();
+ auto found = location_to_dex_file.find(profile_key);
+ if (found != location_to_dex_file.end()) {
+ const DexFile* dex_file = found->second;
+ VLOG(profiler) << "Found opened dex file for " << dex_file->GetLocation() << " with "
+ << info.GetClasses().size() << " classes";
+ DCHECK_EQ(dex_file->GetLocationChecksum(), info.GetLocationChecksum());
+ for (uint16_t class_def_idx : info.GetClasses()) {
+ if (class_def_idx >= dex_file->NumClassDefs()) {
+ LOG(WARNING) << "Class def index " << class_def_idx << " >= " << dex_file->NumClassDefs();
+ continue;
+ }
+ const DexFile::TypeId& type_id = dex_file->GetTypeId(
+ dex_file->GetClassDef(class_def_idx).class_idx_);
+ const char* descriptor = dex_file->GetTypeDescriptor(type_id);
+ ret.insert(descriptor);
+ }
+ } else {
+ VLOG(class_linker) << "Failed to find opened dex file for profile key " << profile_key;
+ }
+ }
+ return ret;
+}
+
// Instantiate ResolveMethod.
template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>(
const DexFile& dex_file,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index aa55dac..0a75b27 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -17,8 +17,10 @@
#ifndef ART_RUNTIME_CLASS_LINKER_H_
#define ART_RUNTIME_CLASS_LINKER_H_
+#include <set>
#include <string>
#include <unordered_map>
+#include <unordered_set>
#include <utility>
#include <vector>
@@ -27,6 +29,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "class_table.h"
+#include "dex_cache_resolved_classes.h"
#include "dex_file.h"
#include "gc_root.h"
#include "jni.h"
@@ -573,12 +576,12 @@
// Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
// allocator for this class loader is already created.
- static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+ LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_);
// Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
// set it. TODO: Consider using a lock other than classlinker_classes_lock_.
- static LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+ LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -589,6 +592,13 @@
static bool ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code)
SHARED_REQUIRES(Locks::mutator_lock_);
+ std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
+ REQUIRES(!dex_lock_);
+
+ std::unordered_set<std::string> GetClassDescriptorsForProfileKeys(
+ const std::set<DexCacheResolvedClasses>& classes)
+ REQUIRES(!dex_lock_);
+
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
@@ -970,9 +980,16 @@
mirror::Class* LookupClassFromBootImage(const char* descriptor)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Register a class loader and create its class table and allocator. Should not be called if
+ // these are already created.
+ void RegisterClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::classlinker_classes_lock_);
+
// Returns null if not found.
ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
+
// Insert a new class table if not found.
ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/dex_cache_resolved_classes.h b/runtime/dex_cache_resolved_classes.h
new file mode 100644
index 0000000..80c12cb
--- /dev/null
+++ b/runtime/dex_cache_resolved_classes.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_CACHE_RESOLVED_CLASSES_H_
+#define ART_RUNTIME_DEX_CACHE_RESOLVED_CLASSES_H_
+
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+namespace art {
+
+// Data structure for passing around which of the classes belonging to a dex cache / dex file are resolved.
+class DexCacheResolvedClasses {
+ public:
+ DexCacheResolvedClasses(const std::string& dex_location, uint32_t location_checksum)
+ : dex_location_(dex_location),
+ location_checksum_(location_checksum) {}
+
+ // Only compare the key elements; ignore the resolved classes.
+ int Compare(const DexCacheResolvedClasses& other) const {
+ if (location_checksum_ != other.location_checksum_) {
+ return static_cast<int>(location_checksum_ - other.location_checksum_);
+ }
+ return dex_location_.compare(other.dex_location_);
+ }
+
+ template <class InputIt>
+ void AddClasses(InputIt begin, InputIt end) const {
+ classes_.insert(begin, end);
+ }
+
+ const std::string& GetDexLocation() const {
+ return dex_location_;
+ }
+
+ uint32_t GetLocationChecksum() const {
+ return location_checksum_;
+ }
+
+ const std::unordered_set<uint16_t>& GetClasses() const {
+ return classes_;
+ }
+
+ private:
+ const std::string dex_location_;
+ const uint32_t location_checksum_;
+ // Set of resolved class def indexes.
+ mutable std::unordered_set<uint16_t> classes_;
+};
+
+inline bool operator<(const DexCacheResolvedClasses& a, const DexCacheResolvedClasses& b) {
+ return a.Compare(b) < 0;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_CACHE_RESOLVED_CLASSES_H_
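Because Compare() (and therefore operator<) only looks at the location/checksum key, a std::set<DexCacheResolvedClasses> holds at most one entry per dex file, and the mutable classes_ member lets callers such as GetResolvedClasses() union class def indexes into an element already in the set. A small usage sketch (the path and values are made up):

std::set<DexCacheResolvedClasses> resolved;
DexCacheResolvedClasses key("/data/app/example/base.apk", /* location_checksum */ 0x1234u);
std::unordered_set<uint16_t> batch = {1, 2, 3};

auto it = resolved.find(key);
if (it != resolved.end()) {
  it->AddClasses(batch.begin(), batch.end());   // allowed on a set element: classes_ is mutable
} else {
  key.AddClasses(batch.begin(), batch.end());
  resolved.insert(key);
}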
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index fbf028d..4e01d80 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -93,7 +93,7 @@
extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
// Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t);
extern "C" int32_t art_quick_string_compareto(void*, void*);
extern "C" void* art_quick_memcpy(void*, const void*, size_t);
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index faa4747..79d1c13 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -119,7 +119,7 @@
V(ShrLong, uint64_t, uint64_t, uint32_t) \
V(UshrLong, uint64_t, uint64_t, uint32_t) \
\
- V(IndexOf, int32_t, void*, uint32_t, uint32_t, uint32_t) \
+ V(IndexOf, int32_t, void*, uint32_t, uint32_t) \
V(StringCompareTo, int32_t, void*, void*) \
V(Memcpy, void*, void*, const void*, size_t) \
\
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index e72809b..c621672 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -119,10 +119,10 @@
// Skip across the entrypoints structures.
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, thread_local_start, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, mterp_current_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 4de5388..4672483 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -245,6 +245,9 @@
heap->SetAllocTrackingEnabled(true);
}
} else {
+ // Delete outside of the critical section to avoid possible lock order violations with, for
+ // example, the runtime shutdown lock.
+ std::unique_ptr<AllocRecordObjectMap> map;
{
MutexLock mu(self, *Locks::alloc_tracker_lock_);
if (!heap->IsAllocTrackingEnabled()) {
@@ -252,7 +255,7 @@
}
heap->SetAllocTrackingEnabled(false);
LOG(INFO) << "Disabling alloc tracker";
- heap->SetAllocationRecords(nullptr);
+ map = heap->ReleaseAllocationRecords();
}
// If an allocation comes in before we uninstrument, we will safely drop it on the floor.
Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
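The fix applies a common pattern: take ownership of the doomed object while holding the lock, but let its destructor run only after the critical section ends, so tearing it down cannot violate lock ordering (here, against the runtime shutdown lock). In its generic form:

// Generic form of the pattern (illustrative, not the actual heap code).
std::unique_ptr<AllocRecordObjectMap> doomed;
{
  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  doomed = heap->ReleaseAllocationRecords();  // detach ownership under the lock
}
// ~AllocRecordObjectMap() runs here, after alloc_tracker_lock_ is released,
// so it is free to take other locks (e.g. during runtime shutdown).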
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 26da4ca..1e5f283 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -16,6 +16,9 @@
#include "immune_spaces.h"
+#include <vector>
+#include <tuple>
+
#include "gc/space/space-inl.h"
#include "mirror/object.h"
#include "oat_file.h"
@@ -32,11 +35,12 @@
void ImmuneSpaces::CreateLargestImmuneRegion() {
uintptr_t best_begin = 0u;
uintptr_t best_end = 0u;
+ uintptr_t best_heap_size = 0u;
uintptr_t cur_begin = 0u;
uintptr_t cur_end = 0u;
- // TODO: If the last space is an image space, we may include its oat file in the immune region.
- // This could potentially hide heap corruption bugs if there is invalid pointers that point into
- // the boot oat code
+ uintptr_t cur_heap_size = 0u;
+ using Interval = std::tuple</*start*/uintptr_t, /*end*/uintptr_t, /*is_heap*/bool>;
+ std::vector<Interval> intervals;
for (space::ContinuousSpace* space : GetSpaces()) {
uintptr_t space_begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t space_end = reinterpret_cast<uintptr_t>(space->Limit());
@@ -50,29 +54,47 @@
// creation, the actual oat file could be somewhere else.
const OatFile* const image_oat_file = image_space->GetOatFile();
if (image_oat_file != nullptr) {
- uintptr_t oat_begin = reinterpret_cast<uintptr_t>(image_oat_file->Begin());
- uintptr_t oat_end = reinterpret_cast<uintptr_t>(image_oat_file->End());
- if (space_end == oat_begin) {
- DCHECK_GE(oat_end, oat_begin);
- space_end = oat_end;
- }
+ intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
+ reinterpret_cast<uintptr_t>(image_oat_file->End()),
+ /*is_heap*/false));
}
}
- if (cur_begin == 0u) {
- cur_begin = space_begin;
- cur_end = space_end;
- } else if (cur_end == space_begin) {
- // Extend current region.
- cur_end = space_end;
- } else {
- // Reset.
- cur_begin = 0;
- cur_end = 0;
+ intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
+ }
+ std::sort(intervals.begin(), intervals.end());
+ // Intervals are already sorted by begin. If a new interval begins at the end of the current
+ // region we extend it; otherwise we restart the current interval. To prevent starting an
+ // interval on an oat file, ignore oat files that are not extending an existing interval.
+ // If the total number of image bytes in the current interval is larger than the current best
+ // one, then we set the best one to be the current one.
+ for (const Interval& interval : intervals) {
+ const uintptr_t begin = std::get<0>(interval);
+ const uintptr_t end = std::get<1>(interval);
+ const bool is_heap = std::get<2>(interval);
+ VLOG(collector) << "Interval " << reinterpret_cast<const void*>(begin) << "-"
+ << reinterpret_cast<const void*>(end) << " is_heap=" << is_heap;
+ DCHECK_GE(end, begin);
+ DCHECK_GE(begin, cur_end);
+ // The new interval is not at the end of the current one; start a new region if this is a heap
+ // interval. Otherwise continue, since we never start a new region with a non-heap interval.
+ if (begin != cur_end) {
+ if (!is_heap) {
+ continue;
+ }
+ // Not extending, reset the region.
+ cur_begin = begin;
+ cur_heap_size = 0;
}
- if (cur_end - cur_begin > best_end - best_begin) {
- // Improvement, update the best range.
- best_begin = cur_begin;
- best_end = cur_end;
+ cur_end = end;
+ if (is_heap) {
+ // Only update if the total number of image bytes is greater than the current best one.
+ // We don't want to count the oat file bytes since these contain no java objects.
+ cur_heap_size += end - begin;
+ if (cur_heap_size > best_heap_size) {
+ best_begin = cur_begin;
+ best_end = cur_end;
+ best_heap_size = cur_heap_size;
+ }
}
}
largest_immune_region_.SetBegin(reinterpret_cast<mirror::Object*>(best_begin));
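The rewritten CreateLargestImmuneRegion is a single sweep over the sorted intervals: an oat interval may extend a region but never start one, and candidate regions are ranked by how many heap (image) bytes they contain rather than by raw length, so a large oat file can no longer win on size alone. A small worked example of the new ranking (addresses and sizes are illustrative):

// Intervals, already sorted by begin:
//   [0x1000, 0x3000)  heap   (image,  8 KiB)
//   [0x3000, 0x9000)  !heap  (oat,   24 KiB)  -> extends the region, adds 0 heap bytes
//   [0x9000, 0xa000)  heap   (space,  4 KiB)  -> region [0x1000, 0xa000), 12 KiB of heap bytes
//   [0xc000, 0xf000)  heap   (image, 12 KiB)  -> gap, new region [0xc000, 0xf000), 12 KiB
// The first region is kept (a tie does not displace the current best), and the 24 KiB oat
// interval alone could never have started or won a region under the heap-byte ranking.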
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 56838f5..cf93ec6 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -28,7 +28,136 @@
namespace gc {
namespace collector {
-class ImmuneSpacesTest : public CommonRuntimeTest {};
+class DummyOatFile : public OatFile {
+ public:
+ DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
+ begin_ = begin;
+ end_ = end;
+ }
+};
+
+class DummyImageSpace : public space::ImageSpace {
+ public:
+ DummyImageSpace(MemMap* map,
+ accounting::ContinuousSpaceBitmap* live_bitmap,
+ std::unique_ptr<DummyOatFile>&& oat_file,
+ std::unique_ptr<MemMap>&& oat_map)
+ : ImageSpace("DummyImageSpace",
+ /*image_location*/"",
+ map,
+ live_bitmap,
+ map->End()),
+ oat_map_(std::move(oat_map)) {
+ oat_file_ = std::move(oat_file);
+ oat_file_non_owned_ = oat_file_.get();
+ }
+
+ private:
+ std::unique_ptr<MemMap> oat_map_;
+};
+
+class ImmuneSpacesTest : public CommonRuntimeTest {
+ static constexpr size_t kMaxBitmaps = 10;
+
+ public:
+ ImmuneSpacesTest() {}
+
+ void ReserveBitmaps() {
+ // Create a bunch of dummy bitmaps since these are required to create image spaces. The bitmaps
+ // do not need to cover the image spaces though.
+ for (size_t i = 0; i < kMaxBitmaps; ++i) {
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
+ accounting::ContinuousSpaceBitmap::Create("bitmap",
+ reinterpret_cast<uint8_t*>(kPageSize),
+ kPageSize));
+ CHECK(bitmap != nullptr);
+ live_bitmaps_.push_back(std::move(bitmap));
+ }
+ }
+
+ // Create an image space; the oat file is optional.
+ DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
+ size_t image_size,
+ uint8_t* oat_begin,
+ size_t oat_size) {
+ std::string error_str;
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
+ image_begin,
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ &error_str));
+ if (map == nullptr) {
+ LOG(ERROR) << error_str;
+ return nullptr;
+ }
+ CHECK(!live_bitmaps_.empty());
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
+ live_bitmaps_.pop_back();
+ std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
+ oat_begin,
+ oat_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ &error_str));
+ if (oat_map == nullptr) {
+ LOG(ERROR) << error_str;
+ return nullptr;
+ }
+ std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
+ // Create image header.
+ ImageSection sections[ImageHeader::kSectionCount];
+ new (map->Begin()) ImageHeader(
+ /*image_begin*/PointerToLowMemUInt32(map->Begin()),
+ /*image_size*/map->Size(),
+ sections,
+ /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
+ /*oat_checksum*/0u,
+ // The oat file data in the header is always right after the image space.
+ /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*boot_image_begin*/0u,
+ /*boot_image_size*/0u,
+ /*boot_oat_begin*/0u,
+ /*boot_oat_size*/0u,
+ /*pointer_size*/sizeof(void*),
+ /*compile_pic*/false,
+ /*is_pic*/false,
+ ImageHeader::kStorageModeUncompressed,
+ /*storage_size*/0u);
+ return new DummyImageSpace(map.release(),
+ live_bitmap.release(),
+ std::move(oat_file),
+ std::move(oat_map));
+ }
+
+ // Does not reserve the memory; the caller needs to be sure no other threads will map at the
+ // returned address.
+ static uint8_t* GetContinuousMemoryRegion(size_t size) {
+ std::string error_str;
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
+ nullptr,
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ &error_str));
+ if (map == nullptr) {
+ LOG(ERROR) << "Failed to allocate memory region " << error_str;
+ return nullptr;
+ }
+ return map->Begin();
+ }
+
+ private:
+ // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
+ // them to be placed randomly where we want an image space.
+ std::vector<std::unique_ptr<accounting::ContinuousSpaceBitmap>> live_bitmaps_;
+};
class DummySpace : public space::ContinuousSpace {
public:
@@ -72,94 +201,41 @@
EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), b.Limit());
}
-class DummyOatFile : public OatFile {
- public:
- DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
- begin_ = begin;
- end_ = end;
- }
-};
-
-class DummyImageSpace : public space::ImageSpace {
- public:
- DummyImageSpace(MemMap* map,
- accounting::ContinuousSpaceBitmap* live_bitmap,
- std::unique_ptr<DummyOatFile>&& oat_file)
- : ImageSpace("DummyImageSpace",
- /*image_location*/"",
- map,
- live_bitmap,
- map->End()) {
- oat_file_ = std::move(oat_file);
- oat_file_non_owned_ = oat_file_.get();
- }
-
- // Size is the size of the image space, oat offset is where the oat file is located
- // after the end of image space. oat_size is the size of the oat file.
- static DummyImageSpace* Create(size_t size, size_t oat_offset, size_t oat_size) {
- std::string error_str;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
- nullptr,
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- &error_str));
- if (map == nullptr) {
- LOG(ERROR) << error_str;
- return nullptr;
- }
- std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(
- accounting::ContinuousSpaceBitmap::Create("bitmap", map->Begin(), map->Size()));
- if (live_bitmap == nullptr) {
- return nullptr;
- }
- // The actual mapped oat file may not be directly after the image for the app image case.
- std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(map->End() + oat_offset,
- map->End() + oat_offset + oat_size));
- // Create image header.
- ImageSection sections[ImageHeader::kSectionCount];
- new (map->Begin()) ImageHeader(
- /*image_begin*/PointerToLowMemUInt32(map->Begin()),
- /*image_size*/map->Size(),
- sections,
- /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
- /*oat_checksum*/0u,
- // The oat file data in the header is always right after the image space.
- /*oat_file_begin*/PointerToLowMemUInt32(map->End()),
- /*oat_data_begin*/PointerToLowMemUInt32(map->End()),
- /*oat_data_end*/PointerToLowMemUInt32(map->End() + oat_size),
- /*oat_file_end*/PointerToLowMemUInt32(map->End() + oat_size),
- /*boot_image_begin*/0u,
- /*boot_image_size*/0u,
- /*boot_oat_begin*/0u,
- /*boot_oat_size*/0u,
- /*pointer_size*/sizeof(void*),
- /*compile_pic*/false,
- /*is_pic*/false,
- ImageHeader::kStorageModeUncompressed,
- /*storage_size*/0u);
- return new DummyImageSpace(map.release(), live_bitmap.release(), std::move(oat_file));
- }
-};
-
+// Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest, AppendAfterImage) {
+ ReserveBitmaps();
ImmuneSpaces spaces;
- constexpr size_t image_size = 123 * kPageSize;
- constexpr size_t image_oat_size = 321 * kPageSize;
- std::unique_ptr<DummyImageSpace> image_space(DummyImageSpace::Create(image_size,
- 0,
- image_oat_size));
+ constexpr size_t kImageSize = 123 * kPageSize;
+ constexpr size_t kImageOatSize = 321 * kPageSize;
+ constexpr size_t kOtherSpaceSize = 100 * kPageSize;
+
+ uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
+
+ std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
+ kImageSize,
+ memory + kImageSize,
+ kImageOatSize));
ASSERT_TRUE(image_space != nullptr);
const ImageHeader& image_header = image_space->GetImageHeader();
- EXPECT_EQ(image_header.GetImageSize(), image_size);
+ DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
+
+ EXPECT_EQ(image_header.GetImageSize(), kImageSize);
EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
- image_oat_size);
- DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + 813 * kPageSize);
- EXPECT_NE(image_space->Limit(), space.Begin());
+ kImageOatSize);
+ EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
+ // Check that we do not include the oat if there is no space after.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
spaces.AddSpace(image_space.get());
+ }
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+ image_space->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ image_space->Limit());
+ // Add another space and ensure it gets appended.
+ EXPECT_NE(image_space->Limit(), space.Begin());
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
spaces.AddSpace(&space);
}
EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
@@ -170,18 +246,122 @@
EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
image_space->Begin());
EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
- // Check that appending with a gap between the map does not include the oat file.
- image_space.reset(DummyImageSpace::Create(image_size, kPageSize, image_oat_size));
- spaces.Reset();
+}
+
+// Test [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region.
+TEST_F(ImmuneSpacesTest, MultiImage) {
+ ReserveBitmaps();
+ // Image 2 needs to be smaller or else it may be chosen for the immune region.
+ constexpr size_t kImage1Size = kPageSize * 17;
+ constexpr size_t kImage2Size = kPageSize * 13;
+ constexpr size_t kImage3Size = kPageSize * 3;
+ constexpr size_t kImage1OatSize = kPageSize * 5;
+ constexpr size_t kImage2OatSize = kPageSize * 8;
+ constexpr size_t kImage3OatSize = kPageSize;
+ constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
+ constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
+ uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
+ uint8_t* space1_begin = memory;
+ memory += kImage1Size;
+ uint8_t* space2_begin = memory;
+ memory += kImage2Size;
+ uint8_t* space1_oat_begin = memory;
+ memory += kImage1OatSize;
+ uint8_t* space2_oat_begin = memory;
+ memory += kImage2OatSize;
+ uint8_t* space3_begin = memory;
+
+ std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
+ kImage1Size,
+ space1_oat_begin,
+ kImage1OatSize));
+ ASSERT_TRUE(space1 != nullptr);
+
+
+ std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
+ kImage2Size,
+ space2_oat_begin,
+ kImage2OatSize));
+ ASSERT_TRUE(space2 != nullptr);
+
+ // Finally put a 3rd image space.
+ std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
+ kImage3Size,
+ space3_begin + kImage3Size,
+ kImage3OatSize));
+ ASSERT_TRUE(space3 != nullptr);
+
+ // Check that we do not include the oat if there is no space after.
+ ImmuneSpaces spaces;
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- spaces.AddSpace(image_space.get());
+ LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
+ spaces.AddSpace(space1.get());
+ LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
+ spaces.AddSpace(space2.get());
+ }
+ // There are no more heap bytes; the immune region should only be the first 2 image spaces and
+ // should exclude the image oat files.
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+ space1->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ space2->Limit());
+
+ // Add another space after the oat files; now it should contain the entire memory region.
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
+ spaces.AddSpace(space3.get());
}
EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
- image_space->Begin());
- // Size should be equal, we should not add the oat file since it is not adjacent to the image
- // space.
- EXPECT_EQ(spaces.GetLargestImmuneRegion().Size(), image_size);
+ space1->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ space3->Limit());
+
+ // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
+ // Image size is kImageBytes - kPageSize
+ // Oat size is kPageSize.
+ // Guard pages to ensure it is not adjacent to an existing immune region.
+ // Layout: [guard page][image][oat][guard page]
+ constexpr size_t kGuardSize = kPageSize;
+ constexpr size_t kImage4Size = kImageBytes - kPageSize;
+ constexpr size_t kImage4OatSize = kPageSize;
+ uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
+ std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
+ kImage4Size,
+ memory2 + kGuardSize + kImage4Size,
+ kImage4OatSize));
+ ASSERT_TRUE(space4 != nullptr);
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
+ spaces.AddSpace(space4.get());
+ }
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
+ space1->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
+ space3->Limit());
+
+ // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
+ // Image size is kImageBytes + kPageSize
+ // Oat size is kPageSize.
+ // Guard pages to ensure it is not adjacent to an existing immune region.
+ // Layout: [guard page][image][oat][guard page]
+ constexpr size_t kImage5Size = kImageBytes + kPageSize;
+ constexpr size_t kImage5OatSize = kPageSize;
+ uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
+ std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
+ kImage5Size,
+ memory3 + kGuardSize + kImage5Size,
+ kImage5OatSize));
+ ASSERT_TRUE(space5 != nullptr);
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
+ spaces.AddSpace(space5.get());
+ }
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
}
} // namespace collector
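For orientation, the MultiImage test lays out its first three spaces back to back in one reserved region, matching the layout named in the test comment; in page counts from the constants above:

// [image1: 17][image2: 13][image1 oat: 5][image2 oat: 8][image3: 3][image3 oat: 1]
// space1 and space2 are adjacent images, their oat files follow, and space3 (plus its
// own oat file) sits immediately after those oat files.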
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index f437830..d7023d8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -109,16 +109,25 @@
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
- bool is_current_allocator = allocator == GetCurrentAllocator();
- obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
+ // AllocateInternalWithGc can cause thread suspension; if someone instruments the entrypoints
+ // or changes the allocator at a suspend point here, we need to retry the allocation.
+ obj = AllocateInternalWithGc(self,
+ allocator,
+ kInstrumented,
+ byte_count,
+ &bytes_allocated,
+ &usable_size,
&bytes_tl_bulk_allocated, &klass);
if (obj == nullptr) {
- bool after_is_current_allocator = allocator == GetCurrentAllocator();
- // If there is a pending exception, fail the allocation right away since the next one
- // could cause OOM and abort the runtime.
- if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
- // If the allocator changed, we need to restart the allocation.
- return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
+ // The only way that we can get a null return if there is no pending exception is if the
+ // allocator or instrumentation changed.
+ if (!self->IsExceptionPending()) {
+ // AllocObject will pick up the new allocator type, and instrumented as true is the safe
+ // default.
+ return AllocObject</*kInstrumented*/true>(self,
+ klass,
+ byte_count,
+ pre_fence_visitor);
}
return nullptr;
}
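The heap-inl.h change above retries the allocation with kInstrumented forced to true whenever the slow path returns null without a pending exception, since that can only mean the allocator or the alloc entrypoints changed at a suspend point. A minimal sketch of that retry shape, assuming hypothetical stand-ins TryFastPath, SlowPathThatMaySuspend and HasPendingException (not the real ART entrypoints), is:

#include <cstddef>

// Hypothetical stand-ins for the real heap entrypoints (assumptions, not ART API).
void* TryFastPath(size_t bytes);
void* SlowPathThatMaySuspend(size_t bytes);
bool HasPendingException();

// Sketch of the retry-on-environment-change pattern used in AllocObject above.
template <bool kInstrumented>
void* Allocate(size_t bytes) {
  void* obj = TryFastPath(bytes);
  if (obj == nullptr) {
    // The slow path may run a GC and suspend the thread; the allocator or the
    // instrumented entrypoints can change at that suspend point.
    obj = SlowPathThatMaySuspend(bytes);
    if (obj == nullptr && !HasPendingException()) {
      // Null without an exception means the environment changed: retry with the
      // safe default (instrumented == true) so the new entrypoints are honored.
      return Allocate</*kInstrumented=*/true>(bytes);
    }
  }
  return obj;
}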
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4bee462..bebff0f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1650,8 +1650,15 @@
return nullptr;
}
+static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
+ instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
+}
+
mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
AllocatorType allocator,
+ bool instrumented,
size_t alloc_size,
size_t* bytes_allocated,
size_t* usable_size,
@@ -1667,12 +1674,13 @@
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
+ // If we were the default allocator but the allocator changed while we were suspended,
+ // or the entrypoints became instrumented while we were not, abort the allocation.
+ if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+ (!instrumented && EntrypointsInstrumented())) {
+ return nullptr;
+ }
if (last_gc != collector::kGcTypeNone) {
- // If we were the default allocator but the allocator changed while we were suspended,
- // abort the allocation.
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
- return nullptr;
- }
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
@@ -1684,7 +1692,8 @@
collector::GcType tried_type = next_gc_type_;
const bool gc_ran =
CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+ (!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
if (gc_ran) {
@@ -1703,7 +1712,8 @@
// Attempt to run the collector, if we succeed, re-try the allocation.
const bool plan_gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+ (!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
if (plan_gc_ran) {
@@ -1732,7 +1742,8 @@
// We don't need a WaitForGcToComplete here either.
DCHECK(!gc_plan_.empty());
CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+ (!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
@@ -1748,6 +1759,11 @@
min_interval_homogeneous_space_compaction_by_oom_) {
last_time_homogeneous_space_compaction_by_oom_ = current_time;
HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+ // Thread suspension could have occurred.
+ if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+ (!instrumented && EntrypointsInstrumented())) {
+ return nullptr;
+ }
switch (result) {
case HomogeneousSpaceCompactResult::kSuccess:
// If the allocation succeeded, we delayed an oom.
@@ -1788,6 +1804,11 @@
// If we aren't out of memory then the OOM was probably from the non moving space being
// full. Attempt to disable compaction and turn the main space into a non moving space.
DisableMovingGc();
+ // Thread suspension could have occurred.
+ if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+ (!instrumented && EntrypointsInstrumented())) {
+ return nullptr;
+ }
// If we are still a moving GC then something must have caused the transition to fail.
if (IsMovingGc(collector_type_)) {
MutexLock mu(self, *gc_complete_lock_);
@@ -3926,6 +3947,10 @@
allocation_records_.reset(records);
}
+std::unique_ptr<AllocRecordObjectMap> Heap::ReleaseAllocationRecords() {
+ return std::move(allocation_records_);
+}
+
void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
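In the heap.cc hunks above, the same bail-out predicate is repeated after every operation that can suspend the thread (WaitForGcToComplete, CollectGarbageInternal, PerformHomogeneousSpaceCompact, DisableMovingGc). A small helper expressing that predicate, written here with hypothetical free functions in place of the Heap and Instrumentation accessors, shows the intent in one place:

// Hypothetical accessors standing in for Heap::GetCurrentAllocator() and
// Instrumentation::AllocEntrypointsInstrumented(); not the real ART signatures.
enum class AllocatorType { kDefault, kOther };
AllocatorType GetCurrentAllocator();
bool EntrypointsInstrumented();

// True if the allocation should be aborted because the world changed while the
// thread was suspended (allocator swapped, or alloc entrypoints got instrumented).
bool ShouldAbortAllocation(bool was_default_allocator,
                           AllocatorType allocator,
                           bool instrumented) {
  return (was_default_allocator && allocator != GetCurrentAllocator()) ||
         (!instrumented && EntrypointsInstrumented());
}

Either condition means the state captured before suspension is stale, so AllocateInternalWithGc returns null and lets AllocObject restart with the current allocator and instrumented entrypoints.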
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6edb548..889069d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -766,6 +766,10 @@
return allocation_records_.get();
}
+ // Release ownership of the allocation records.
+ std::unique_ptr<AllocRecordObjectMap> ReleaseAllocationRecords()
+ REQUIRES(Locks::alloc_tracker_lock_);
+
void SetAllocationRecords(AllocRecordObjectMap* records)
REQUIRES(Locks::alloc_tracker_lock_);
@@ -865,6 +869,7 @@
// an initial allocation attempt failed.
mirror::Object* AllocateInternalWithGc(Thread* self,
AllocatorType allocator,
+ bool instrumented,
size_t num_bytes,
size_t* bytes_allocated,
size_t* usable_size,
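ReleaseAllocationRecords() hands ownership of the allocation record map to the caller (the definition added in heap.cc simply std::moves the member). A self-contained analogue of that ownership-transfer pattern, using illustrative Owner/Records types rather than the real Heap/AllocRecordObjectMap classes, looks like:

#include <memory>
#include <utility>

struct Records {};

class Owner {
 public:
  // Analogue of Heap::ReleaseAllocationRecords(): transfer ownership to the
  // caller and leave the member null.
  std::unique_ptr<Records> Release() { return std::move(records_); }

 private:
  std::unique_ptr<Records> records_ = std::make_unique<Records>();
};

After Release() the member is null, so the caller owns the map's lifetime; in ART the call sites are expected to hold Locks::alloc_tracker_lock_, as the REQUIRES annotation states.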
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 4ef36a4..5aaf104 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1278,7 +1278,7 @@
PROT_READ | PROT_WRITE,
/*low_4gb*/true,
/*reuse*/false,
- out_error_msg));
+ /*out*/out_error_msg));
if (map != nullptr) {
const size_t stored_size = image_header->GetDataSize();
const size_t write_offset = sizeof(ImageHeader); // Skip the header.
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 2e4be6b..b3cdb41 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -419,6 +419,12 @@
size_t inlined_frames_before_frame)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Does not hold a lock; used to check whether someone switched from not instrumented to
+ // instrumented during a GC suspend point.
+ bool AllocEntrypointsInstrumented() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return quick_alloc_entry_points_instrumentation_counter_ > 0;
+ }
+
private:
InstrumentationLevel GetCurrentInstrumentationLevel() const;
@@ -572,9 +578,7 @@
InterpreterHandlerTable interpreter_handler_table_ GUARDED_BY(Locks::mutator_lock_);
// Greater than 0 if quick alloc entry points instrumented.
- size_t quick_alloc_entry_points_instrumentation_counter_
- GUARDED_BY(Locks::instrument_entrypoints_lock_);
-
+ size_t quick_alloc_entry_points_instrumentation_counter_;
friend class InstrumentationTest; // For GetCurrentInstrumentationLevel and ConfigureStubs.
DISALLOW_COPY_AND_ASSIGN(Instrumentation);
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index a595d33..baf4afe 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -240,20 +240,10 @@
return os;
}
-#if !defined(__clang__)
-#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__))
-// TODO: remove when all targets implemented.
static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
-#else
-static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
-#endif
-#else
+
+#if defined(__clang__)
// Clang 3.4 fails to build the goto interpreter implementation.
-#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__))
-static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
-#else
-static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
-#endif
template<bool do_access_check, bool transaction_active>
JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
LOG(FATAL) << "UNREACHABLE";
@@ -295,9 +285,7 @@
}
jit::Jit* jit = Runtime::Current()->GetJit();
- if (UNLIKELY(jit != nullptr &&
- jit->JitAtFirstUse() &&
- jit->GetCodeCache()->ContainsMethod(method))) {
+ if (jit != nullptr && jit->CanInvokeCompiledCode(method)) {
JValue result;
// Pop the shadow frame before calling into compiled code.
@@ -327,12 +315,8 @@
while (true) {
// Mterp does not support all instrumentation/debugging.
if (MterpShouldSwitchInterpreters()) {
-#if !defined(__clang__)
- return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register);
-#else
return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
false);
-#endif
}
bool returned = ExecuteMterpImpl(self, code_item, &shadow_frame, &result_register);
if (returned) {
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
index d1221f7..c6292c3 100644
--- a/runtime/interpreter/mterp/config_mips
+++ b/runtime/interpreter/mterp/config_mips
@@ -1,4 +1,4 @@
-# Copyright (C) 2015 The Android Open Source Project
+# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
# limitations under the License.
#
-# Configuration for MIPS_32
+# Configuration for MIPS_32 targets.
#
handler-style computed-goto
@@ -33,265 +33,265 @@
# opcode list; argument to op-start is default directory
op-start mips
- # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
- # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
- op op_nop FALLBACK
- op op_move FALLBACK
- op op_move_from16 FALLBACK
- op op_move_16 FALLBACK
- op op_move_wide FALLBACK
- op op_move_wide_from16 FALLBACK
- op op_move_wide_16 FALLBACK
- op op_move_object FALLBACK
- op op_move_object_from16 FALLBACK
- op op_move_object_16 FALLBACK
- op op_move_result FALLBACK
- op op_move_result_wide FALLBACK
- op op_move_result_object FALLBACK
- op op_move_exception FALLBACK
- op op_return_void FALLBACK
- op op_return FALLBACK
- op op_return_wide FALLBACK
- op op_return_object FALLBACK
- op op_const_4 FALLBACK
- op op_const_16 FALLBACK
- op op_const FALLBACK
- op op_const_high16 FALLBACK
- op op_const_wide_16 FALLBACK
- op op_const_wide_32 FALLBACK
- op op_const_wide FALLBACK
- op op_const_wide_high16 FALLBACK
- op op_const_string FALLBACK
- op op_const_string_jumbo FALLBACK
- op op_const_class FALLBACK
- op op_monitor_enter FALLBACK
- op op_monitor_exit FALLBACK
- op op_check_cast FALLBACK
- op op_instance_of FALLBACK
- op op_array_length FALLBACK
- op op_new_instance FALLBACK
- op op_new_array FALLBACK
- op op_filled_new_array FALLBACK
- op op_filled_new_array_range FALLBACK
- op op_fill_array_data FALLBACK
- op op_throw FALLBACK
- op op_goto FALLBACK
- op op_goto_16 FALLBACK
- op op_goto_32 FALLBACK
- op op_packed_switch FALLBACK
- op op_sparse_switch FALLBACK
- op op_cmpl_float FALLBACK
- op op_cmpg_float FALLBACK
- op op_cmpl_double FALLBACK
- op op_cmpg_double FALLBACK
- op op_cmp_long FALLBACK
- op op_if_eq FALLBACK
- op op_if_ne FALLBACK
- op op_if_lt FALLBACK
- op op_if_ge FALLBACK
- op op_if_gt FALLBACK
- op op_if_le FALLBACK
- op op_if_eqz FALLBACK
- op op_if_nez FALLBACK
- op op_if_ltz FALLBACK
- op op_if_gez FALLBACK
- op op_if_gtz FALLBACK
- op op_if_lez FALLBACK
- op_unused_3e FALLBACK
- op_unused_3f FALLBACK
- op_unused_40 FALLBACK
- op_unused_41 FALLBACK
- op_unused_42 FALLBACK
- op_unused_43 FALLBACK
- op op_aget FALLBACK
- op op_aget_wide FALLBACK
- op op_aget_object FALLBACK
- op op_aget_boolean FALLBACK
- op op_aget_byte FALLBACK
- op op_aget_char FALLBACK
- op op_aget_short FALLBACK
- op op_aput FALLBACK
- op op_aput_wide FALLBACK
- op op_aput_object FALLBACK
- op op_aput_boolean FALLBACK
- op op_aput_byte FALLBACK
- op op_aput_char FALLBACK
- op op_aput_short FALLBACK
- op op_iget FALLBACK
- op op_iget_wide FALLBACK
- op op_iget_object FALLBACK
- op op_iget_boolean FALLBACK
- op op_iget_byte FALLBACK
- op op_iget_char FALLBACK
- op op_iget_short FALLBACK
- op op_iput FALLBACK
- op op_iput_wide FALLBACK
- op op_iput_object FALLBACK
- op op_iput_boolean FALLBACK
- op op_iput_byte FALLBACK
- op op_iput_char FALLBACK
- op op_iput_short FALLBACK
- op op_sget FALLBACK
- op op_sget_wide FALLBACK
- op op_sget_object FALLBACK
- op op_sget_boolean FALLBACK
- op op_sget_byte FALLBACK
- op op_sget_char FALLBACK
- op op_sget_short FALLBACK
- op op_sput FALLBACK
- op op_sput_wide FALLBACK
- op op_sput_object FALLBACK
- op op_sput_boolean FALLBACK
- op op_sput_byte FALLBACK
- op op_sput_char FALLBACK
- op op_sput_short FALLBACK
- op op_invoke_virtual FALLBACK
- op op_invoke_super FALLBACK
- op op_invoke_direct FALLBACK
- op op_invoke_static FALLBACK
- op op_invoke_interface FALLBACK
- op op_return_void_no_barrier FALLBACK
- op op_invoke_virtual_range FALLBACK
- op op_invoke_super_range FALLBACK
- op op_invoke_direct_range FALLBACK
- op op_invoke_static_range FALLBACK
- op op_invoke_interface_range FALLBACK
- op_unused_79 FALLBACK
- op_unused_7a FALLBACK
- op op_neg_int FALLBACK
- op op_not_int FALLBACK
- op op_neg_long FALLBACK
- op op_not_long FALLBACK
- op op_neg_float FALLBACK
- op op_neg_double FALLBACK
- op op_int_to_long FALLBACK
- op op_int_to_float FALLBACK
- op op_int_to_double FALLBACK
- op op_long_to_int FALLBACK
- op op_long_to_float FALLBACK
- op op_long_to_double FALLBACK
- op op_float_to_int FALLBACK
- op op_float_to_long FALLBACK
- op op_float_to_double FALLBACK
- op op_double_to_int FALLBACK
- op op_double_to_long FALLBACK
- op op_double_to_float FALLBACK
- op op_int_to_byte FALLBACK
- op op_int_to_char FALLBACK
- op op_int_to_short FALLBACK
- op op_add_int FALLBACK
- op op_sub_int FALLBACK
- op op_mul_int FALLBACK
- op op_div_int FALLBACK
- op op_rem_int FALLBACK
- op op_and_int FALLBACK
- op op_or_int FALLBACK
- op op_xor_int FALLBACK
- op op_shl_int FALLBACK
- op op_shr_int FALLBACK
- op op_ushr_int FALLBACK
- op op_add_long FALLBACK
- op op_sub_long FALLBACK
- op op_mul_long FALLBACK
- op op_div_long FALLBACK
- op op_rem_long FALLBACK
- op op_and_long FALLBACK
- op op_or_long FALLBACK
- op op_xor_long FALLBACK
- op op_shl_long FALLBACK
- op op_shr_long FALLBACK
- op op_ushr_long FALLBACK
- op op_add_float FALLBACK
- op op_sub_float FALLBACK
- op op_mul_float FALLBACK
- op op_div_float FALLBACK
- op op_rem_float FALLBACK
- op op_add_double FALLBACK
- op op_sub_double FALLBACK
- op op_mul_double FALLBACK
- op op_div_double FALLBACK
- op op_rem_double FALLBACK
- op op_add_int_2addr FALLBACK
- op op_sub_int_2addr FALLBACK
- op op_mul_int_2addr FALLBACK
- op op_div_int_2addr FALLBACK
- op op_rem_int_2addr FALLBACK
- op op_and_int_2addr FALLBACK
- op op_or_int_2addr FALLBACK
- op op_xor_int_2addr FALLBACK
- op op_shl_int_2addr FALLBACK
- op op_shr_int_2addr FALLBACK
- op op_ushr_int_2addr FALLBACK
- op op_add_long_2addr FALLBACK
- op op_sub_long_2addr FALLBACK
- op op_mul_long_2addr FALLBACK
- op op_div_long_2addr FALLBACK
- op op_rem_long_2addr FALLBACK
- op op_and_long_2addr FALLBACK
- op op_or_long_2addr FALLBACK
- op op_xor_long_2addr FALLBACK
- op op_shl_long_2addr FALLBACK
- op op_shr_long_2addr FALLBACK
- op op_ushr_long_2addr FALLBACK
- op op_add_float_2addr FALLBACK
- op op_sub_float_2addr FALLBACK
- op op_mul_float_2addr FALLBACK
- op op_div_float_2addr FALLBACK
- op op_rem_float_2addr FALLBACK
- op op_add_double_2addr FALLBACK
- op op_sub_double_2addr FALLBACK
- op op_mul_double_2addr FALLBACK
- op op_div_double_2addr FALLBACK
- op op_rem_double_2addr FALLBACK
- op op_add_int_lit16 FALLBACK
- op op_rsub_int FALLBACK
- op op_mul_int_lit16 FALLBACK
- op op_div_int_lit16 FALLBACK
- op op_rem_int_lit16 FALLBACK
- op op_and_int_lit16 FALLBACK
- op op_or_int_lit16 FALLBACK
- op op_xor_int_lit16 FALLBACK
- op op_add_int_lit8 FALLBACK
- op op_rsub_int_lit8 FALLBACK
- op op_mul_int_lit8 FALLBACK
- op op_div_int_lit8 FALLBACK
- op op_rem_int_lit8 FALLBACK
- op op_and_int_lit8 FALLBACK
- op op_or_int_lit8 FALLBACK
- op op_xor_int_lit8 FALLBACK
- op op_shl_int_lit8 FALLBACK
- op op_shr_int_lit8 FALLBACK
- op op_ushr_int_lit8 FALLBACK
- op op_iget_quick FALLBACK
- op op_iget_wide_quick FALLBACK
- op op_iget_object_quick FALLBACK
- op op_iput_quick FALLBACK
- op op_iput_wide_quick FALLBACK
- op op_iput_object_quick FALLBACK
- op op_invoke_virtual_quick FALLBACK
- op op_invoke_virtual_range_quick FALLBACK
- op op_iput_boolean_quick FALLBACK
- op op_iput_byte_quick FALLBACK
- op op_iput_char_quick FALLBACK
- op op_iput_short_quick FALLBACK
- op op_iget_boolean_quick FALLBACK
- op op_iget_byte_quick FALLBACK
- op op_iget_char_quick FALLBACK
- op op_iget_short_quick FALLBACK
- op_unused_f3 FALLBACK
- op_unused_f4 FALLBACK
- op_unused_f5 FALLBACK
- op_unused_f6 FALLBACK
- op_unused_f7 FALLBACK
- op_unused_f8 FALLBACK
- op_unused_f9 FALLBACK
- op_unused_fa FALLBACK
- op_unused_fb FALLBACK
- op_unused_fc FALLBACK
- op_unused_fd FALLBACK
- op_unused_fe FALLBACK
- op_unused_ff FALLBACK
+ # op op_nop FALLBACK
+ # op op_move FALLBACK
+ # op op_move_from16 FALLBACK
+ # op op_move_16 FALLBACK
+ # op op_move_wide FALLBACK
+ # op op_move_wide_from16 FALLBACK
+ # op op_move_wide_16 FALLBACK
+ # op op_move_object FALLBACK
+ # op op_move_object_from16 FALLBACK
+ # op op_move_object_16 FALLBACK
+ # op op_move_result FALLBACK
+ # op op_move_result_wide FALLBACK
+ # op op_move_result_object FALLBACK
+ # op op_move_exception FALLBACK
+ # op op_return_void FALLBACK
+ # op op_return FALLBACK
+ # op op_return_wide FALLBACK
+ # op op_return_object FALLBACK
+ # op op_const_4 FALLBACK
+ # op op_const_16 FALLBACK
+ # op op_const FALLBACK
+ # op op_const_high16 FALLBACK
+ # op op_const_wide_16 FALLBACK
+ # op op_const_wide_32 FALLBACK
+ # op op_const_wide FALLBACK
+ # op op_const_wide_high16 FALLBACK
+ # op op_const_string FALLBACK
+ # op op_const_string_jumbo FALLBACK
+ # op op_const_class FALLBACK
+ # op op_monitor_enter FALLBACK
+ # op op_monitor_exit FALLBACK
+ # op op_check_cast FALLBACK
+ # op op_instance_of FALLBACK
+ # op op_array_length FALLBACK
+ # op op_new_instance FALLBACK
+ # op op_new_array FALLBACK
+ # op op_filled_new_array FALLBACK
+ # op op_filled_new_array_range FALLBACK
+ # op op_fill_array_data FALLBACK
+ # op op_throw FALLBACK
+ # op op_goto FALLBACK
+ # op op_goto_16 FALLBACK
+ # op op_goto_32 FALLBACK
+ # op op_packed_switch FALLBACK
+ # op op_sparse_switch FALLBACK
+ # op op_cmpl_float FALLBACK
+ # op op_cmpg_float FALLBACK
+ # op op_cmpl_double FALLBACK
+ # op op_cmpg_double FALLBACK
+ # op op_cmp_long FALLBACK
+ # op op_if_eq FALLBACK
+ # op op_if_ne FALLBACK
+ # op op_if_lt FALLBACK
+ # op op_if_ge FALLBACK
+ # op op_if_gt FALLBACK
+ # op op_if_le FALLBACK
+ # op op_if_eqz FALLBACK
+ # op op_if_nez FALLBACK
+ # op op_if_ltz FALLBACK
+ # op op_if_gez FALLBACK
+ # op op_if_gtz FALLBACK
+ # op op_if_lez FALLBACK
+ # op op_unused_3e FALLBACK
+ # op op_unused_3f FALLBACK
+ # op op_unused_40 FALLBACK
+ # op op_unused_41 FALLBACK
+ # op op_unused_42 FALLBACK
+ # op op_unused_43 FALLBACK
+ # op op_aget FALLBACK
+ # op op_aget_wide FALLBACK
+ # op op_aget_object FALLBACK
+ # op op_aget_boolean FALLBACK
+ # op op_aget_byte FALLBACK
+ # op op_aget_char FALLBACK
+ # op op_aget_short FALLBACK
+ # op op_aput FALLBACK
+ # op op_aput_wide FALLBACK
+ # op op_aput_object FALLBACK
+ # op op_aput_boolean FALLBACK
+ # op op_aput_byte FALLBACK
+ # op op_aput_char FALLBACK
+ # op op_aput_short FALLBACK
+ # op op_iget FALLBACK
+ # op op_iget_wide FALLBACK
+ # op op_iget_object FALLBACK
+ # op op_iget_boolean FALLBACK
+ # op op_iget_byte FALLBACK
+ # op op_iget_char FALLBACK
+ # op op_iget_short FALLBACK
+ # op op_iput FALLBACK
+ # op op_iput_wide FALLBACK
+ # op op_iput_object FALLBACK
+ # op op_iput_boolean FALLBACK
+ # op op_iput_byte FALLBACK
+ # op op_iput_char FALLBACK
+ # op op_iput_short FALLBACK
+ # op op_sget FALLBACK
+ # op op_sget_wide FALLBACK
+ # op op_sget_object FALLBACK
+ # op op_sget_boolean FALLBACK
+ # op op_sget_byte FALLBACK
+ # op op_sget_char FALLBACK
+ # op op_sget_short FALLBACK
+ # op op_sput FALLBACK
+ # op op_sput_wide FALLBACK
+ # op op_sput_object FALLBACK
+ # op op_sput_boolean FALLBACK
+ # op op_sput_byte FALLBACK
+ # op op_sput_char FALLBACK
+ # op op_sput_short FALLBACK
+ # op op_invoke_virtual FALLBACK
+ # op op_invoke_super FALLBACK
+ # op op_invoke_direct FALLBACK
+ # op op_invoke_static FALLBACK
+ # op op_invoke_interface FALLBACK
+ # op op_return_void_no_barrier FALLBACK
+ # op op_invoke_virtual_range FALLBACK
+ # op op_invoke_super_range FALLBACK
+ # op op_invoke_direct_range FALLBACK
+ # op op_invoke_static_range FALLBACK
+ # op op_invoke_interface_range FALLBACK
+ # op op_unused_79 FALLBACK
+ # op op_unused_7a FALLBACK
+ # op op_neg_int FALLBACK
+ # op op_not_int FALLBACK
+ # op op_neg_long FALLBACK
+ # op op_not_long FALLBACK
+ # op op_neg_float FALLBACK
+ # op op_neg_double FALLBACK
+ # op op_int_to_long FALLBACK
+ # op op_int_to_float FALLBACK
+ # op op_int_to_double FALLBACK
+ # op op_long_to_int FALLBACK
+ # op op_long_to_float FALLBACK
+ # op op_long_to_double FALLBACK
+ # op op_float_to_int FALLBACK
+ # op op_float_to_long FALLBACK
+ # op op_float_to_double FALLBACK
+ # op op_double_to_int FALLBACK
+ # op op_double_to_long FALLBACK
+ # op op_double_to_float FALLBACK
+ # op op_int_to_byte FALLBACK
+ # op op_int_to_char FALLBACK
+ # op op_int_to_short FALLBACK
+ # op op_add_int FALLBACK
+ # op op_sub_int FALLBACK
+ # op op_mul_int FALLBACK
+ # op op_div_int FALLBACK
+ # op op_rem_int FALLBACK
+ # op op_and_int FALLBACK
+ # op op_or_int FALLBACK
+ # op op_xor_int FALLBACK
+ # op op_shl_int FALLBACK
+ # op op_shr_int FALLBACK
+ # op op_ushr_int FALLBACK
+ # op op_add_long FALLBACK
+ # op op_sub_long FALLBACK
+ # op op_mul_long FALLBACK
+ # op op_div_long FALLBACK
+ # op op_rem_long FALLBACK
+ # op op_and_long FALLBACK
+ # op op_or_long FALLBACK
+ # op op_xor_long FALLBACK
+ # op op_shl_long FALLBACK
+ # op op_shr_long FALLBACK
+ # op op_ushr_long FALLBACK
+ # op op_add_float FALLBACK
+ # op op_sub_float FALLBACK
+ # op op_mul_float FALLBACK
+ # op op_div_float FALLBACK
+ # op op_rem_float FALLBACK
+ # op op_add_double FALLBACK
+ # op op_sub_double FALLBACK
+ # op op_mul_double FALLBACK
+ # op op_div_double FALLBACK
+ # op op_rem_double FALLBACK
+ # op op_add_int_2addr FALLBACK
+ # op op_sub_int_2addr FALLBACK
+ # op op_mul_int_2addr FALLBACK
+ # op op_div_int_2addr FALLBACK
+ # op op_rem_int_2addr FALLBACK
+ # op op_and_int_2addr FALLBACK
+ # op op_or_int_2addr FALLBACK
+ # op op_xor_int_2addr FALLBACK
+ # op op_shl_int_2addr FALLBACK
+ # op op_shr_int_2addr FALLBACK
+ # op op_ushr_int_2addr FALLBACK
+ # op op_add_long_2addr FALLBACK
+ # op op_sub_long_2addr FALLBACK
+ # op op_mul_long_2addr FALLBACK
+ # op op_div_long_2addr FALLBACK
+ # op op_rem_long_2addr FALLBACK
+ # op op_and_long_2addr FALLBACK
+ # op op_or_long_2addr FALLBACK
+ # op op_xor_long_2addr FALLBACK
+ # op op_shl_long_2addr FALLBACK
+ # op op_shr_long_2addr FALLBACK
+ # op op_ushr_long_2addr FALLBACK
+ # op op_add_float_2addr FALLBACK
+ # op op_sub_float_2addr FALLBACK
+ # op op_mul_float_2addr FALLBACK
+ # op op_div_float_2addr FALLBACK
+ # op op_rem_float_2addr FALLBACK
+ # op op_add_double_2addr FALLBACK
+ # op op_sub_double_2addr FALLBACK
+ # op op_mul_double_2addr FALLBACK
+ # op op_div_double_2addr FALLBACK
+ # op op_rem_double_2addr FALLBACK
+ # op op_add_int_lit16 FALLBACK
+ # op op_rsub_int FALLBACK
+ # op op_mul_int_lit16 FALLBACK
+ # op op_div_int_lit16 FALLBACK
+ # op op_rem_int_lit16 FALLBACK
+ # op op_and_int_lit16 FALLBACK
+ # op op_or_int_lit16 FALLBACK
+ # op op_xor_int_lit16 FALLBACK
+ # op op_add_int_lit8 FALLBACK
+ # op op_rsub_int_lit8 FALLBACK
+ # op op_mul_int_lit8 FALLBACK
+ # op op_div_int_lit8 FALLBACK
+ # op op_rem_int_lit8 FALLBACK
+ # op op_and_int_lit8 FALLBACK
+ # op op_or_int_lit8 FALLBACK
+ # op op_xor_int_lit8 FALLBACK
+ # op op_shl_int_lit8 FALLBACK
+ # op op_shr_int_lit8 FALLBACK
+ # op op_ushr_int_lit8 FALLBACK
+ # op op_iget_quick FALLBACK
+ # op op_iget_wide_quick FALLBACK
+ # op op_iget_object_quick FALLBACK
+ # op op_iput_quick FALLBACK
+ # op op_iput_wide_quick FALLBACK
+ # op op_iput_object_quick FALLBACK
+ # op op_invoke_virtual_quick FALLBACK
+ # op op_invoke_virtual_range_quick FALLBACK
+ # op op_iput_boolean_quick FALLBACK
+ # op op_iput_byte_quick FALLBACK
+ # op op_iput_char_quick FALLBACK
+ # op op_iput_short_quick FALLBACK
+ # op op_iget_boolean_quick FALLBACK
+ # op op_iget_byte_quick FALLBACK
+ # op op_iget_char_quick FALLBACK
+ # op op_iget_short_quick FALLBACK
+ op op_invoke_lambda FALLBACK
+ # op op_unused_f4 FALLBACK
+ op op_capture_variable FALLBACK
+ op op_create_lambda FALLBACK
+ op op_liberate_variable FALLBACK
+ op op_box_lambda FALLBACK
+ op op_unbox_lambda FALLBACK
+ # op op_unused_fa FALLBACK
+ # op op_unused_fb FALLBACK
+ # op op_unused_fc FALLBACK
+ # op op_unused_fd FALLBACK
+ # op op_unused_fe FALLBACK
+ # op op_unused_ff FALLBACK
op-end
# common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
index f804ce5..c40c007 100644
--- a/runtime/interpreter/mterp/config_mips64
+++ b/runtime/interpreter/mterp/config_mips64
@@ -36,262 +36,262 @@
# (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
# (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
- op op_nop FALLBACK
- op op_move FALLBACK
- op op_move_from16 FALLBACK
- op op_move_16 FALLBACK
- op op_move_wide FALLBACK
- op op_move_wide_from16 FALLBACK
- op op_move_wide_16 FALLBACK
- op op_move_object FALLBACK
- op op_move_object_from16 FALLBACK
- op op_move_object_16 FALLBACK
- op op_move_result FALLBACK
- op op_move_result_wide FALLBACK
- op op_move_result_object FALLBACK
- op op_move_exception FALLBACK
- op op_return_void FALLBACK
- op op_return FALLBACK
- op op_return_wide FALLBACK
- op op_return_object FALLBACK
- op op_const_4 FALLBACK
- op op_const_16 FALLBACK
- op op_const FALLBACK
- op op_const_high16 FALLBACK
- op op_const_wide_16 FALLBACK
- op op_const_wide_32 FALLBACK
- op op_const_wide FALLBACK
- op op_const_wide_high16 FALLBACK
- op op_const_string FALLBACK
- op op_const_string_jumbo FALLBACK
- op op_const_class FALLBACK
- op op_monitor_enter FALLBACK
- op op_monitor_exit FALLBACK
- op op_check_cast FALLBACK
- op op_instance_of FALLBACK
- op op_array_length FALLBACK
- op op_new_instance FALLBACK
- op op_new_array FALLBACK
- op op_filled_new_array FALLBACK
- op op_filled_new_array_range FALLBACK
- op op_fill_array_data FALLBACK
- op op_throw FALLBACK
- op op_goto FALLBACK
- op op_goto_16 FALLBACK
- op op_goto_32 FALLBACK
- op op_packed_switch FALLBACK
- op op_sparse_switch FALLBACK
- op op_cmpl_float FALLBACK
- op op_cmpg_float FALLBACK
- op op_cmpl_double FALLBACK
- op op_cmpg_double FALLBACK
- op op_cmp_long FALLBACK
- op op_if_eq FALLBACK
- op op_if_ne FALLBACK
- op op_if_lt FALLBACK
- op op_if_ge FALLBACK
- op op_if_gt FALLBACK
- op op_if_le FALLBACK
- op op_if_eqz FALLBACK
- op op_if_nez FALLBACK
- op op_if_ltz FALLBACK
- op op_if_gez FALLBACK
- op op_if_gtz FALLBACK
- op op_if_lez FALLBACK
- op_unused_3e FALLBACK
- op_unused_3f FALLBACK
- op_unused_40 FALLBACK
- op_unused_41 FALLBACK
- op_unused_42 FALLBACK
- op_unused_43 FALLBACK
- op op_aget FALLBACK
- op op_aget_wide FALLBACK
- op op_aget_object FALLBACK
- op op_aget_boolean FALLBACK
- op op_aget_byte FALLBACK
- op op_aget_char FALLBACK
- op op_aget_short FALLBACK
- op op_aput FALLBACK
- op op_aput_wide FALLBACK
- op op_aput_object FALLBACK
- op op_aput_boolean FALLBACK
- op op_aput_byte FALLBACK
- op op_aput_char FALLBACK
- op op_aput_short FALLBACK
- op op_iget FALLBACK
- op op_iget_wide FALLBACK
- op op_iget_object FALLBACK
- op op_iget_boolean FALLBACK
- op op_iget_byte FALLBACK
- op op_iget_char FALLBACK
- op op_iget_short FALLBACK
- op op_iput FALLBACK
- op op_iput_wide FALLBACK
- op op_iput_object FALLBACK
- op op_iput_boolean FALLBACK
- op op_iput_byte FALLBACK
- op op_iput_char FALLBACK
- op op_iput_short FALLBACK
- op op_sget FALLBACK
- op op_sget_wide FALLBACK
- op op_sget_object FALLBACK
- op op_sget_boolean FALLBACK
- op op_sget_byte FALLBACK
- op op_sget_char FALLBACK
- op op_sget_short FALLBACK
- op op_sput FALLBACK
- op op_sput_wide FALLBACK
- op op_sput_object FALLBACK
- op op_sput_boolean FALLBACK
- op op_sput_byte FALLBACK
- op op_sput_char FALLBACK
- op op_sput_short FALLBACK
- op op_invoke_virtual FALLBACK
- op op_invoke_super FALLBACK
- op op_invoke_direct FALLBACK
- op op_invoke_static FALLBACK
- op op_invoke_interface FALLBACK
- op op_return_void_no_barrier FALLBACK
- op op_invoke_virtual_range FALLBACK
- op op_invoke_super_range FALLBACK
- op op_invoke_direct_range FALLBACK
- op op_invoke_static_range FALLBACK
- op op_invoke_interface_range FALLBACK
- op_unused_79 FALLBACK
- op_unused_7a FALLBACK
- op op_neg_int FALLBACK
- op op_not_int FALLBACK
- op op_neg_long FALLBACK
- op op_not_long FALLBACK
- op op_neg_float FALLBACK
- op op_neg_double FALLBACK
- op op_int_to_long FALLBACK
- op op_int_to_float FALLBACK
- op op_int_to_double FALLBACK
- op op_long_to_int FALLBACK
- op op_long_to_float FALLBACK
- op op_long_to_double FALLBACK
- op op_float_to_int FALLBACK
- op op_float_to_long FALLBACK
- op op_float_to_double FALLBACK
- op op_double_to_int FALLBACK
- op op_double_to_long FALLBACK
- op op_double_to_float FALLBACK
- op op_int_to_byte FALLBACK
- op op_int_to_char FALLBACK
- op op_int_to_short FALLBACK
- op op_add_int FALLBACK
- op op_sub_int FALLBACK
- op op_mul_int FALLBACK
- op op_div_int FALLBACK
- op op_rem_int FALLBACK
- op op_and_int FALLBACK
- op op_or_int FALLBACK
- op op_xor_int FALLBACK
- op op_shl_int FALLBACK
- op op_shr_int FALLBACK
- op op_ushr_int FALLBACK
- op op_add_long FALLBACK
- op op_sub_long FALLBACK
- op op_mul_long FALLBACK
- op op_div_long FALLBACK
- op op_rem_long FALLBACK
- op op_and_long FALLBACK
- op op_or_long FALLBACK
- op op_xor_long FALLBACK
- op op_shl_long FALLBACK
- op op_shr_long FALLBACK
- op op_ushr_long FALLBACK
- op op_add_float FALLBACK
- op op_sub_float FALLBACK
- op op_mul_float FALLBACK
- op op_div_float FALLBACK
- op op_rem_float FALLBACK
- op op_add_double FALLBACK
- op op_sub_double FALLBACK
- op op_mul_double FALLBACK
- op op_div_double FALLBACK
- op op_rem_double FALLBACK
- op op_add_int_2addr FALLBACK
- op op_sub_int_2addr FALLBACK
- op op_mul_int_2addr FALLBACK
- op op_div_int_2addr FALLBACK
- op op_rem_int_2addr FALLBACK
- op op_and_int_2addr FALLBACK
- op op_or_int_2addr FALLBACK
- op op_xor_int_2addr FALLBACK
- op op_shl_int_2addr FALLBACK
- op op_shr_int_2addr FALLBACK
- op op_ushr_int_2addr FALLBACK
- op op_add_long_2addr FALLBACK
- op op_sub_long_2addr FALLBACK
- op op_mul_long_2addr FALLBACK
- op op_div_long_2addr FALLBACK
- op op_rem_long_2addr FALLBACK
- op op_and_long_2addr FALLBACK
- op op_or_long_2addr FALLBACK
- op op_xor_long_2addr FALLBACK
- op op_shl_long_2addr FALLBACK
- op op_shr_long_2addr FALLBACK
- op op_ushr_long_2addr FALLBACK
- op op_add_float_2addr FALLBACK
- op op_sub_float_2addr FALLBACK
- op op_mul_float_2addr FALLBACK
- op op_div_float_2addr FALLBACK
- op op_rem_float_2addr FALLBACK
- op op_add_double_2addr FALLBACK
- op op_sub_double_2addr FALLBACK
- op op_mul_double_2addr FALLBACK
- op op_div_double_2addr FALLBACK
- op op_rem_double_2addr FALLBACK
- op op_add_int_lit16 FALLBACK
- op op_rsub_int FALLBACK
- op op_mul_int_lit16 FALLBACK
- op op_div_int_lit16 FALLBACK
- op op_rem_int_lit16 FALLBACK
- op op_and_int_lit16 FALLBACK
- op op_or_int_lit16 FALLBACK
- op op_xor_int_lit16 FALLBACK
- op op_add_int_lit8 FALLBACK
- op op_rsub_int_lit8 FALLBACK
- op op_mul_int_lit8 FALLBACK
- op op_div_int_lit8 FALLBACK
- op op_rem_int_lit8 FALLBACK
- op op_and_int_lit8 FALLBACK
- op op_or_int_lit8 FALLBACK
- op op_xor_int_lit8 FALLBACK
- op op_shl_int_lit8 FALLBACK
- op op_shr_int_lit8 FALLBACK
- op op_ushr_int_lit8 FALLBACK
- op op_iget_quick FALLBACK
- op op_iget_wide_quick FALLBACK
- op op_iget_object_quick FALLBACK
- op op_iput_quick FALLBACK
- op op_iput_wide_quick FALLBACK
- op op_iput_object_quick FALLBACK
- op op_invoke_virtual_quick FALLBACK
- op op_invoke_virtual_range_quick FALLBACK
- op op_iput_boolean_quick FALLBACK
- op op_iput_byte_quick FALLBACK
- op op_iput_char_quick FALLBACK
- op op_iput_short_quick FALLBACK
- op op_iget_boolean_quick FALLBACK
- op op_iget_byte_quick FALLBACK
- op op_iget_char_quick FALLBACK
- op op_iget_short_quick FALLBACK
- op_unused_f3 FALLBACK
- op_unused_f4 FALLBACK
- op_unused_f5 FALLBACK
- op_unused_f6 FALLBACK
- op_unused_f7 FALLBACK
- op_unused_f8 FALLBACK
- op_unused_f9 FALLBACK
- op_unused_fa FALLBACK
- op_unused_fb FALLBACK
- op_unused_fc FALLBACK
- op_unused_fd FALLBACK
- op_unused_fe FALLBACK
- op_unused_ff FALLBACK
+ # op op_nop FALLBACK
+ # op op_move FALLBACK
+ # op op_move_from16 FALLBACK
+ # op op_move_16 FALLBACK
+ # op op_move_wide FALLBACK
+ # op op_move_wide_from16 FALLBACK
+ # op op_move_wide_16 FALLBACK
+ # op op_move_object FALLBACK
+ # op op_move_object_from16 FALLBACK
+ # op op_move_object_16 FALLBACK
+ # op op_move_result FALLBACK
+ # op op_move_result_wide FALLBACK
+ # op op_move_result_object FALLBACK
+ # op op_move_exception FALLBACK
+ # op op_return_void FALLBACK
+ # op op_return FALLBACK
+ # op op_return_wide FALLBACK
+ # op op_return_object FALLBACK
+ # op op_const_4 FALLBACK
+ # op op_const_16 FALLBACK
+ # op op_const FALLBACK
+ # op op_const_high16 FALLBACK
+ # op op_const_wide_16 FALLBACK
+ # op op_const_wide_32 FALLBACK
+ # op op_const_wide FALLBACK
+ # op op_const_wide_high16 FALLBACK
+ # op op_const_string FALLBACK
+ # op op_const_string_jumbo FALLBACK
+ # op op_const_class FALLBACK
+ # op op_monitor_enter FALLBACK
+ # op op_monitor_exit FALLBACK
+ # op op_check_cast FALLBACK
+ # op op_instance_of FALLBACK
+ # op op_array_length FALLBACK
+ # op op_new_instance FALLBACK
+ # op op_new_array FALLBACK
+ # op op_filled_new_array FALLBACK
+ # op op_filled_new_array_range FALLBACK
+ # op op_fill_array_data FALLBACK
+ # op op_throw FALLBACK
+ # op op_goto FALLBACK
+ # op op_goto_16 FALLBACK
+ # op op_goto_32 FALLBACK
+ # op op_packed_switch FALLBACK
+ # op op_sparse_switch FALLBACK
+ # op op_cmpl_float FALLBACK
+ # op op_cmpg_float FALLBACK
+ # op op_cmpl_double FALLBACK
+ # op op_cmpg_double FALLBACK
+ # op op_cmp_long FALLBACK
+ # op op_if_eq FALLBACK
+ # op op_if_ne FALLBACK
+ # op op_if_lt FALLBACK
+ # op op_if_ge FALLBACK
+ # op op_if_gt FALLBACK
+ # op op_if_le FALLBACK
+ # op op_if_eqz FALLBACK
+ # op op_if_nez FALLBACK
+ # op op_if_ltz FALLBACK
+ # op op_if_gez FALLBACK
+ # op op_if_gtz FALLBACK
+ # op op_if_lez FALLBACK
+ # op op_unused_3e FALLBACK
+ # op op_unused_3f FALLBACK
+ # op op_unused_40 FALLBACK
+ # op op_unused_41 FALLBACK
+ # op op_unused_42 FALLBACK
+ # op op_unused_43 FALLBACK
+ # op op_aget FALLBACK
+ # op op_aget_wide FALLBACK
+ # op op_aget_object FALLBACK
+ # op op_aget_boolean FALLBACK
+ # op op_aget_byte FALLBACK
+ # op op_aget_char FALLBACK
+ # op op_aget_short FALLBACK
+ # op op_aput FALLBACK
+ # op op_aput_wide FALLBACK
+ # op op_aput_object FALLBACK
+ # op op_aput_boolean FALLBACK
+ # op op_aput_byte FALLBACK
+ # op op_aput_char FALLBACK
+ # op op_aput_short FALLBACK
+ # op op_iget FALLBACK
+ # op op_iget_wide FALLBACK
+ # op op_iget_object FALLBACK
+ # op op_iget_boolean FALLBACK
+ # op op_iget_byte FALLBACK
+ # op op_iget_char FALLBACK
+ # op op_iget_short FALLBACK
+ # op op_iput FALLBACK
+ # op op_iput_wide FALLBACK
+ # op op_iput_object FALLBACK
+ # op op_iput_boolean FALLBACK
+ # op op_iput_byte FALLBACK
+ # op op_iput_char FALLBACK
+ # op op_iput_short FALLBACK
+ # op op_sget FALLBACK
+ # op op_sget_wide FALLBACK
+ # op op_sget_object FALLBACK
+ # op op_sget_boolean FALLBACK
+ # op op_sget_byte FALLBACK
+ # op op_sget_char FALLBACK
+ # op op_sget_short FALLBACK
+ # op op_sput FALLBACK
+ # op op_sput_wide FALLBACK
+ # op op_sput_object FALLBACK
+ # op op_sput_boolean FALLBACK
+ # op op_sput_byte FALLBACK
+ # op op_sput_char FALLBACK
+ # op op_sput_short FALLBACK
+ # op op_invoke_virtual FALLBACK
+ # op op_invoke_super FALLBACK
+ # op op_invoke_direct FALLBACK
+ # op op_invoke_static FALLBACK
+ # op op_invoke_interface FALLBACK
+ # op op_return_void_no_barrier FALLBACK
+ # op op_invoke_virtual_range FALLBACK
+ # op op_invoke_super_range FALLBACK
+ # op op_invoke_direct_range FALLBACK
+ # op op_invoke_static_range FALLBACK
+ # op op_invoke_interface_range FALLBACK
+ # op op_unused_79 FALLBACK
+ # op op_unused_7a FALLBACK
+ # op op_neg_int FALLBACK
+ # op op_not_int FALLBACK
+ # op op_neg_long FALLBACK
+ # op op_not_long FALLBACK
+ # op op_neg_float FALLBACK
+ # op op_neg_double FALLBACK
+ # op op_int_to_long FALLBACK
+ # op op_int_to_float FALLBACK
+ # op op_int_to_double FALLBACK
+ # op op_long_to_int FALLBACK
+ # op op_long_to_float FALLBACK
+ # op op_long_to_double FALLBACK
+ # op op_float_to_int FALLBACK
+ # op op_float_to_long FALLBACK
+ # op op_float_to_double FALLBACK
+ # op op_double_to_int FALLBACK
+ # op op_double_to_long FALLBACK
+ # op op_double_to_float FALLBACK
+ # op op_int_to_byte FALLBACK
+ # op op_int_to_char FALLBACK
+ # op op_int_to_short FALLBACK
+ # op op_add_int FALLBACK
+ # op op_sub_int FALLBACK
+ # op op_mul_int FALLBACK
+ # op op_div_int FALLBACK
+ # op op_rem_int FALLBACK
+ # op op_and_int FALLBACK
+ # op op_or_int FALLBACK
+ # op op_xor_int FALLBACK
+ # op op_shl_int FALLBACK
+ # op op_shr_int FALLBACK
+ # op op_ushr_int FALLBACK
+ # op op_add_long FALLBACK
+ # op op_sub_long FALLBACK
+ # op op_mul_long FALLBACK
+ # op op_div_long FALLBACK
+ # op op_rem_long FALLBACK
+ # op op_and_long FALLBACK
+ # op op_or_long FALLBACK
+ # op op_xor_long FALLBACK
+ # op op_shl_long FALLBACK
+ # op op_shr_long FALLBACK
+ # op op_ushr_long FALLBACK
+ # op op_add_float FALLBACK
+ # op op_sub_float FALLBACK
+ # op op_mul_float FALLBACK
+ # op op_div_float FALLBACK
+ # op op_rem_float FALLBACK
+ # op op_add_double FALLBACK
+ # op op_sub_double FALLBACK
+ # op op_mul_double FALLBACK
+ # op op_div_double FALLBACK
+ # op op_rem_double FALLBACK
+ # op op_add_int_2addr FALLBACK
+ # op op_sub_int_2addr FALLBACK
+ # op op_mul_int_2addr FALLBACK
+ # op op_div_int_2addr FALLBACK
+ # op op_rem_int_2addr FALLBACK
+ # op op_and_int_2addr FALLBACK
+ # op op_or_int_2addr FALLBACK
+ # op op_xor_int_2addr FALLBACK
+ # op op_shl_int_2addr FALLBACK
+ # op op_shr_int_2addr FALLBACK
+ # op op_ushr_int_2addr FALLBACK
+ # op op_add_long_2addr FALLBACK
+ # op op_sub_long_2addr FALLBACK
+ # op op_mul_long_2addr FALLBACK
+ # op op_div_long_2addr FALLBACK
+ # op op_rem_long_2addr FALLBACK
+ # op op_and_long_2addr FALLBACK
+ # op op_or_long_2addr FALLBACK
+ # op op_xor_long_2addr FALLBACK
+ # op op_shl_long_2addr FALLBACK
+ # op op_shr_long_2addr FALLBACK
+ # op op_ushr_long_2addr FALLBACK
+ # op op_add_float_2addr FALLBACK
+ # op op_sub_float_2addr FALLBACK
+ # op op_mul_float_2addr FALLBACK
+ # op op_div_float_2addr FALLBACK
+ # op op_rem_float_2addr FALLBACK
+ # op op_add_double_2addr FALLBACK
+ # op op_sub_double_2addr FALLBACK
+ # op op_mul_double_2addr FALLBACK
+ # op op_div_double_2addr FALLBACK
+ # op op_rem_double_2addr FALLBACK
+ # op op_add_int_lit16 FALLBACK
+ # op op_rsub_int FALLBACK
+ # op op_mul_int_lit16 FALLBACK
+ # op op_div_int_lit16 FALLBACK
+ # op op_rem_int_lit16 FALLBACK
+ # op op_and_int_lit16 FALLBACK
+ # op op_or_int_lit16 FALLBACK
+ # op op_xor_int_lit16 FALLBACK
+ # op op_add_int_lit8 FALLBACK
+ # op op_rsub_int_lit8 FALLBACK
+ # op op_mul_int_lit8 FALLBACK
+ # op op_div_int_lit8 FALLBACK
+ # op op_rem_int_lit8 FALLBACK
+ # op op_and_int_lit8 FALLBACK
+ # op op_or_int_lit8 FALLBACK
+ # op op_xor_int_lit8 FALLBACK
+ # op op_shl_int_lit8 FALLBACK
+ # op op_shr_int_lit8 FALLBACK
+ # op op_ushr_int_lit8 FALLBACK
+ # op op_iget_quick FALLBACK
+ # op op_iget_wide_quick FALLBACK
+ # op op_iget_object_quick FALLBACK
+ # op op_iput_quick FALLBACK
+ # op op_iput_wide_quick FALLBACK
+ # op op_iput_object_quick FALLBACK
+ # op op_invoke_virtual_quick FALLBACK
+ # op op_invoke_virtual_range_quick FALLBACK
+ # op op_iput_boolean_quick FALLBACK
+ # op op_iput_byte_quick FALLBACK
+ # op op_iput_char_quick FALLBACK
+ # op op_iput_short_quick FALLBACK
+ # op op_iget_boolean_quick FALLBACK
+ # op op_iget_byte_quick FALLBACK
+ # op op_iget_char_quick FALLBACK
+ # op op_iget_short_quick FALLBACK
+ op op_invoke_lambda FALLBACK
+ # op op_unused_f4 FALLBACK
+ op op_capture_variable FALLBACK
+ op op_create_lambda FALLBACK
+ op op_liberate_variable FALLBACK
+ op op_box_lambda FALLBACK
+ op op_unbox_lambda FALLBACK
+ # op op_unused_fa FALLBACK
+ # op op_unused_fb FALLBACK
+ # op op_unused_fc FALLBACK
+ # op op_unused_fd FALLBACK
+ # op op_unused_fe FALLBACK
+ # op op_unused_ff FALLBACK
op-end
# common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
index a002dc2..1d7eb03 100644
--- a/runtime/interpreter/mterp/config_x86_64
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -19,6 +19,10 @@
handler-style computed-goto
handler-size 128
+function-type-format FUNCTION_TYPE(%s)
+function-size-format SIZE(%s,%s)
+global-name-format SYMBOL(%s)
+
# source for alternate entry stub
asm-alt-stub x86_64/alt_stub.S
@@ -36,262 +40,262 @@
# (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
# (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
- op op_nop FALLBACK
- op op_move FALLBACK
- op op_move_from16 FALLBACK
- op op_move_16 FALLBACK
- op op_move_wide FALLBACK
- op op_move_wide_from16 FALLBACK
- op op_move_wide_16 FALLBACK
- op op_move_object FALLBACK
- op op_move_object_from16 FALLBACK
- op op_move_object_16 FALLBACK
- op op_move_result FALLBACK
- op op_move_result_wide FALLBACK
- op op_move_result_object FALLBACK
- op op_move_exception FALLBACK
- op op_return_void FALLBACK
- op op_return FALLBACK
- op op_return_wide FALLBACK
- op op_return_object FALLBACK
- op op_const_4 FALLBACK
- op op_const_16 FALLBACK
- op op_const FALLBACK
- op op_const_high16 FALLBACK
- op op_const_wide_16 FALLBACK
- op op_const_wide_32 FALLBACK
- op op_const_wide FALLBACK
- op op_const_wide_high16 FALLBACK
- op op_const_string FALLBACK
- op op_const_string_jumbo FALLBACK
- op op_const_class FALLBACK
- op op_monitor_enter FALLBACK
- op op_monitor_exit FALLBACK
- op op_check_cast FALLBACK
- op op_instance_of FALLBACK
- op op_array_length FALLBACK
- op op_new_instance FALLBACK
- op op_new_array FALLBACK
- op op_filled_new_array FALLBACK
- op op_filled_new_array_range FALLBACK
- op op_fill_array_data FALLBACK
- op op_throw FALLBACK
- op op_goto FALLBACK
- op op_goto_16 FALLBACK
- op op_goto_32 FALLBACK
- op op_packed_switch FALLBACK
- op op_sparse_switch FALLBACK
- op op_cmpl_float FALLBACK
- op op_cmpg_float FALLBACK
- op op_cmpl_double FALLBACK
- op op_cmpg_double FALLBACK
- op op_cmp_long FALLBACK
- op op_if_eq FALLBACK
- op op_if_ne FALLBACK
- op op_if_lt FALLBACK
- op op_if_ge FALLBACK
- op op_if_gt FALLBACK
- op op_if_le FALLBACK
- op op_if_eqz FALLBACK
- op op_if_nez FALLBACK
- op op_if_ltz FALLBACK
- op op_if_gez FALLBACK
- op op_if_gtz FALLBACK
- op op_if_lez FALLBACK
- op_unused_3e FALLBACK
- op_unused_3f FALLBACK
- op_unused_40 FALLBACK
- op_unused_41 FALLBACK
- op_unused_42 FALLBACK
- op_unused_43 FALLBACK
- op op_aget FALLBACK
- op op_aget_wide FALLBACK
- op op_aget_object FALLBACK
- op op_aget_boolean FALLBACK
- op op_aget_byte FALLBACK
- op op_aget_char FALLBACK
- op op_aget_short FALLBACK
- op op_aput FALLBACK
- op op_aput_wide FALLBACK
- op op_aput_object FALLBACK
- op op_aput_boolean FALLBACK
- op op_aput_byte FALLBACK
- op op_aput_char FALLBACK
- op op_aput_short FALLBACK
- op op_iget FALLBACK
- op op_iget_wide FALLBACK
- op op_iget_object FALLBACK
- op op_iget_boolean FALLBACK
- op op_iget_byte FALLBACK
- op op_iget_char FALLBACK
- op op_iget_short FALLBACK
- op op_iput FALLBACK
- op op_iput_wide FALLBACK
- op op_iput_object FALLBACK
- op op_iput_boolean FALLBACK
- op op_iput_byte FALLBACK
- op op_iput_char FALLBACK
- op op_iput_short FALLBACK
- op op_sget FALLBACK
- op op_sget_wide FALLBACK
- op op_sget_object FALLBACK
- op op_sget_boolean FALLBACK
- op op_sget_byte FALLBACK
- op op_sget_char FALLBACK
- op op_sget_short FALLBACK
- op op_sput FALLBACK
- op op_sput_wide FALLBACK
- op op_sput_object FALLBACK
- op op_sput_boolean FALLBACK
- op op_sput_byte FALLBACK
- op op_sput_char FALLBACK
- op op_sput_short FALLBACK
- op op_invoke_virtual FALLBACK
- op op_invoke_super FALLBACK
- op op_invoke_direct FALLBACK
- op op_invoke_static FALLBACK
- op op_invoke_interface FALLBACK
- op op_return_void_no_barrier FALLBACK
- op op_invoke_virtual_range FALLBACK
- op op_invoke_super_range FALLBACK
- op op_invoke_direct_range FALLBACK
- op op_invoke_static_range FALLBACK
- op op_invoke_interface_range FALLBACK
- op_unused_79 FALLBACK
- op_unused_7a FALLBACK
- op op_neg_int FALLBACK
- op op_not_int FALLBACK
- op op_neg_long FALLBACK
- op op_not_long FALLBACK
- op op_neg_float FALLBACK
- op op_neg_double FALLBACK
- op op_int_to_long FALLBACK
- op op_int_to_float FALLBACK
- op op_int_to_double FALLBACK
- op op_long_to_int FALLBACK
- op op_long_to_float FALLBACK
- op op_long_to_double FALLBACK
- op op_float_to_int FALLBACK
- op op_float_to_long FALLBACK
- op op_float_to_double FALLBACK
- op op_double_to_int FALLBACK
- op op_double_to_long FALLBACK
- op op_double_to_float FALLBACK
- op op_int_to_byte FALLBACK
- op op_int_to_char FALLBACK
- op op_int_to_short FALLBACK
- op op_add_int FALLBACK
- op op_sub_int FALLBACK
- op op_mul_int FALLBACK
- op op_div_int FALLBACK
- op op_rem_int FALLBACK
- op op_and_int FALLBACK
- op op_or_int FALLBACK
- op op_xor_int FALLBACK
- op op_shl_int FALLBACK
- op op_shr_int FALLBACK
- op op_ushr_int FALLBACK
- op op_add_long FALLBACK
- op op_sub_long FALLBACK
- op op_mul_long FALLBACK
- op op_div_long FALLBACK
- op op_rem_long FALLBACK
- op op_and_long FALLBACK
- op op_or_long FALLBACK
- op op_xor_long FALLBACK
- op op_shl_long FALLBACK
- op op_shr_long FALLBACK
- op op_ushr_long FALLBACK
- op op_add_float FALLBACK
- op op_sub_float FALLBACK
- op op_mul_float FALLBACK
- op op_div_float FALLBACK
- op op_rem_float FALLBACK
- op op_add_double FALLBACK
- op op_sub_double FALLBACK
- op op_mul_double FALLBACK
- op op_div_double FALLBACK
- op op_rem_double FALLBACK
- op op_add_int_2addr FALLBACK
- op op_sub_int_2addr FALLBACK
- op op_mul_int_2addr FALLBACK
- op op_div_int_2addr FALLBACK
- op op_rem_int_2addr FALLBACK
- op op_and_int_2addr FALLBACK
- op op_or_int_2addr FALLBACK
- op op_xor_int_2addr FALLBACK
- op op_shl_int_2addr FALLBACK
- op op_shr_int_2addr FALLBACK
- op op_ushr_int_2addr FALLBACK
- op op_add_long_2addr FALLBACK
- op op_sub_long_2addr FALLBACK
- op op_mul_long_2addr FALLBACK
- op op_div_long_2addr FALLBACK
- op op_rem_long_2addr FALLBACK
- op op_and_long_2addr FALLBACK
- op op_or_long_2addr FALLBACK
- op op_xor_long_2addr FALLBACK
- op op_shl_long_2addr FALLBACK
- op op_shr_long_2addr FALLBACK
- op op_ushr_long_2addr FALLBACK
- op op_add_float_2addr FALLBACK
- op op_sub_float_2addr FALLBACK
- op op_mul_float_2addr FALLBACK
- op op_div_float_2addr FALLBACK
- op op_rem_float_2addr FALLBACK
- op op_add_double_2addr FALLBACK
- op op_sub_double_2addr FALLBACK
- op op_mul_double_2addr FALLBACK
- op op_div_double_2addr FALLBACK
- op op_rem_double_2addr FALLBACK
- op op_add_int_lit16 FALLBACK
- op op_rsub_int FALLBACK
- op op_mul_int_lit16 FALLBACK
- op op_div_int_lit16 FALLBACK
- op op_rem_int_lit16 FALLBACK
- op op_and_int_lit16 FALLBACK
- op op_or_int_lit16 FALLBACK
- op op_xor_int_lit16 FALLBACK
- op op_add_int_lit8 FALLBACK
- op op_rsub_int_lit8 FALLBACK
- op op_mul_int_lit8 FALLBACK
- op op_div_int_lit8 FALLBACK
- op op_rem_int_lit8 FALLBACK
- op op_and_int_lit8 FALLBACK
- op op_or_int_lit8 FALLBACK
- op op_xor_int_lit8 FALLBACK
- op op_shl_int_lit8 FALLBACK
- op op_shr_int_lit8 FALLBACK
- op op_ushr_int_lit8 FALLBACK
- op op_iget_quick FALLBACK
- op op_iget_wide_quick FALLBACK
- op op_iget_object_quick FALLBACK
- op op_iput_quick FALLBACK
- op op_iput_wide_quick FALLBACK
- op op_iput_object_quick FALLBACK
- op op_invoke_virtual_quick FALLBACK
- op op_invoke_virtual_range_quick FALLBACK
- op op_iput_boolean_quick FALLBACK
- op op_iput_byte_quick FALLBACK
- op op_iput_char_quick FALLBACK
- op op_iput_short_quick FALLBACK
- op op_iget_boolean_quick FALLBACK
- op op_iget_byte_quick FALLBACK
- op op_iget_char_quick FALLBACK
- op op_iget_short_quick FALLBACK
- op_unused_f3 FALLBACK
- op_unused_f4 FALLBACK
- op_unused_f5 FALLBACK
- op_unused_f6 FALLBACK
- op_unused_f7 FALLBACK
- op_unused_f8 FALLBACK
- op_unused_f9 FALLBACK
- op_unused_fa FALLBACK
- op_unused_fb FALLBACK
- op_unused_fc FALLBACK
- op_unused_fd FALLBACK
- op_unused_fe FALLBACK
- op_unused_ff FALLBACK
+ # op op_nop FALLBACK
+ # op op_move FALLBACK
+ # op op_move_from16 FALLBACK
+ # op op_move_16 FALLBACK
+ # op op_move_wide FALLBACK
+ # op op_move_wide_from16 FALLBACK
+ # op op_move_wide_16 FALLBACK
+ # op op_move_object FALLBACK
+ # op op_move_object_from16 FALLBACK
+ # op op_move_object_16 FALLBACK
+ # op op_move_result FALLBACK
+ # op op_move_result_wide FALLBACK
+ # op op_move_result_object FALLBACK
+ # op op_move_exception FALLBACK
+ # op op_return_void FALLBACK
+ # op op_return FALLBACK
+ # op op_return_wide FALLBACK
+ # op op_return_object FALLBACK
+ # op op_const_4 FALLBACK
+ # op op_const_16 FALLBACK
+ # op op_const FALLBACK
+ # op op_const_high16 FALLBACK
+ # op op_const_wide_16 FALLBACK
+ # op op_const_wide_32 FALLBACK
+ # op op_const_wide FALLBACK
+ # op op_const_wide_high16 FALLBACK
+ # op op_const_string FALLBACK
+ # op op_const_string_jumbo FALLBACK
+ # op op_const_class FALLBACK
+ # op op_monitor_enter FALLBACK
+ # op op_monitor_exit FALLBACK
+ # op op_check_cast FALLBACK
+ # op op_instance_of FALLBACK
+ # op op_array_length FALLBACK
+ # op op_new_instance FALLBACK
+ # op op_new_array FALLBACK
+ # op op_filled_new_array FALLBACK
+ # op op_filled_new_array_range FALLBACK
+ # op op_fill_array_data FALLBACK
+ # op op_throw FALLBACK
+ # op op_goto FALLBACK
+ # op op_goto_16 FALLBACK
+ # op op_goto_32 FALLBACK
+ # op op_packed_switch FALLBACK
+ # op op_sparse_switch FALLBACK
+ # op op_cmpl_float FALLBACK
+ # op op_cmpg_float FALLBACK
+ # op op_cmpl_double FALLBACK
+ # op op_cmpg_double FALLBACK
+ # op op_cmp_long FALLBACK
+ # op op_if_eq FALLBACK
+ # op op_if_ne FALLBACK
+ # op op_if_lt FALLBACK
+ # op op_if_ge FALLBACK
+ # op op_if_gt FALLBACK
+ # op op_if_le FALLBACK
+ # op op_if_eqz FALLBACK
+ # op op_if_nez FALLBACK
+ # op op_if_ltz FALLBACK
+ # op op_if_gez FALLBACK
+ # op op_if_gtz FALLBACK
+ # op op_if_lez FALLBACK
+ # op op_unused_3e FALLBACK
+ # op op_unused_3f FALLBACK
+ # op op_unused_40 FALLBACK
+ # op op_unused_41 FALLBACK
+ # op op_unused_42 FALLBACK
+ # op op_unused_43 FALLBACK
+ # op op_aget FALLBACK
+ # op op_aget_wide FALLBACK
+ # op op_aget_object FALLBACK
+ # op op_aget_boolean FALLBACK
+ # op op_aget_byte FALLBACK
+ # op op_aget_char FALLBACK
+ # op op_aget_short FALLBACK
+ # op op_aput FALLBACK
+ # op op_aput_wide FALLBACK
+ # op op_aput_object FALLBACK
+ # op op_aput_boolean FALLBACK
+ # op op_aput_byte FALLBACK
+ # op op_aput_char FALLBACK
+ # op op_aput_short FALLBACK
+ # op op_iget FALLBACK
+ # op op_iget_wide FALLBACK
+ # op op_iget_object FALLBACK
+ # op op_iget_boolean FALLBACK
+ # op op_iget_byte FALLBACK
+ # op op_iget_char FALLBACK
+ # op op_iget_short FALLBACK
+ # op op_iput FALLBACK
+ # op op_iput_wide FALLBACK
+ # op op_iput_object FALLBACK
+ # op op_iput_boolean FALLBACK
+ # op op_iput_byte FALLBACK
+ # op op_iput_char FALLBACK
+ # op op_iput_short FALLBACK
+ # op op_sget FALLBACK
+ # op op_sget_wide FALLBACK
+ # op op_sget_object FALLBACK
+ # op op_sget_boolean FALLBACK
+ # op op_sget_byte FALLBACK
+ # op op_sget_char FALLBACK
+ # op op_sget_short FALLBACK
+ # op op_sput FALLBACK
+ # op op_sput_wide FALLBACK
+ # op op_sput_object FALLBACK
+ # op op_sput_boolean FALLBACK
+ # op op_sput_byte FALLBACK
+ # op op_sput_char FALLBACK
+ # op op_sput_short FALLBACK
+ # op op_invoke_virtual FALLBACK
+ # op op_invoke_super FALLBACK
+ # op op_invoke_direct FALLBACK
+ # op op_invoke_static FALLBACK
+ # op op_invoke_interface FALLBACK
+ # op op_return_void_no_barrier FALLBACK
+ # op op_invoke_virtual_range FALLBACK
+ # op op_invoke_super_range FALLBACK
+ # op op_invoke_direct_range FALLBACK
+ # op op_invoke_static_range FALLBACK
+ # op op_invoke_interface_range FALLBACK
+ # op op_unused_79 FALLBACK
+ # op op_unused_7a FALLBACK
+ # op op_neg_int FALLBACK
+ # op op_not_int FALLBACK
+ # op op_neg_long FALLBACK
+ # op op_not_long FALLBACK
+ # op op_neg_float FALLBACK
+ # op op_neg_double FALLBACK
+ # op op_int_to_long FALLBACK
+ # op op_int_to_float FALLBACK
+ # op op_int_to_double FALLBACK
+ # op op_long_to_int FALLBACK
+ # op op_long_to_float FALLBACK
+ # op op_long_to_double FALLBACK
+ # op op_float_to_int FALLBACK
+ # op op_float_to_long FALLBACK
+ # op op_float_to_double FALLBACK
+ # op op_double_to_int FALLBACK
+ # op op_double_to_long FALLBACK
+ # op op_double_to_float FALLBACK
+ # op op_int_to_byte FALLBACK
+ # op op_int_to_char FALLBACK
+ # op op_int_to_short FALLBACK
+ # op op_add_int FALLBACK
+ # op op_sub_int FALLBACK
+ # op op_mul_int FALLBACK
+ # op op_div_int FALLBACK
+ # op op_rem_int FALLBACK
+ # op op_and_int FALLBACK
+ # op op_or_int FALLBACK
+ # op op_xor_int FALLBACK
+ # op op_shl_int FALLBACK
+ # op op_shr_int FALLBACK
+ # op op_ushr_int FALLBACK
+ # op op_add_long FALLBACK
+ # op op_sub_long FALLBACK
+ # op op_mul_long FALLBACK
+ # op op_div_long FALLBACK
+ # op op_rem_long FALLBACK
+ # op op_and_long FALLBACK
+ # op op_or_long FALLBACK
+ # op op_xor_long FALLBACK
+ # op op_shl_long FALLBACK
+ # op op_shr_long FALLBACK
+ # op op_ushr_long FALLBACK
+ # op op_add_float FALLBACK
+ # op op_sub_float FALLBACK
+ # op op_mul_float FALLBACK
+ # op op_div_float FALLBACK
+ # op op_rem_float FALLBACK
+ # op op_add_double FALLBACK
+ # op op_sub_double FALLBACK
+ # op op_mul_double FALLBACK
+ # op op_div_double FALLBACK
+ # op op_rem_double FALLBACK
+ # op op_add_int_2addr FALLBACK
+ # op op_sub_int_2addr FALLBACK
+ # op op_mul_int_2addr FALLBACK
+ # op op_div_int_2addr FALLBACK
+ # op op_rem_int_2addr FALLBACK
+ # op op_and_int_2addr FALLBACK
+ # op op_or_int_2addr FALLBACK
+ # op op_xor_int_2addr FALLBACK
+ # op op_shl_int_2addr FALLBACK
+ # op op_shr_int_2addr FALLBACK
+ # op op_ushr_int_2addr FALLBACK
+ # op op_add_long_2addr FALLBACK
+ # op op_sub_long_2addr FALLBACK
+ # op op_mul_long_2addr FALLBACK
+ # op op_div_long_2addr FALLBACK
+ # op op_rem_long_2addr FALLBACK
+ # op op_and_long_2addr FALLBACK
+ # op op_or_long_2addr FALLBACK
+ # op op_xor_long_2addr FALLBACK
+ # op op_shl_long_2addr FALLBACK
+ # op op_shr_long_2addr FALLBACK
+ # op op_ushr_long_2addr FALLBACK
+ # op op_add_float_2addr FALLBACK
+ # op op_sub_float_2addr FALLBACK
+ # op op_mul_float_2addr FALLBACK
+ # op op_div_float_2addr FALLBACK
+ # op op_rem_float_2addr FALLBACK
+ # op op_add_double_2addr FALLBACK
+ # op op_sub_double_2addr FALLBACK
+ # op op_mul_double_2addr FALLBACK
+ # op op_div_double_2addr FALLBACK
+ # op op_rem_double_2addr FALLBACK
+ # op op_add_int_lit16 FALLBACK
+ # op op_rsub_int FALLBACK
+ # op op_mul_int_lit16 FALLBACK
+ # op op_div_int_lit16 FALLBACK
+ # op op_rem_int_lit16 FALLBACK
+ # op op_and_int_lit16 FALLBACK
+ # op op_or_int_lit16 FALLBACK
+ # op op_xor_int_lit16 FALLBACK
+ # op op_add_int_lit8 FALLBACK
+ # op op_rsub_int_lit8 FALLBACK
+ # op op_mul_int_lit8 FALLBACK
+ # op op_div_int_lit8 FALLBACK
+ # op op_rem_int_lit8 FALLBACK
+ # op op_and_int_lit8 FALLBACK
+ # op op_or_int_lit8 FALLBACK
+ # op op_xor_int_lit8 FALLBACK
+ # op op_shl_int_lit8 FALLBACK
+ # op op_shr_int_lit8 FALLBACK
+ # op op_ushr_int_lit8 FALLBACK
+ # op op_iget_quick FALLBACK
+ # op op_iget_wide_quick FALLBACK
+ # op op_iget_object_quick FALLBACK
+ # op op_iput_quick FALLBACK
+ # op op_iput_wide_quick FALLBACK
+ # op op_iput_object_quick FALLBACK
+ # op op_invoke_virtual_quick FALLBACK
+ # op op_invoke_virtual_range_quick FALLBACK
+ # op op_iput_boolean_quick FALLBACK
+ # op op_iput_byte_quick FALLBACK
+ # op op_iput_char_quick FALLBACK
+ # op op_iput_short_quick FALLBACK
+ # op op_iget_boolean_quick FALLBACK
+ # op op_iget_byte_quick FALLBACK
+ # op op_iget_char_quick FALLBACK
+ # op op_iget_short_quick FALLBACK
+ op op_invoke_lambda FALLBACK
+ # op op_unused_f4 FALLBACK
+ op op_capture_variable FALLBACK
+ op op_create_lambda FALLBACK
+ op op_liberate_variable FALLBACK
+ op op_box_lambda FALLBACK
+ op op_unbox_lambda FALLBACK
+ # op op_unused_fa FALLBACK
+ # op op_unused_fb FALLBACK
+ # op op_unused_fc FALLBACK
+ # op op_unused_fd FALLBACK
+ # op op_unused_fe FALLBACK
+ # op op_unused_ff FALLBACK
op-end
# common subroutines for asm
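In this configuration file, entries commented out with "# op" keep the handler generated from the per-opcode files, while the active "op ... FALLBACK" lines (the lambda opcodes above) route those opcodes to the stub in mips/fallback.S further down. A minimal sketch of what one such FALLBACK slot is expected to reduce to (the label name is an assumption for illustration):

    .L_op_invoke_lambda:            # assumed label for the generated handler slot
        b       MterpFallback       # hand this opcode to the reference interpreter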
diff --git a/runtime/interpreter/mterp/mips/alt_stub.S b/runtime/interpreter/mterp/mips/alt_stub.S
new file mode 100644
index 0000000..4598061
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/alt_stub.S
@@ -0,0 +1,13 @@
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (${opnum} * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to MterpCheckBefore(self, shadow_frame)
diff --git a/runtime/interpreter/mterp/mips/bincmp.S b/runtime/interpreter/mterp/mips/bincmp.S
new file mode 100644
index 0000000..70057f6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/bincmp.S
@@ -0,0 +1,37 @@
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ b${revcmp} a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b 2f
+1:
+ li rINST, 2 # rINST <- BYTE branch dist for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a2, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ bgez a2, .L_${opcode}_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+
+%break
+
+.L_${opcode}_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
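To make the "revcmp" convention concrete: for if-lt the reverse comparison is "ge", so the b${revcmp} line above is expected to expand roughly as follows (a sketch, not generated output):

    bge     a2, a3, 1f              # if-lt: take the not-taken path when vA >= vB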
diff --git a/runtime/interpreter/mterp/mips/binop.S b/runtime/interpreter/mterp/mips/binop.S
new file mode 100644
index 0000000..ce09da45
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binop.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 11-14 instructions */
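As a concrete instance, op_add_int later in this patch includes this template with instr set to "addu a0, a0, a1"; a sketch of the operative tail after substitution (chkzero defaults to 0, so the divide-by-zero check is inert):

    GET_VREG(a1, a3)                 # a1 <- vCC
    GET_VREG(a0, a2)                 # a0 <- vBB
    FETCH_ADVANCE_INST(2)            # advance rPC, load rINST
    addu    a0, a0, a1               # substituted $instr
    GET_INST_OPCODE(t0)              # extract opcode from rINST
    SET_VREG_GOTO(a0, rOBJ, t0)      # vAA <- a0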
diff --git a/runtime/interpreter/mterp/mips/binop2addr.S b/runtime/interpreter/mterp/mips/binop2addr.S
new file mode 100644
index 0000000..548cbcb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binop2addr.S
@@ -0,0 +1,29 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopLit16.S b/runtime/interpreter/mterp/mips/binopLit16.S
new file mode 100644
index 0000000..fc0c9ff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopLit16.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if $chkzero
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopLit8.S b/runtime/interpreter/mterp/mips/binopLit8.S
new file mode 100644
index 0000000..a591408
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopLit8.S
@@ -0,0 +1,31 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopWide.S b/runtime/interpreter/mterp/mips/binopWide.S
new file mode 100644
index 0000000..608525b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopWide.S
@@ -0,0 +1,35 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+ LOAD64($arg0, $arg1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64($arg2, $arg3, t1) # a2/a3 <- vCC/vCC+1
+ .if $chkzero
+ or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vAA/vAA+1 <- $result0/$result1
+ /* 14-17 instructions */
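For example, op_add_long below supplies both preinstr and instr fragments (with result0/result1 overridden to v0/v1); the template is then expected to end roughly like this sketch:

    addu    v0, a2, a0                    # preinstr: low word
    addu    a1, a3, a1                    # instr: high word ...
    sltu    v1, v0, a2                    # ... carry out of the low-word add
    addu    v1, v1, a1
    GET_INST_OPCODE(t0)
    SET_VREG64_GOTO(v0, v1, rOBJ, t0)     # vAA/vAA+1 <- v0/v1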
diff --git a/runtime/interpreter/mterp/mips/binopWide2addr.S b/runtime/interpreter/mterp/mips/binopWide2addr.S
new file mode 100644
index 0000000..cc92149
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/binopWide2addr.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64($arg2, $arg3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64($arg0, $arg1, t0) # a0/a1 <- vAA/vAA+1
+ .if $chkzero
+ or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64($result0, $result1, rOBJ) # vAA/vAA+1 <- $result0/$result1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S
new file mode 100644
index 0000000..cef08fe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/entry.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+ .global ExecuteMterpImpl
+ .ent ExecuteMterpImpl
+ .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ * a0 Thread* self
+ * a1 code_item
+ * a2 ShadowFrame
+ * a3 JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+ .set noreorder
+ .cpload t9
+ .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+ STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+ .cprestore STACK_OFFSET_GP
+
+ /* Remember the return register */
+ sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+ /* Remember the code_item */
+ sw a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+ /* set up "named" registers */
+ move rSELF, a0
+ lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+ addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs[] in the shadow frame (the Dalvik registers)
+ EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
+ lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
+ addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[]
+ EAS1(rPC, rPC, a0) # Create direct pointer to 1st dex opcode
+
+ EXPORT_PC()
+
+ /* Starting ibase */
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+ /* start executing the instruction at rPC */
+ FETCH_INST() # load rINST from rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/mips/fallback.S b/runtime/interpreter/mterp/mips/fallback.S
new file mode 100644
index 0000000..82cbc63
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fallback.S
@@ -0,0 +1,2 @@
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
diff --git a/runtime/interpreter/mterp/mips/fbinop.S b/runtime/interpreter/mterp/mips/fbinop.S
new file mode 100644
index 0000000..d0d39ae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinop.S
@@ -0,0 +1,19 @@
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG_F(fa1, a3) # a1 <- vCC
+ GET_VREG_F(fa0, a2) # a0 <- vBB
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $instr # f0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/fbinop2addr.S b/runtime/interpreter/mterp/mips/fbinop2addr.S
new file mode 100644
index 0000000..ccb67b1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinop2addr.S
@@ -0,0 +1,19 @@
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr"
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $instr
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/fbinopWide.S b/runtime/interpreter/mterp/mips/fbinopWide.S
new file mode 100644
index 0000000..3be9325
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinopWide.S
@@ -0,0 +1,28 @@
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $instr
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ b .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/fbinopWide2addr.S b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
new file mode 100644
index 0000000..8541f11
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
@@ -0,0 +1,21 @@
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64_F(fa0, fa0f, t0)
+ LOAD64_F(fa1, fa1f, a1)
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/footer.S b/runtime/interpreter/mterp/mips/footer.S
new file mode 100644
index 0000000..083dc15
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/footer.S
@@ -0,0 +1,179 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogDivideByZeroException)
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogArrayIndexException)
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNegativeArraySizeException)
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNoSuchMethodException)
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNullObjectException)
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogExceptionThrownException)
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ lw a2, THREAD_FLAGS_OFFSET(rSELF)
+ JAL(MterpLogSuspendFallback)
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ lw a0, THREAD_EXCEPTION_OFFSET(rSELF)
+ beqz a0, MterpFallback # If no pending exception, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpHandleException) # (self, shadow_frame)
+ beqz v0, MterpExceptionReturn # no local catch, back to caller.
+ lw a0, OFF_FP_CODE_ITEM(rFP)
+ lw a1, OFF_FP_DEX_PC(rFP)
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+ addu rPC, a0, CODEITEM_INSNS_OFFSET
+ sll a1, a1, 1
+ addu rPC, rPC, a1 # generate new dex_pc_ptr
+ /* Do we need to switch interpreters? */
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ /* resume execution at catch block */
+ EXPORT_PC()
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for suspend check request. Assumes rINST already loaded, rPC advanced and
+ * still needs to get the opcode and branch to it, and flags are in ra.
+ */
+MterpCheckSuspendAndContinue:
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh rIBASE
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ bnez ra, 1f
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+1:
+ EXPORT_PC()
+ move a0, rSELF
+ JAL(MterpSuspendCheck) # (self)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpLogOSR)
+#endif
+ li v0, 1 # Signal normal return
+ b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+ move v0, zero # signal retry with reference interpreter.
+ b MterpDone
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA. Here we restore SP, restore the registers, and then return
+ * through RA.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ li v0, 1 # signal return to caller.
+ b MterpDone
+MterpReturn:
+ lw a2, OFF_FP_RESULT_REGISTER(rFP)
+ sw v0, 0(a2)
+ sw v1, 4(a2)
+ li v0, 1 # signal return to caller.
+MterpDone:
+/* Restore from the stack and return. Frame size = STACK_SIZE */
+ STACK_LOAD_FULL()
+ jalr zero, ra
+
+ .end ExecuteMterpImpl
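To make the MterpCheckSuspendAndContinue contract concrete, here is a sketch of how a branch handler reaches it, mirroring the tail of mips/bincmp.S earlier in this patch (a numeric local label stands in for the template's .L_${opcode}_finish):

    addu    a2, rINST, rINST                 # branch distance: code units -> bytes
    FETCH_ADVANCE_INST_RB(a2)                # rPC advanced, rINST reloaded
    bgez    a2, 1f                           # only backward branches poll for suspend
    lw      ra, THREAD_FLAGS_OFFSET(rSELF)   # flags must be passed in ra
    b       MterpCheckSuspendAndContinue
1:
    GET_INST_OPCODE(t0)
    GOTO_OPCODE(t0)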
diff --git a/runtime/interpreter/mterp/mips/funop.S b/runtime/interpreter/mterp/mips/funop.S
new file mode 100644
index 0000000..bfb9346
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/funop.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+
+.L${opcode}_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GOTO_OPCODE(t1) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/funopWide.S b/runtime/interpreter/mterp/mips/funopWide.S
new file mode 100644
index 0000000..3d4cf22
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/funopWide.S
@@ -0,0 +1,22 @@
+%default {"preinstr":"", "ld_arg":"LOAD64_F(fa0, fa0f, a3)", "st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ $ld_arg
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0/a1 <- op, a2-a3 changed
+
+.L${opcode}_set_vreg:
+ $st_result # vAA <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/funopWider.S b/runtime/interpreter/mterp/mips/funopWider.S
new file mode 100644
index 0000000..efb85f3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/funopWider.S
@@ -0,0 +1,19 @@
+%default {"st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
+ /*
+ * Generic 32-bit to 64-bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+
+.L${opcode}_set_vreg:
+ $st_result # vA/vA+1 <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
new file mode 100644
index 0000000..37ab21d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/header.S
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+#include "asm_support.h"
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32REVGE2 /* mips32r2 and greater */
+#if (__mips==32) && (__mips_isa_rev>=5)
+#define FPU64 /* 64 bit FPU */
+#if (__mips==32) && (__mips_isa_rev>=6)
+#define MIPS32REVGE6 /* mips32r6 and greater */
+#endif
+#endif
+#endif
+
+/* MIPS definitions and declarations
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rIBASE interpreted instruction base pointer, used for computed goto
+ s4 rINST first 16-bit code unit of current instruction
+ s6 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rREFS s6
+#define rTEMP s7
+
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+
+/* GP register definitions */
+#define zero $$0 /* always zero */
+#define AT $$at /* assembler temp */
+#define v0 $$2 /* return value */
+#define v1 $$3
+#define a0 $$4 /* argument registers */
+#define a1 $$5
+#define a2 $$6
+#define a3 $$7
+#define t0 $$8 /* temp registers (not saved across subroutine calls) */
+#define t1 $$9
+#define t2 $$10
+#define t3 $$11
+#define t4 $$12
+#define t5 $$13
+#define t6 $$14
+#define t7 $$15
+#define ta0 $$12 /* alias */
+#define ta1 $$13
+#define ta2 $$14
+#define ta3 $$15
+#define s0 $$16 /* saved across subroutine calls (callee saved) */
+#define s1 $$17
+#define s2 $$18
+#define s3 $$19
+#define s4 $$20
+#define s5 $$21
+#define s6 $$22
+#define s7 $$23
+#define t8 $$24 /* two more temp registers */
+#define t9 $$25
+#define k0 $$26 /* kernel temporary */
+#define k1 $$27
+#define gp $$28 /* global pointer */
+#define sp $$29 /* stack pointer */
+#define s8 $$30 /* one more callee saved */
+#define ra $$31 /* return address */
+
+/* FP register definitions */
+#define fv0 $$f0
+#define fv0f $$f1
+#define fv1 $$f2
+#define fv1f $$f3
+#define fa0 $$f12
+#define fa0f $$f13
+#define fa1 $$f14
+#define fa1f $$f15
+#define ft0 $$f4
+#define ft0f $$f5
+#define ft1 $$f6
+#define ft1f $$f7
+#define ft2 $$f8
+#define ft2f $$f9
+#define ft3 $$f10
+#define ft3f $$f11
+#define ft4 $$f16
+#define ft4f $$f17
+#define ft5 $$f18
+#define ft5f $$f19
+#define fs0 $$f20
+#define fs0f $$f21
+#define fs1 $$f22
+#define fs1f $$f23
+#define fs2 $$f24
+#define fs2f $$f25
+#define fs3 $$f26
+#define fs3f $$f27
+#define fs4 $$f28
+#define fs4f $$f29
+#define fs5 $$f30
+#define fs5f $$f31
+
+#ifndef MIPS32REVGE6
+#define fcc0 $$fcc0
+#define fcc1 $$fcc1
+#endif
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+#define EXPORT_PC() \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP)
+
+#define EXPORT_DEX_PC(tmp) \
+ lw tmp, OFF_FP_CODE_ITEM(rFP); \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
+ addu tmp, CODEITEM_INSNS_OFFSET; \
+ subu tmp, rPC, tmp; \
+ sra tmp, tmp, 1; \
+ sw tmp, OFF_FP_DEX_PC(rFP)
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() lhu rINST, (rPC)
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+ addu rPC, rPC, ((_count) * 2)
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+ lhu _dreg, ((_count)*2)(_sreg) ; \
+ addu _sreg, _sreg, (_count)*2
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
+
+/* Advance rPC by some number of code units. */
+#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
+
+/*
+ * Fetch the next instruction from an offset specified by rd. Updates
+ * rPC to point to the next instruction. "rd" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ */
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+ lhu rINST, (rPC)
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+#define GET_PREFETCHED_OPCODE(dreg, sreg) andi dreg, sreg, 255
+
+/*
+ * Begin executing the opcode in rd.
+ */
+#define GOTO_OPCODE(rd) sll rd, rd, ${handler_size_bits}; \
+ addu rd, rIBASE, rd; \
+ jalr zero, rd
+
+#define GOTO_OPCODE_BASE(_base, rd) sll rd, rd, ${handler_size_bits}; \
+ addu rd, _base, rd; \
+ jalr zero, rd
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+ .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+
+#define SET_VREG64(rlo, rhi, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+
+#ifdef FPU64
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rREFS, AT; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8); \
+ addu t8, rFP, AT; \
+ mfhc1 AT, rlo; \
+ sw AT, 4(t8); \
+ .set at; \
+ s.s rlo, 0(t8)
+#else
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rlo, 0(t8); \
+ s.s rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#endif
+
+#define SET_VREG_OBJECT(rd, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw rd, 0(t8)
+
+/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+ sll dst, dst, ${handler_size_bits}; \
+ addu dst, rIBASE, dst; \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+
+/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
+ sll dst, dst, ${handler_size_bits}; \
+ addu dst, rIBASE, dst; \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+
+#define SET_VREG_F(rd, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifdef MIPS32REVGE2
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#else
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+ sll AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+ srl AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+ sw rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+ lw rhi, (off+4)(rbase)
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#ifdef FPU64
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ sw AT, (off+4)(rbase); \
+ .set at
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+ .set noat; \
+ lw AT, (off+4)(rbase); \
+ mthc1 AT, rlo; \
+ .set at
+#else
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+ s.s rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+ l.s rhi, (off+4)(rbase)
+#endif
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+
+#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_GP 84
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+/*
+ * FP register usage restrictions:
+ * 1) We don't use the callee save FP registers so we don't have to save them.
+ * 2) We don't use the odd FP registers so we can share code with mips32r6.
+ */
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+ STACK_STORE(ra, 124); \
+ STACK_STORE(s8, 120); \
+ STACK_STORE(s0, 116); \
+ STACK_STORE(s1, 112); \
+ STACK_STORE(s2, 108); \
+ STACK_STORE(s3, 104); \
+ STACK_STORE(s4, 100); \
+ STACK_STORE(s5, 96); \
+ STACK_STORE(s6, 92); \
+ STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+ STACK_LOAD(s7, 88); \
+ STACK_LOAD(s6, 92); \
+ STACK_LOAD(s5, 96); \
+ STACK_LOAD(s4, 100); \
+ STACK_LOAD(s3, 104); \
+ STACK_LOAD(s2, 108); \
+ STACK_LOAD(s1, 112); \
+ STACK_LOAD(s0, 116); \
+ STACK_LOAD(s8, 120); \
+ STACK_LOAD(ra, 124); \
+ DELETE_STACK(STACK_SIZE)
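A short sketch of these macros in their typical combination for a vA, vB format instruction, the pattern the /2addr handlers in this patch follow:

    GET_OPA4(rOBJ)                # rOBJ <- A (low nibble of rINST's high byte)
    GET_OPB(a3)                   # a3 <- B (high nibble)
    GET_VREG(a0, rOBJ)            # a0 <- vA, read through rFP
    GET_VREG(a1, a3)              # a1 <- vB
    FETCH_ADVANCE_INST(1)         # vA, vB instructions are one code unit
    GET_INST_OPCODE(t0)
    SET_VREG_GOTO(a0, rOBJ, t0)   # write vA and dispatch in one macro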
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
new file mode 100644
index 0000000..bcd3a57
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/invoke.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern $helper
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL($helper)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
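Each invoke opcode supplies its own helper. Assuming, for illustration, that op_invoke_virtual (not shown in this excerpt) passes helper=MterpInvokeVirtual, the call line would become:

    JAL(MterpInvokeVirtual)       # assumed helper: (self, shadow_frame, dex_pc_ptr, inst_data)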
diff --git a/runtime/interpreter/mterp/mips/op_add_double.S b/runtime/interpreter/mterp/mips/op_add_double.S
new file mode 100644
index 0000000..12ef0cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_double_2addr.S b/runtime/interpreter/mterp/mips/op_add_double_2addr.S
new file mode 100644
index 0000000..c57add5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float.S b/runtime/interpreter/mterp/mips/op_add_float.S
new file mode 100644
index 0000000..6a46cf0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float_2addr.S b/runtime/interpreter/mterp/mips/op_add_float_2addr.S
new file mode 100644
index 0000000..6ab5cc1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int.S b/runtime/interpreter/mterp/mips/op_add_int.S
new file mode 100644
index 0000000..53a0cb1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_2addr.S b/runtime/interpreter/mterp/mips/op_add_int_2addr.S
new file mode 100644
index 0000000..ddd9214
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit16.S b/runtime/interpreter/mterp/mips/op_add_int_lit16.S
new file mode 100644
index 0000000..05535c1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit8.S b/runtime/interpreter/mterp/mips/op_add_int_lit8.S
new file mode 100644
index 0000000..fd021b3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_long.S b/runtime/interpreter/mterp/mips/op_add_long.S
new file mode 100644
index 0000000..faacc6a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_long.S
@@ -0,0 +1,9 @@
+/*
+ * The compiler generates the following sequence for
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu a1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,a1
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_add_long_2addr.S b/runtime/interpreter/mterp/mips/op_add_long_2addr.S
new file mode 100644
index 0000000..bf827c1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_add_long_2addr.S
@@ -0,0 +1,4 @@
+/*
+ * See op_add_long.S for details
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_aget.S b/runtime/interpreter/mterp/mips/op_aget.S
new file mode 100644
index 0000000..8aa8992
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget.S
@@ -0,0 +1,32 @@
+%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if $shift
+ EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $load a2, $data_offset(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
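As one instance, op_aget_char below passes load=lhu, shift=1, and data_offset=MIRROR_CHAR_ARRAY_DATA_OFFSET, so the parameterized lines are expected to reduce to roughly:

    EASN(a0, a0, a1, 1)                             # shift=1: index scaled to halfwords
    # (null and bounds checks unchanged, elided here)
    lhu     a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)   # load=lhu: zero-extended char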
diff --git a/runtime/interpreter/mterp/mips/op_aget_boolean.S b/runtime/interpreter/mterp/mips/op_aget_boolean.S
new file mode 100644
index 0000000..59f7f82
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_byte.S b/runtime/interpreter/mterp/mips/op_aget_byte.S
new file mode 100644
index 0000000..11038fa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_byte.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_char.S b/runtime/interpreter/mterp/mips/op_aget_char.S
new file mode 100644
index 0000000..96f2ab6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_char.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_object.S b/runtime/interpreter/mterp/mips/op_aget_object.S
new file mode 100644
index 0000000..e3ab9d8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_object.S
@@ -0,0 +1,20 @@
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ EXPORT_PC()
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ JAL(artAGetObjectFromMterp) # v0 <- GetObj(array, index)
+ lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ PREFETCH_INST(2) # load rINST
+ bnez a1, MterpException
+ SET_VREG_OBJECT(v0, rOBJ) # vAA <- v0
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aget_short.S b/runtime/interpreter/mterp/mips/op_aget_short.S
new file mode 100644
index 0000000..cd7f7bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_short.S
@@ -0,0 +1 @@
+%include "mips/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_wide.S b/runtime/interpreter/mterp/mips/op_aget_wide.S
new file mode 100644
index 0000000..08822f5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aget_wide.S
@@ -0,0 +1,22 @@
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a2, a3, rOBJ, t0) # vAA/vAA+1 <- a2/a3
diff --git a/runtime/interpreter/mterp/mips/op_and_int.S b/runtime/interpreter/mterp/mips/op_and_int.S
new file mode 100644
index 0000000..98fe4af
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_2addr.S b/runtime/interpreter/mterp/mips/op_and_int_2addr.S
new file mode 100644
index 0000000..7f90ed4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit16.S b/runtime/interpreter/mterp/mips/op_and_int_lit16.S
new file mode 100644
index 0000000..e46f23b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit8.S b/runtime/interpreter/mterp/mips/op_and_int_lit8.S
new file mode 100644
index 0000000..3332883
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long.S b/runtime/interpreter/mterp/mips/op_and_long.S
new file mode 100644
index 0000000..a98a6df
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long_2addr.S b/runtime/interpreter/mterp/mips/op_and_long_2addr.S
new file mode 100644
index 0000000..350c044
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_aput.S b/runtime/interpreter/mterp/mips/op_aput.S
new file mode 100644
index 0000000..53d6ae0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput.S
@@ -0,0 +1,30 @@
+%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if $shift
+ EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ $store a2, $data_offset(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_boolean.S b/runtime/interpreter/mterp/mips/op_aput_boolean.S
new file mode 100644
index 0000000..9cae5ef
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_byte.S b/runtime/interpreter/mterp/mips/op_aput_byte.S
new file mode 100644
index 0000000..3bbd12c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_byte.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_char.S b/runtime/interpreter/mterp/mips/op_aput_char.S
new file mode 100644
index 0000000..ae69717
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_char.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_object.S b/runtime/interpreter/mterp/mips/op_aput_object.S
new file mode 100644
index 0000000..55b13b1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_object.S
@@ -0,0 +1,14 @@
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ *
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ JAL(MterpAputObject)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_short.S b/runtime/interpreter/mterp/mips/op_aput_short.S
new file mode 100644
index 0000000..9586259
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_short.S
@@ -0,0 +1 @@
+%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_wide.S b/runtime/interpreter/mterp/mips/op_aput_wide.S
new file mode 100644
index 0000000..ef99261
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_aput_wide.S
@@ -0,0 +1,25 @@
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to store the pair with STORE64.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t0) # t0 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
+ # compare unsigned index, length
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # vBB[vCC] <- a2/a3
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_array_length.S b/runtime/interpreter/mterp/mips/op_array_length.S
new file mode 100644
index 0000000..2b4a86f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_array_length.S
@@ -0,0 +1,12 @@
+ /*
+ * Return the length of an array.
+ */
+ GET_OPB(a1) # a1 <- B
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a0, a1) # a0 <- vB (object ref)
+ # is object null?
+ beqz a0, common_errNullObject # yup, fail
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- array length
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a3, a2, t0) # vA <- length
diff --git a/runtime/interpreter/mterp/mips/op_check_cast.S b/runtime/interpreter/mterp/mips/op_check_cast.S
new file mode 100644
index 0000000..9a6cefa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_check_cast.S
@@ -0,0 +1,16 @@
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ # check-cast vAA, class /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ EAS2(a1, rFP, a1) # a1 <- &object
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ JAL(MterpCheckCast) # v0 <- CheckCast(index, &obj, method, self)
+ PREFETCH_INST(2)
+ bnez v0, MterpPossibleException
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_cmp_long.S b/runtime/interpreter/mterp/mips/op_cmp_long.S
new file mode 100644
index 0000000..44806c3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmp_long.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * I think I can improve on the ARM code by the following observation
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+ * sgt t1, x.hi, y.hi; # (x.hi > y.hi) ? 1:0
+ * subu v0, t1, t0 # v0 = -1:1:0 for [ < > = ]
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(a3, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ slt t0, a1, a3 # compare hi
+ sgt t1, a1, a3
+ subu v0, t1, t0 # v0 <- (-1, 1, 0)
+ bnez v0, .L${opcode}_finish
+ # at this point x.hi==y.hi
+ sltu t0, a0, a2 # compare lo
+ sgtu t1, a0, a2
+ subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
+
+.L${opcode}_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_double.S b/runtime/interpreter/mterp/mips/op_cmpg_double.S
new file mode 100644
index 0000000..e7965a7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpg_double.S
@@ -0,0 +1 @@
+%include "mips/op_cmpl_double.S" { "naninst":"li rTEMP, 1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_float.S b/runtime/interpreter/mterp/mips/op_cmpg_float.S
new file mode 100644
index 0000000..53519a6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpg_float.S
@@ -0,0 +1 @@
+%include "mips/op_cmpl_float.S" { "naninst":"li rTEMP, 1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_double.S b/runtime/interpreter/mterp/mips/op_cmpl_double.S
new file mode 100644
index 0000000..5a47fd7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpl_double.S
@@ -0,0 +1,54 @@
+%default { "naninst":"li rTEMP, -1" }
+ /*
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register (rTEMP) based on the comparison results.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See op_cmpl_float for more details.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and rOBJ, a0, 255 # s5 <- BB
+ srl t0, a0, 8 # t0 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[BB]
+ EAS2(t0, rFP, t0) # t0 <- &fp[CC]
+ LOAD64_F(ft0, ft0f, rOBJ)
+ LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+ cmp.ult.d ft2, ft0, ft1
+ li rTEMP, -1
+ bc1nez ft2, .L${opcode}_finish
+ cmp.ult.d ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, .L${opcode}_finish
+ cmp.eq.d ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, .L${opcode}_finish
+ b .L${opcode}_nan
+#else
+ c.olt.d fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, .L${opcode}_finish
+ c.olt.d fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, .L${opcode}_finish
+ c.eq.d fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, .L${opcode}_finish
+ b .L${opcode}_nan
+#endif
+%break
+
+.L${opcode}_nan:
+ $naninst
+
+.L${opcode}_finish:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_float.S b/runtime/interpreter/mterp/mips/op_cmpl_float.S
new file mode 100644
index 0000000..cfd87ee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_cmpl_float.S
@@ -0,0 +1,61 @@
+%default { "naninst":"li rTEMP, -1" }
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register rTEMP based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1 or 1}; // one or both operands is NaN
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+
+ /* "clasic" form */
+ FETCH(a0, 1) # a0 <- CCBB
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8
+ GET_VREG_F(ft0, a2)
+ GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+ cmp.ult.s ft2, ft0, ft1 # Is ft0 < ft1
+ li rTEMP, -1
+ bc1nez ft2, .L${opcode}_finish
+ cmp.ult.s ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, .L${opcode}_finish
+ cmp.eq.s ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, .L${opcode}_finish
+ b .L${opcode}_nan
+#else
+ c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
+ li rTEMP, -1
+ bc1t fcc0, .L${opcode}_finish
+ c.olt.s fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, .L${opcode}_finish
+ c.eq.s fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, .L${opcode}_finish
+ b .L${opcode}_nan
+#endif
+%break
+
+.L${opcode}_nan:
+ $naninst
+
+.L${opcode}_finish:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
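
The cmpg variants included above (op_cmpg_float.S, op_cmpg_double.S) differ from these cmpl templates only in the naninst substitution: li rTEMP, 1 instead of li rTEMP, -1. A minimal C sketch of the resulting semantics, with illustrative names only:

    /* Illustrative sketch; cmpl and cmpg differ solely in the NaN result. */
    static int cmpl_float(float x, float y) {   /* naninst: li rTEMP, -1 */
        if (x == y) return 0;
        if (x < y)  return -1;
        if (x > y)  return 1;
        return -1;                              /* at least one operand is NaN */
    }

    static int cmpg_float(float x, float y) {   /* naninst: li rTEMP, 1 */
        if (x == y) return 0;
        if (x < y)  return -1;
        if (x > y)  return 1;
        return 1;                               /* at least one operand is NaN */
    }
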
diff --git a/runtime/interpreter/mterp/mips/op_const.S b/runtime/interpreter/mterp/mips/op_const.S
new file mode 100644
index 0000000..c505761
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const.S
@@ -0,0 +1,9 @@
+ # const vAA, /* +BBBBbbbb */
+ GET_OPA(a3) # a3 <- AA
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a1, a1, 16
+ or a0, a1, a0 # a0 <- BBBBbbbb
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_16.S b/runtime/interpreter/mterp/mips/op_const_16.S
new file mode 100644
index 0000000..5e47633
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_16.S
@@ -0,0 +1,6 @@
+ # const/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_4.S b/runtime/interpreter/mterp/mips/op_const_4.S
new file mode 100644
index 0000000..8b662f9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_4.S
@@ -0,0 +1,8 @@
+ # const/4 vA, /* +B */
+ sll a1, rINST, 16 # a1 <- Bxxx0000
+ GET_OPA(a0) # a0 <- A+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
+ and a0, a0, 15
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
diff --git a/runtime/interpreter/mterp/mips/op_const_class.S b/runtime/interpreter/mterp/mips/op_const_class.S
new file mode 100644
index 0000000..7202b11
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_class.S
@@ -0,0 +1,12 @@
+ # const/class vAA, Class /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstClass)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_high16.S b/runtime/interpreter/mterp/mips/op_const_high16.S
new file mode 100644
index 0000000..36c1c35
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_high16.S
@@ -0,0 +1,7 @@
+ # const/high16 vAA, /* +BBBB0000 */
+ FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ sll a0, a0, 16 # a0 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_string.S b/runtime/interpreter/mterp/mips/op_const_string.S
new file mode 100644
index 0000000..d8eeb46
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_string.S
@@ -0,0 +1,12 @@
+ # const/string vAA, String /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
new file mode 100644
index 0000000..d732ca1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
@@ -0,0 +1,15 @@
+ # const/string vAA, String /* BBBBBBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a2, 2) # a2 <- BBBB (high)
+ GET_OPA(a1) # a1 <- AA
+ sll a2, a2, 16
+ or a0, a0, a2 # a0 <- BBBBbbbb
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(3) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(3) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide.S b/runtime/interpreter/mterp/mips/op_const_wide.S
new file mode 100644
index 0000000..01d0f87
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide.S
@@ -0,0 +1,14 @@
+ # const-wide vAA, /* +HHHHhhhhBBBBbbbb */
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (low middle)
+ FETCH(a2, 3) # a2 <- hhhh (high middle)
+ sll a1, 16 # a1 <- BBBB0000
+ or a0, a1 # a0 <- BBBBbbbb (low word)
+ FETCH(a3, 4) # a3 <- HHHH (high)
+ GET_OPA(t1) # t1 <- AA
+ sll a3, 16
+ or a1, a3, a2 # a1 <- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, t1) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_16.S b/runtime/interpreter/mterp/mips/op_const_wide_16.S
new file mode 100644
index 0000000..583d9ef
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide_16.S
@@ -0,0 +1,8 @@
+ # const-wide/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ sra a1, a0, 31 # a1 <- ssssssss
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_32.S b/runtime/interpreter/mterp/mips/op_const_wide_32.S
new file mode 100644
index 0000000..3eb4574
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide_32.S
@@ -0,0 +1,11 @@
+ # const-wide/32 vAA, /* +BBBBbbbb */
+ FETCH(a0, 1) # a0 <- 0000bbbb (low)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a2, a2, 16
+ or a0, a0, a2 # a0 <- BBBBbbbb
+ sra a1, a0, 31 # a1 <- ssssssss
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_high16.S b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
new file mode 100644
index 0000000..88382c6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
@@ -0,0 +1,9 @@
+ # const-wide/high16 vAA, /* +BBBB000000000000 */
+ FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ li a0, 0 # a0 <- 00000000
+ sll a1, 16 # a1 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_div_double.S b/runtime/interpreter/mterp/mips/op_div_double.S
new file mode 100644
index 0000000..84e4c4e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_double_2addr.S b/runtime/interpreter/mterp/mips/op_div_double_2addr.S
new file mode 100644
index 0000000..65b92e3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float.S b/runtime/interpreter/mterp/mips/op_div_float.S
new file mode 100644
index 0000000..44b8d47
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float_2addr.S b/runtime/interpreter/mterp/mips/op_div_float_2addr.S
new file mode 100644
index 0000000..e5fff92
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_int.S b/runtime/interpreter/mterp/mips/op_div_int.S
new file mode 100644
index 0000000..5d28c84
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_2addr.S b/runtime/interpreter/mterp/mips/op_div_int_2addr.S
new file mode 100644
index 0000000..6c079e0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int_2addr.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit16.S b/runtime/interpreter/mterp/mips/op_div_int_lit16.S
new file mode 100644
index 0000000..ee7452c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int_lit16.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit8.S b/runtime/interpreter/mterp/mips/op_div_int_lit8.S
new file mode 100644
index 0000000..d2964b8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_int_lit8.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_long.S b/runtime/interpreter/mterp/mips/op_div_long.S
new file mode 100644
index 0000000..2097866
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_long_2addr.S b/runtime/interpreter/mterp/mips/op_div_long_2addr.S
new file mode 100644
index 0000000..c279305
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_float.S b/runtime/interpreter/mterp/mips/op_double_to_float.S
new file mode 100644
index 0000000..1d32c2e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_double_to_float.S
@@ -0,0 +1 @@
+%include "mips/unopNarrower.S" {"instr":"cvt.s.d fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
new file mode 100644
index 0000000..30a0a73
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_double_to_int.S
@@ -0,0 +1,58 @@
+%include "mips/unopNarrower.S" {"instr":"b d2i_doconv"}
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+%break
+
+d2i_doconv:
+#ifdef MIPS32REVGE6
+ la t0, .LDOUBLE_TO_INT_max
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa1, fa0
+ l.s fv0, .LDOUBLE_TO_INT_maxret
+ bc1nez ft2, .L${opcode}_set_vreg_f
+
+ la t0, .LDOUBLE_TO_INT_min
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa0, fa1
+ l.s fv0, .LDOUBLE_TO_INT_minret
+ bc1nez ft2, .L${opcode}_set_vreg_f
+
+ mov.d fa1, fa0
+ cmp.un.d ft2, fa0, fa1
+ li.s fv0, 0
+ bc1nez ft2, .L${opcode}_set_vreg_f
+#else
+ la t0, .LDOUBLE_TO_INT_max
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa1, fa0
+ l.s fv0, .LDOUBLE_TO_INT_maxret
+ bc1t .L${opcode}_set_vreg_f
+
+ la t0, .LDOUBLE_TO_INT_min
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa0, fa1
+ l.s fv0, .LDOUBLE_TO_INT_minret
+ bc1t .L${opcode}_set_vreg_f
+
+ mov.d fa1, fa0
+ c.un.d fcc0, fa0, fa1
+ li.s fv0, 0
+ bc1t .L${opcode}_set_vreg_f
+#endif
+
+ trunc.w.d fv0, fa0
+ b .L${opcode}_set_vreg_f
+
+.LDOUBLE_TO_INT_max:
+ .dword 0x41dfffffffc00000 # maxint, as a double
+.LDOUBLE_TO_INT_min:
+ .dword 0xc1e0000000000000 # minint, as a double
+.LDOUBLE_TO_INT_maxret:
+ .word 0x7fffffff
+.LDOUBLE_TO_INT_minret:
+ .word 0x80000000
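
A minimal C sketch of the clipping behavior the comment above describes: NaN converts to 0, out-of-range values saturate to int min/max, and everything else truncates toward zero. The name d2i is illustrative only, not the handler itself:

    #include <stdint.h>

    /* Illustrative sketch of double-to-int with clipping; not this patch's code. */
    static int32_t d2i(double d) {
        if (d != d) return 0;                       /* NaN -> 0 */
        if (d >= 2147483647.0) return INT32_MAX;    /* saturate to maxint */
        if (d <= -2147483648.0) return INT32_MIN;   /* saturate to minint */
        return (int32_t)d;                          /* in range: truncate toward zero */
    }
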
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
new file mode 100644
index 0000000..4f9e367
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_double_to_long.S
@@ -0,0 +1,56 @@
+%include "mips/funopWide.S" {"instr":"b d2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+d2l_doconv:
+#ifdef MIPS32REVGE6
+ la t0, .LDOUBLE_TO_LONG_max
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa1, fa0
+ la t0, .LDOUBLE_TO_LONG_ret_max
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1nez ft2, .L${opcode}_set_vreg
+
+ la t0, .LDOUBLE_TO_LONG_min
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa0, fa1
+ la t0, .LDOUBLE_TO_LONG_ret_min
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1nez ft2, .L${opcode}_set_vreg
+
+ mov.d fa1, fa0
+ cmp.un.d ft2, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1nez ft2, .L${opcode}_set_vreg
+#else
+ la t0, .LDOUBLE_TO_LONG_max
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa1, fa0
+ la t0, .LDOUBLE_TO_LONG_ret_max
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1t .L${opcode}_set_vreg
+
+ la t0, .LDOUBLE_TO_LONG_min
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa0, fa1
+ la t0, .LDOUBLE_TO_LONG_ret_min
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1t .L${opcode}_set_vreg
+
+ mov.d fa1, fa0
+ c.un.d fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1t .L${opcode}_set_vreg
+#endif
+ JAL(__fixdfdi)
+ b .L${opcode}_set_vreg
+
+.LDOUBLE_TO_LONG_max:
+ .dword 0x43e0000000000000 # maxlong, as a double (high word)
+.LDOUBLE_TO_LONG_min:
+ .dword 0xc3e0000000000000 # minlong, as a double (high word)
+.LDOUBLE_TO_LONG_ret_max:
+ .dword 0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+ .dword 0x8000000000000000
diff --git a/runtime/interpreter/mterp/mips/op_fill_array_data.S b/runtime/interpreter/mterp/mips/op_fill_array_data.S
new file mode 100644
index 0000000..8605746
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_fill_array_data.S
@@ -0,0 +1,14 @@
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll a1, a1, 16 # a1 <- BBBB0000
+ or a1, a0, a1 # a1 <- BBBBbbbb
+ GET_VREG(a0, a3) # a0 <- vAA (array object)
+ EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
+ JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
+ beqz v0, MterpPossibleException # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array.S b/runtime/interpreter/mterp/mips/op_filled_new_array.S
new file mode 100644
index 0000000..3f62fae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_filled_new_array.S
@@ -0,0 +1,18 @@
+%default { "helper":"MterpFilledNewArray" }
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ .extern $helper
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
+ move a1, rPC
+ move a2, rSELF
+ JAL($helper) # v0 <- helper(shadow_frame, pc, self)
+ beqz v0, MterpPossibleException # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
new file mode 100644
index 0000000..f8dcb0e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "mips/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips/op_float_to_double.S b/runtime/interpreter/mterp/mips/op_float_to_double.S
new file mode 100644
index 0000000..1315255
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_float_to_double.S
@@ -0,0 +1 @@
+%include "mips/funopWider.S" {"instr":"cvt.d.s fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
new file mode 100644
index 0000000..e032869
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_float_to_int.S
@@ -0,0 +1,50 @@
+%include "mips/funop.S" {"instr":"b f2i_doconv"}
+%break
+
+/*
+ * Not an entry point, as it is used only once.
+ */
+f2i_doconv:
+#ifdef MIPS32REVGE6
+ l.s fa1, .LFLOAT_TO_INT_max
+ cmp.ule.s ft2, fa1, fa0
+ l.s fv0, .LFLOAT_TO_INT_ret_max
+ bc1nez ft2, .L${opcode}_set_vreg_f
+
+ l.s fa1, .LFLOAT_TO_INT_min
+ cmp.ule.s ft2, fa0, fa1
+ l.s fv0, .LFLOAT_TO_INT_ret_min
+ bc1nez ft2, .L${opcode}_set_vreg_f
+
+ mov.s fa1, fa0
+ cmp.un.s ft2, fa0, fa1
+ li.s fv0, 0
+ bc1nez ft2, .L${opcode}_set_vreg_f
+#else
+ l.s fa1, .LFLOAT_TO_INT_max
+ c.ole.s fcc0, fa1, fa0
+ l.s fv0, .LFLOAT_TO_INT_ret_max
+ bc1t .L${opcode}_set_vreg_f
+
+ l.s fa1, .LFLOAT_TO_INT_min
+ c.ole.s fcc0, fa0, fa1
+ l.s fv0, .LFLOAT_TO_INT_ret_min
+ bc1t .L${opcode}_set_vreg_f
+
+ mov.s fa1, fa0
+ c.un.s fcc0, fa0, fa1
+ li.s fv0, 0
+ bc1t .L${opcode}_set_vreg_f
+#endif
+
+ trunc.w.s fv0, fa0
+ b .L${opcode}_set_vreg_f
+
+.LFLOAT_TO_INT_max:
+ .word 0x4f000000 # maxint, as a float
+.LFLOAT_TO_INT_min:
+ .word 0xcf000000 # minint, as a float
+.LFLOAT_TO_INT_ret_max:
+ .word 0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+ .word 0x80000000
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
new file mode 100644
index 0000000..77b2c46
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_float_to_long.S
@@ -0,0 +1,51 @@
+%include "mips/funopWider.S" {"instr":"b f2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+f2l_doconv:
+#ifdef MIPS32REVGE6
+ l.s fa1, .LLONG_TO_max
+ cmp.ule.s ft2, fa1, fa0
+ li rRESULT0, ~0
+ li rRESULT1, ~0x80000000
+ bc1nez ft2, .L${opcode}_set_vreg
+
+ l.s fa1, .LLONG_TO_min
+ cmp.ule.s ft2, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0x80000000
+ bc1nez ft2, .L${opcode}_set_vreg
+
+ mov.s fa1, fa0
+ cmp.un.s ft2, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1nez ft2, .L${opcode}_set_vreg
+#else
+ l.s fa1, .LLONG_TO_max
+ c.ole.s fcc0, fa1, fa0
+ li rRESULT0, ~0
+ li rRESULT1, ~0x80000000
+ bc1t .L${opcode}_set_vreg
+
+ l.s fa1, .LLONG_TO_min
+ c.ole.s fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0x80000000
+ bc1t .L${opcode}_set_vreg
+
+ mov.s fa1, fa0
+ c.un.s fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1t .L${opcode}_set_vreg
+#endif
+
+ JAL(__fixsfdi)
+
+ b .L${opcode}_set_vreg
+
+.LLONG_TO_max:
+ .word 0x5f000000 # maxlong, as a float
+
+.LLONG_TO_min:
+ .word 0xdf000000 # minlong, as a float
diff --git a/runtime/interpreter/mterp/mips/op_goto.S b/runtime/interpreter/mterp/mips/op_goto.S
new file mode 100644
index 0000000..d6f21c9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_goto.S
@@ -0,0 +1,38 @@
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+#if MTERP_PROFILE_BRANCHES
+ sll a0, rINST, 16 # a0 <- AAxx0000
+ sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a2, rINST, rINST # a2 <- byte offset
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ /* If backwards branch refresh rIBASE */
+ bgez a2, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ sll a0, rINST, 16 # a0 <- AAxx0000
+ sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
+ addu a2, rINST, rINST # a2 <- byte offset
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ /* If backwards branch refresh rIBASE */
+ bgez a2, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
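
As a worked example of the sign-extension and doubling above: with AA = 0xFE, the fetched code unit has 0xFE in its high byte, so sll 16 followed by sra 24 leaves 0xfffffffe (-2) in rINST as a signed code-unit offset; doubling gives a byte offset of -4, and the bgez test then treats it as a backward branch and takes the suspend-check path.
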
diff --git a/runtime/interpreter/mterp/mips/op_goto_16.S b/runtime/interpreter/mterp/mips/op_goto_16.S
new file mode 100644
index 0000000..cec4432
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_goto_16.S
@@ -0,0 +1,34 @@
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_PROFILE_BRANCHES
+ FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a1, rINST, rINST # a1 <- byte offset, flags set
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
+ addu a1, rINST, rINST # a1 <- byte offset, flags set
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_goto_32.S b/runtime/interpreter/mterp/mips/op_goto_32.S
new file mode 100644
index 0000000..083acd1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_goto_32.S
@@ -0,0 +1,43 @@
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_PROFILE_BRANCHES
+ FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ sll a1, a1, 16
+ or rINST, a0, a1 # rINST <- AAAAaaaa
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ sll a1, a1, 16
+ or rINST, a0, a1 # rINST <- AAAAaaaa
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
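
Concretely, goto/32 +0 branches to itself and yields a byte offset of 0; using bgtz here (where goto and goto/16 use bgez) keeps that zero-offset case on the suspend-check path, so a tight self-loop remains interruptible.
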
diff --git a/runtime/interpreter/mterp/mips/op_if_eq.S b/runtime/interpreter/mterp/mips/op_if_eq.S
new file mode 100644
index 0000000..e7190d8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_eq.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_if_eqz.S b/runtime/interpreter/mterp/mips/op_if_eqz.S
new file mode 100644
index 0000000..0a78fd9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_eqz.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ge.S b/runtime/interpreter/mterp/mips/op_if_ge.S
new file mode 100644
index 0000000..b2629ba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_ge.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gez.S b/runtime/interpreter/mterp/mips/op_if_gez.S
new file mode 100644
index 0000000..b02f677
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_gez.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gt.S b/runtime/interpreter/mterp/mips/op_if_gt.S
new file mode 100644
index 0000000..f620d4a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_gt.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gtz.S b/runtime/interpreter/mterp/mips/op_if_gtz.S
new file mode 100644
index 0000000..5e5dd70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_gtz.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_le.S b/runtime/interpreter/mterp/mips/op_if_le.S
new file mode 100644
index 0000000..a4e8b1a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_le.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lez.S b/runtime/interpreter/mterp/mips/op_if_lez.S
new file mode 100644
index 0000000..af551a6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_lez.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lt.S b/runtime/interpreter/mterp/mips/op_if_lt.S
new file mode 100644
index 0000000..f33b9a4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_lt.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ltz.S b/runtime/interpreter/mterp/mips/op_if_ltz.S
new file mode 100644
index 0000000..18fcb1d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_ltz.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ne.S b/runtime/interpreter/mterp/mips/op_if_ne.S
new file mode 100644
index 0000000..e0a102b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_ne.S
@@ -0,0 +1 @@
+%include "mips/bincmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_if_nez.S b/runtime/interpreter/mterp/mips/op_if_nez.S
new file mode 100644
index 0000000..d1866a0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_if_nez.S
@@ -0,0 +1 @@
+%include "mips/zcmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
new file mode 100644
index 0000000..86d44fa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget.S
@@ -0,0 +1,25 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL($helper)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ .if $is_object
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ .else
+ SET_VREG(v0, a2) # fp[A] <- v0
+ .endif
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean.S b/runtime/interpreter/mterp/mips/op_iget_boolean.S
new file mode 100644
index 0000000..e03364e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
new file mode 100644
index 0000000..f3032b3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte.S b/runtime/interpreter/mterp/mips/op_iget_byte.S
new file mode 100644
index 0000000..dc87cfe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_byte.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
new file mode 100644
index 0000000..d93f844
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char.S b/runtime/interpreter/mterp/mips/op_iget_char.S
new file mode 100644
index 0000000..55f8a93
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_char.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char_quick.S b/runtime/interpreter/mterp/mips/op_iget_char_quick.S
new file mode 100644
index 0000000..6f6d608
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object.S b/runtime/interpreter/mterp/mips/op_iget_object.S
new file mode 100644
index 0000000..11d93a4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_object.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object_quick.S b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
new file mode 100644
index 0000000..31d94b9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
@@ -0,0 +1,15 @@
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ GET_OPB(a2) # a2 <- B
+ FETCH(a1, 1) # a1 <- field byte offset
+ EXPORT_PC()
+ GET_VREG(a0, a2) # a0 <- object we're operating on
+ JAL(artIGetObjectFromMterp) # v0 <- GetObj(obj, offset)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iget_quick.S b/runtime/interpreter/mterp/mips/op_iget_quick.S
new file mode 100644
index 0000000..fbafa5b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_quick.S
@@ -0,0 +1,14 @@
+%default { "load":"lw" }
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1
+ $load a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
diff --git a/runtime/interpreter/mterp/mips/op_iget_short.S b/runtime/interpreter/mterp/mips/op_iget_short.S
new file mode 100644
index 0000000..9086246
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_short.S
@@ -0,0 +1 @@
+%include "mips/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_short_quick.S b/runtime/interpreter/mterp/mips/op_iget_short_quick.S
new file mode 100644
index 0000000..899a0fe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
new file mode 100644
index 0000000..8fe3089
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_wide.S
@@ -0,0 +1,20 @@
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field byte offset
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGet64InstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpException # bail out
+ SET_VREG64(v0, v1, a2) # fp[A] <- v0/v1
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
new file mode 100644
index 0000000..4d2f291
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
@@ -0,0 +1,13 @@
+ # iget-wide-quick vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 # t0 <- a3 + a1
+ LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_instance_of.S b/runtime/interpreter/mterp/mips/op_instance_of.S
new file mode 100644
index 0000000..d2679bd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_instance_of.S
@@ -0,0 +1,21 @@
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ # instance-of vA, vB, class /* CCCC */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- CCCC
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &object
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ JAL(MterpInstanceOf) # v0 <- Mterp(index, &obj, method, self)
+ lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ PREFETCH_INST(2) # load rINST
+ bnez a1, MterpException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_int_to_byte.S b/runtime/interpreter/mterp/mips/op_int_to_byte.S
new file mode 100644
index 0000000..77314c62
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"preinstr":"sll a0, a0, 24", "instr":"sra a0, a0, 24"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_char.S b/runtime/interpreter/mterp/mips/op_int_to_char.S
new file mode 100644
index 0000000..1b74a6e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_char.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"preinstr":"", "instr":"and a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_double.S b/runtime/interpreter/mterp/mips/op_int_to_double.S
new file mode 100644
index 0000000..89484ce
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_double.S
@@ -0,0 +1 @@
+%include "mips/funopWider.S" {"instr":"cvt.d.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_float.S b/runtime/interpreter/mterp/mips/op_int_to_float.S
new file mode 100644
index 0000000..d6f4b36
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_float.S
@@ -0,0 +1 @@
+%include "mips/funop.S" {"instr":"cvt.s.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_long.S b/runtime/interpreter/mterp/mips/op_int_to_long.S
new file mode 100644
index 0000000..9907463
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_long.S
@@ -0,0 +1 @@
+%include "mips/unopWider.S" {"instr":"sra a1, a0, 31"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_short.S b/runtime/interpreter/mterp/mips/op_int_to_short.S
new file mode 100644
index 0000000..5649c2a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_int_to_short.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"preinstr":"sll a0, 16", "instr":"sra a0, 16"}
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct.S b/runtime/interpreter/mterp/mips/op_invoke_direct.S
new file mode 100644
index 0000000..1ef198a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
new file mode 100644
index 0000000..af7477f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface.S b/runtime/interpreter/mterp/mips/op_invoke_interface.S
new file mode 100644
index 0000000..80a485a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_interface.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeInterface" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
new file mode 100644
index 0000000..8d725dc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static.S b/runtime/interpreter/mterp/mips/op_invoke_static.S
new file mode 100644
index 0000000..46253cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_static.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static_range.S b/runtime/interpreter/mterp/mips/op_invoke_static_range.S
new file mode 100644
index 0000000..96abafe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super.S b/runtime/interpreter/mterp/mips/op_invoke_super.S
new file mode 100644
index 0000000..473951b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_super.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeSuper" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super_range.S b/runtime/interpreter/mterp/mips/op_invoke_super_range.S
new file mode 100644
index 0000000..963ff27
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual.S b/runtime/interpreter/mterp/mips/op_invoke_virtual.S
new file mode 100644
index 0000000..ea51e98
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtual" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
new file mode 100644
index 0000000..0c00091
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
new file mode 100644
index 0000000..82201e7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000..c783675
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips/op_iput.S b/runtime/interpreter/mterp/mips/op_iput.S
new file mode 100644
index 0000000..732a9a4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput.S
@@ -0,0 +1,21 @@
+%default { "handler":"artSet32InstanceFromMterp" }
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ # op vA, vB, field /* CCCC */
+ .extern $handler
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a2, a2) # a2 <- fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL($handler)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean.S b/runtime/interpreter/mterp/mips/op_iput_boolean.S
new file mode 100644
index 0000000..da28c97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
new file mode 100644
index 0000000..7d5caf6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte.S b/runtime/interpreter/mterp/mips/op_iput_byte.S
new file mode 100644
index 0000000..da28c97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_byte.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
new file mode 100644
index 0000000..7d5caf6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char.S b/runtime/interpreter/mterp/mips/op_iput_char.S
new file mode 100644
index 0000000..389b0bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_char.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char_quick.S b/runtime/interpreter/mterp/mips/op_iput_char_quick.S
new file mode 100644
index 0000000..4bc84eb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_object.S b/runtime/interpreter/mterp/mips/op_iput_object.S
new file mode 100644
index 0000000..6b856e7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_object.S
@@ -0,0 +1,16 @@
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ # op vA, vB, field /* CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ JAL(MterpIputObject)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_object_quick.S b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
new file mode 100644
index 0000000..c3f1526
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
@@ -0,0 +1,11 @@
+ /* For: iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ JAL(MterpIputObjectQuick)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_quick.S b/runtime/interpreter/mterp/mips/op_iput_quick.S
new file mode 100644
index 0000000..0829666
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_quick.S
@@ -0,0 +1,14 @@
+%default { "store":"sw" }
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ $store a0, 0(t0) # obj.field (8/16/32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_short.S b/runtime/interpreter/mterp/mips/op_iput_short.S
new file mode 100644
index 0000000..389b0bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_short.S
@@ -0,0 +1 @@
+%include "mips/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_short_quick.S b/runtime/interpreter/mterp/mips/op_iput_short_quick.S
new file mode 100644
index 0000000..4bc84eb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide.S b/runtime/interpreter/mterp/mips/op_iput_wide.S
new file mode 100644
index 0000000..6d23f8c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_wide.S
@@ -0,0 +1,15 @@
+ # iput-wide vA, vB, field /* CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ EAS2(a2, rFP, a2) # a2 <- &fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet64InstanceFromMterp)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
new file mode 100644
index 0000000..9fdb847
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
@@ -0,0 +1,14 @@
+ # iput-wide-quick vA, vB, offset /* CCCC */
+ GET_OPA4(a0) # a0 <- A(+)
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
+ # check object for null
+ beqz a2, common_errNullObject # object was null
+ EAS2(a3, rFP, a0) # a3 <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
+ FETCH(a3, 1) # a3 <- field byte offset
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu a2, a2, a3 # a2 <- &obj.field (64 bits, aligned)
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_long_to_double.S b/runtime/interpreter/mterp/mips/op_long_to_double.S
new file mode 100644
index 0000000..b83aaf4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_long_to_double.S
@@ -0,0 +1 @@
+%include "mips/funopWide.S" {"instr":"JAL(__floatdidf)", "ld_arg":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/runtime/interpreter/mterp/mips/op_long_to_float.S b/runtime/interpreter/mterp/mips/op_long_to_float.S
new file mode 100644
index 0000000..27faba5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_long_to_float.S
@@ -0,0 +1 @@
+%include "mips/unopNarrower.S" {"instr":"JAL(__floatdisf)", "load":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/runtime/interpreter/mterp/mips/op_long_to_int.S b/runtime/interpreter/mterp/mips/op_long_to_int.S
new file mode 100644
index 0000000..949c180
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "mips/op_move.S"
diff --git a/runtime/interpreter/mterp/mips/op_monitor_enter.S b/runtime/interpreter/mterp/mips/op_monitor_enter.S
new file mode 100644
index 0000000..20d9029
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_monitor_enter.S
@@ -0,0 +1,13 @@
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC()
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ JAL(artLockObjectFromCode) # v0 <- artLockObject(obj, self)
+ bnez v0, MterpException
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_monitor_exit.S b/runtime/interpreter/mterp/mips/op_monitor_exit.S
new file mode 100644
index 0000000..1eadff9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_monitor_exit.S
@@ -0,0 +1,17 @@
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC()
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ JAL(artUnlockObjectFromCode) # v0 <- artUnlockObject(obj, self)
+ bnez v0, MterpException
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move.S b/runtime/interpreter/mterp/mips/op_move.S
new file mode 100644
index 0000000..76588ba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[A] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_16.S b/runtime/interpreter/mterp/mips/op_move_16.S
new file mode 100644
index 0000000..f7de6c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(a1, 2) # a1 <- BBBB
+ FETCH(a0, 1) # a0 <- AAAA
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT(a2, a0) # fp[AAAA] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[AAAA] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_exception.S b/runtime/interpreter/mterp/mips/op_move_exception.S
new file mode 100644
index 0000000..f04a035
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_exception.S
@@ -0,0 +1,8 @@
+ /* move-exception vAA */
+ GET_OPA(a2) # a2 <- AA
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_from16.S b/runtime/interpreter/mterp/mips/op_move_from16.S
new file mode 100644
index 0000000..b8be741
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_from16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT(a2, a0) # fp[AA] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[AA] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_object.S b/runtime/interpreter/mterp/mips/op_move_object.S
new file mode 100644
index 0000000..9420ff3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_object.S
@@ -0,0 +1 @@
+%include "mips/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_16.S b/runtime/interpreter/mterp/mips/op_move_object_16.S
new file mode 100644
index 0000000..d6454c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_object_16.S
@@ -0,0 +1 @@
+%include "mips/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_from16.S b/runtime/interpreter/mterp/mips/op_move_object_from16.S
new file mode 100644
index 0000000..db0aca1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "mips/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result.S b/runtime/interpreter/mterp/mips/op_move_result.S
new file mode 100644
index 0000000..315c68e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_result.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JValue
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT(a0, a2) # fp[AA] <- a0
+ .else
+ SET_VREG(a0, a2) # fp[AA] <- a0
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_result_object.S b/runtime/interpreter/mterp/mips/op_move_result_object.S
new file mode 100644
index 0000000..fcbffee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_result_object.S
@@ -0,0 +1 @@
+%include "mips/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result_wide.S b/runtime/interpreter/mterp/mips/op_move_result_wide.S
new file mode 100644
index 0000000..940c1ff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_result_wide.S
@@ -0,0 +1,8 @@
+ /* move-result-wide vAA */
+ GET_OPA(a2) # a2 <- AA
+ lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JValue
+ LOAD64(a0, a1, a3) # a0/a1 <- retval.j
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_wide.S b/runtime/interpreter/mterp/mips/op_move_wide.S
new file mode 100644
index 0000000..dd224c3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_wide.S
@@ -0,0 +1,10 @@
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ GET_OPA4(a2) # a2 <- A(+)
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_16.S b/runtime/interpreter/mterp/mips/op_move_wide_16.S
new file mode 100644
index 0000000..d8761eb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_wide_16.S
@@ -0,0 +1,10 @@
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 2) # a3 <- BBBB
+ FETCH(a2, 1) # a2 <- AAAA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[AAAA] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_from16.S b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
new file mode 100644
index 0000000..2103fa1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
@@ -0,0 +1,10 @@
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 1) # a3 <- BBBB
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_mul_double.S b/runtime/interpreter/mterp/mips/op_mul_double.S
new file mode 100644
index 0000000..44a473b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
new file mode 100644
index 0000000..4e5c230
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float.S b/runtime/interpreter/mterp/mips/op_mul_float.S
new file mode 100644
index 0000000..abc9390
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
new file mode 100644
index 0000000..2469109
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int.S b/runtime/interpreter/mterp/mips/op_mul_int.S
new file mode 100644
index 0000000..266823c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
new file mode 100644
index 0000000..b7dc5d3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
new file mode 100644
index 0000000..fb4c8ec
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
new file mode 100644
index 0000000..6d2e7de
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_long.S b/runtime/interpreter/mterp/mips/op_mul_long.S
new file mode 100644
index 0000000..803bbec
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_long.S
@@ -0,0 +1,43 @@
+ /*
+ * Signed 64-bit integer multiply.
+ * a1 a0
+ * x a3 a2
+ * -------------
+ * a2a1 a2a0
+ * a3a0
+ * a3a1 (<= unused)
+ * ---------------
+ * v1 v0
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ and t0, a0, 255 # t0 <- BB
+ srl t1, a0, 8 # t1 <- CC
+ EAS2(t0, rFP, t0) # t0 <- &fp[BB]
+ LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
+
+ EAS2(t1, rFP, t1) # t1 <- &fp[CC]
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+
+ mul v1, a3, a0 # v1= a3a0
+#ifdef MIPS32REVGE6
+ mulu v0, a2, a0 # v0= a2a0
+ muhu t1, a2, a0
+#else
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+#endif
+ mul t0, a2, a1 # t0= a2a1
+ addu v1, v1, t1 # v1+= hi(a2a0)
+ addu v1, v1, t0 # v1= a3a0 + a2a1;
+
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ b .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(v0, v1, a0) # vAA::vAA+1 <- v0(low) :: v1(high)
+ GOTO_OPCODE(t0) # jump to next instruction
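The column diagram above is the standard 32x32->64 decomposition: the low result word is lo(a2a0), and the high word is hi(a2a0) + lo(a3a0) + lo(a2a1); the a3a1 partial product only affects bits above 64 and is dropped. A reference sketch in C of the same arithmetic (illustrative only, not part of the change):

    #include <stdint.h>

    /* Mirrors the mul/multu(+mfhi/mflo) sequence in the handler above. */
    static inline int64_t mul_long(int64_t vbb, int64_t vcc) {
        uint32_t a0 = (uint32_t)vbb;           /* low word of vBB        */
        uint32_t a1 = (uint32_t)(vbb >> 32);   /* high word of vBB       */
        uint32_t a2 = (uint32_t)vcc;           /* low word of vCC        */
        uint32_t a3 = (uint32_t)(vcc >> 32);   /* high word of vCC       */

        uint64_t a2a0 = (uint64_t)a2 * a0;     /* multu a2, a0           */
        uint32_t v0 = (uint32_t)a2a0;          /* mflo: low result word  */
        uint32_t v1 = (uint32_t)(a2a0 >> 32)   /* mfhi: hi(a2a0)         */
                    + (uint32_t)(a3 * a0)      /* + lo(a3a0)             */
                    + (uint32_t)(a2 * a1);     /* + lo(a2a1)             */
        return (int64_t)(((uint64_t)v1 << 32) | v0);  /* vAA+1 : vAA     */
    }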
diff --git a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
new file mode 100644
index 0000000..6950b71
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
@@ -0,0 +1,31 @@
+ /*
+ * See op_mul_long.S for more details
+ */
+ /* mul-long/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
+
+ GET_OPB(t1) # t1 <- B
+ EAS2(t1, rFP, t1) # t1 <- &fp[B]
+ LOAD64(a2, a3, t1) # a2/a3 <- vB/vB+1
+
+ mul v1, a3, a0 # v1= a3a0
+#ifdef MIPS32REVGE6
+ mulu v0, a2, a0 # v0= a2a0
+ muhu t1, a2, a0
+#else
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+#endif
+ mul t2, a2, a1 # t2= a2a1
+ addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
+ addu v1, v1, t2 # v1= v1 + a2a1;
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ # vAA <- v0 (low)
+ SET_VREG64(v0, v1, rOBJ) # vAA+1 <- v1 (high)
+ GOTO_OPCODE(t1) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_neg_double.S b/runtime/interpreter/mterp/mips/op_neg_double.S
new file mode 100644
index 0000000..89cc918
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_double.S
@@ -0,0 +1 @@
+%include "mips/unopWide.S" {"instr":"addu a1, a1, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_float.S b/runtime/interpreter/mterp/mips/op_neg_float.S
new file mode 100644
index 0000000..e702755
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_float.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"instr":"addu a0, a0, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_int.S b/runtime/interpreter/mterp/mips/op_neg_int.S
new file mode 100644
index 0000000..4461731
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_int.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"instr":"negu a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_long.S b/runtime/interpreter/mterp/mips/op_neg_long.S
new file mode 100644
index 0000000..71e60f5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_neg_long.S
@@ -0,0 +1 @@
+%include "mips/unopWide.S" {"result0":"v0", "result1":"v1", "preinstr":"negu v0, a0", "instr":"negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0"}
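The negu/sltu/subu sequence in the instr string is word-wise two's-complement negation: negate both halves, then subtract a borrow from the high half whenever the negated low half is non-zero. A reference sketch (illustrative only):

    #include <stdint.h>

    /* Mirrors "negu v0, a0; negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0". */
    static inline void neg_long(uint32_t lo, uint32_t hi,
                                uint32_t* out_lo, uint32_t* out_hi) {
        uint32_t v0 = 0u - lo;            /* negu v0, a0       */
        uint32_t v1 = 0u - hi;            /* negu v1, a1       */
        uint32_t borrow = (v0 != 0);      /* sltu a0, zero, v0 */
        *out_lo = v0;
        *out_hi = v1 - borrow;            /* subu v1, v1, a0   */
    }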
diff --git a/runtime/interpreter/mterp/mips/op_new_array.S b/runtime/interpreter/mterp/mips/op_new_array.S
new file mode 100644
index 0000000..4a6512d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_new_array.S
@@ -0,0 +1,18 @@
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ JAL(MterpNewArray)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_new_instance.S b/runtime/interpreter/mterp/mips/op_new_instance.S
new file mode 100644
index 0000000..51a09b2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_new_instance.S
@@ -0,0 +1,13 @@
+ /*
+ * Create a new instance of a class.
+ */
+ # new-instance vAA, class /* BBBB */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rSELF
+ move a2, rINST
+ JAL(MterpNewInstance)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_nop.S b/runtime/interpreter/mterp/mips/op_nop.S
new file mode 100644
index 0000000..3565631
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_nop.S
@@ -0,0 +1,3 @@
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_not_int.S b/runtime/interpreter/mterp/mips/op_not_int.S
new file mode 100644
index 0000000..55d8cc1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_not_int.S
@@ -0,0 +1 @@
+%include "mips/unop.S" {"instr":"not a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_not_long.S b/runtime/interpreter/mterp/mips/op_not_long.S
new file mode 100644
index 0000000..9e7c95b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_not_long.S
@@ -0,0 +1 @@
+%include "mips/unopWide.S" {"preinstr":"not a0, a0", "instr":"not a1, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int.S b/runtime/interpreter/mterp/mips/op_or_int.S
new file mode 100644
index 0000000..c7ce760
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_2addr.S b/runtime/interpreter/mterp/mips/op_or_int_2addr.S
new file mode 100644
index 0000000..192d611
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit16.S b/runtime/interpreter/mterp/mips/op_or_int_lit16.S
new file mode 100644
index 0000000..f4ef75f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit8.S b/runtime/interpreter/mterp/mips/op_or_int_lit8.S
new file mode 100644
index 0000000..f6212e2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long.S b/runtime/interpreter/mterp/mips/op_or_long.S
new file mode 100644
index 0000000..0f94486
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long_2addr.S b/runtime/interpreter/mterp/mips/op_or_long_2addr.S
new file mode 100644
index 0000000..43c3d05
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_packed_switch.S b/runtime/interpreter/mterp/mips/op_packed_switch.S
new file mode 100644
index 0000000..93fae97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_packed_switch.S
@@ -0,0 +1,57 @@
+%default { "func":"MterpDoPackedSwitch" }
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_PROFILE_BRANCHES
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL($func) # v0 <- code-unit branch offset
+ move rINST, v0
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, .L${opcode}_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+#else
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL($func) # v0 <- code-unit branch offset
+ move rINST, v0
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+%break
+
+.L${opcode}_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
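$func (MterpDoPackedSwitch or MterpDoSparseSwitch) is called with the payload address computed from +BBBBbbbb and with vAA, and returns a branch offset in 16-bit code units that the handler doubles into bytes for FETCH_ADVANCE_INST_RB. The sketch below shows the kind of lookup the packed-switch helper performs; it follows the packed-switch payload layout, and the function name, signature and fall-through offset of 3 are assumptions made for illustration, not taken from this change:

    #include <stdint.h>
    #include <string.h>

    /* payload[0] = ident (0x0100), payload[1] = size,
     * payload[2..3] = first_key, payload[4..] = int32 targets[size]. */
    static int32_t do_packed_switch(const uint16_t* payload, int32_t test_val) {
        uint16_t size = payload[1];
        int32_t first_key;
        memcpy(&first_key, payload + 2, sizeof(first_key));
        uint32_t index = (uint32_t)test_val - (uint32_t)first_key;
        if (index >= size) {
            return 3;  /* no match: fall through past the 3-code-unit instruction */
        }
        int32_t target;
        memcpy(&target, payload + 4 + 2 * index, sizeof(target));
        return target;  /* branch offset, in code units */
    }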
diff --git a/runtime/interpreter/mterp/mips/op_rem_double.S b/runtime/interpreter/mterp/mips/op_rem_double.S
new file mode 100644
index 0000000..a6890a8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
new file mode 100644
index 0000000..a24e160
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float.S b/runtime/interpreter/mterp/mips/op_rem_float.S
new file mode 100644
index 0000000..ac3d50c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
new file mode 100644
index 0000000..7f0a932
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_int.S b/runtime/interpreter/mterp/mips/op_rem_int.S
new file mode 100644
index 0000000..c2a334a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
new file mode 100644
index 0000000..46c353f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
new file mode 100644
index 0000000..2894ad3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
new file mode 100644
index 0000000..582248b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
@@ -0,0 +1,5 @@
+#ifdef MIPS32REVGE6
+%include "mips/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
+#else
+%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_long.S b/runtime/interpreter/mterp/mips/op_rem_long.S
new file mode 100644
index 0000000..e3eb19b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
new file mode 100644
index 0000000..8fc9fdb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_return.S b/runtime/interpreter/mterp/mips/op_return.S
new file mode 100644
index 0000000..894ae18
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return.S
@@ -0,0 +1,18 @@
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(v0, a2) # v0 <- vAA
+ move v1, zero
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_object.S b/runtime/interpreter/mterp/mips/op_return_object.S
new file mode 100644
index 0000000..7350e00
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_object.S
@@ -0,0 +1 @@
+%include "mips/op_return.S"
diff --git a/runtime/interpreter/mterp/mips/op_return_void.S b/runtime/interpreter/mterp/mips/op_return_void.S
new file mode 100644
index 0000000..35c1326
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_void.S
@@ -0,0 +1,11 @@
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ move v0, zero
+ move v1, zero
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
new file mode 100644
index 0000000..56968b5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
@@ -0,0 +1,9 @@
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ move v0, zero
+ move v1, zero
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_wide.S b/runtime/interpreter/mterp/mips/op_return_wide.S
new file mode 100644
index 0000000..91d62bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_return_wide.S
@@ -0,0 +1,16 @@
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ LOAD64(v0, v1, a2) # v0/v1 <- vAA/vAA+1
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int.S b/runtime/interpreter/mterp/mips/op_rsub_int.S
new file mode 100644
index 0000000..f7e61bb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rsub_int.S
@@ -0,0 +1,2 @@
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "mips/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
new file mode 100644
index 0000000..3968a5e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
new file mode 100644
index 0000000..3efcfbb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget.S
@@ -0,0 +1,25 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+ /*
+ * General SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ .extern $helper
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL($helper)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA(a2) # a2 <- AA
+ PREFETCH_INST(2)
+ bnez a3, MterpException # bail out
+.if $is_object
+ SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
+.else
+ SET_VREG(v0, a2) # fp[AA] <- v0
+.endif
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S
new file mode 100644
index 0000000..45a5a70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S
new file mode 100644
index 0000000..319122c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_byte.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetByteStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S
new file mode 100644
index 0000000..7103847
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_char.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetCharStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S
new file mode 100644
index 0000000..b205f51
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_object.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S
new file mode 100644
index 0000000..3301823
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_short.S
@@ -0,0 +1 @@
+%include "mips/op_sget.S" {"helper":"artGetShortStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
new file mode 100644
index 0000000..7aee386
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sget_wide.S
@@ -0,0 +1,17 @@
+ /*
+ * 64-bit SGET handler.
+ */
+ # sget-wide vAA, field /* BBBB */
+ .extern artGet64StaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGet64StaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ bnez a3, MterpException
+ GET_OPA(a1) # a1 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ SET_VREG64(v0, v1, a1) # vAA/vAA+1 <- v0/v1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_shl_int.S b/runtime/interpreter/mterp/mips/op_shl_int.S
new file mode 100644
index 0000000..15cbe94
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
new file mode 100644
index 0000000..ef9bd65
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
new file mode 100644
index 0000000..d2afb53
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_long.S b/runtime/interpreter/mterp/mips/op_shl_long.S
new file mode 100644
index 0000000..0121669
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_long.S
@@ -0,0 +1,31 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t2) # t2 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v1, a2, 0x20 # v1 <- shift & 0x20
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ bnez v1, .L${opcode}_finish
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+ SET_VREG64_GOTO(zero, v0, t2, t0) # vAA/vAA+1 <- rlo/rhi
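Only the low six bits of vCC matter here: the register shifts use the low five bits, and the & 0x20 test selects the shift-by-32-or-more case, where the low input word simply becomes the high result word. The shr-long and ushr-long handlers below follow the same shape, with arithmetic/logical right shifts and sign/zero fill instead. A reference sketch of the shl-long case (illustrative only):

    #include <stdint.h>

    /* Mirrors the handler above, operating on the (lo, hi) word pair. */
    static inline void shl_long(uint32_t alo, uint32_t ahi, uint32_t shift,
                                uint32_t* rlo, uint32_t* rhi) {
        uint32_t s = shift & 31;
        if (shift & 0x20) {                 /* shifting by 32..63             */
            *rlo = 0;
            *rhi = alo << s;                /* low word slides into high word */
        } else {                            /* shifting by 0..31              */
            *rlo = alo << s;
            *rhi = (ahi << s) | (s ? (alo >> (32 - s)) : 0);
        }
    }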
diff --git a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
new file mode 100644
index 0000000..8ce6058
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
+ LOAD64(a0, a1, t2) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v1, a2, 0x20 # v1 <- shift & 0x20
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ bnez v1, .L${opcode}_finish
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+ SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_int.S b/runtime/interpreter/mterp/mips/op_shr_int.S
new file mode 100644
index 0000000..6110839
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
new file mode 100644
index 0000000..e00ff5b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
new file mode 100644
index 0000000..d058f58
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_long.S b/runtime/interpreter/mterp/mips/op_shr_long.S
new file mode 100644
index 0000000..4c42758
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_long.S
@@ -0,0 +1,31 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t3) # t3 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+ sra a3, a1, 31 # a3<- sign(ah)
+ SET_VREG64_GOTO(v1, a3, t3, t0) # vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
new file mode 100644
index 0000000..3adc085
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ GET_OPA4(t2) # t2 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t0, rFP, t2) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+ sra a3, a1, 31 # a3<- sign(ah)
+ SET_VREG64_GOTO(v1, a3, t2, t0) # vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_sparse_switch.S b/runtime/interpreter/mterp/mips/op_sparse_switch.S
new file mode 100644
index 0000000..670f464
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "mips/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
new file mode 100644
index 0000000..ee313b9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput.S
@@ -0,0 +1,19 @@
+%default { "helper":"artSet32StaticFromCode"}
+ /*
+ * General SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ GET_OPA(a3) # a3 <- AA
+ GET_VREG(a1, a3) # a1 <- fp[AA], the value to store
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL($helper)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S
new file mode 100644
index 0000000..7909ef5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S
new file mode 100644
index 0000000..7909ef5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_byte.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S
new file mode 100644
index 0000000..188195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_char.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_object.S b/runtime/interpreter/mterp/mips/op_sput_object.S
new file mode 100644
index 0000000..4f9034e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_object.S
@@ -0,0 +1,16 @@
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput-object
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ JAL(MterpSputObject)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S
new file mode 100644
index 0000000..188195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_short.S
@@ -0,0 +1 @@
+%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
new file mode 100644
index 0000000..1e11466
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sput_wide.S
@@ -0,0 +1,17 @@
+ /*
+ * 64-bit SPUT handler.
+ */
+ # sput-wide vAA, field /* BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet64IndirectStaticFromMterp)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sub_double.S b/runtime/interpreter/mterp/mips/op_sub_double.S
new file mode 100644
index 0000000..9473218
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_double.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
new file mode 100644
index 0000000..7ce7c74
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinopWide2addr.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float.S b/runtime/interpreter/mterp/mips/op_sub_float.S
new file mode 100644
index 0000000..04650d9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_float.S
@@ -0,0 +1 @@
+%include "mips/fbinop.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
new file mode 100644
index 0000000..dfe935c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "mips/fbinop2addr.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int.S b/runtime/interpreter/mterp/mips/op_sub_int.S
new file mode 100644
index 0000000..43da1b6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
new file mode 100644
index 0000000..cf34aa6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_long.S b/runtime/interpreter/mterp/mips/op_sub_long.S
new file mode 100644
index 0000000..0f58e8e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_long.S
@@ -0,0 +1,8 @@
+/*
+ * For little-endian targets the code sequence looks as follows:
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * sltu a0,a0,v0
+ * subu v1,v1,a0
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
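The sltu in the sequence computes the borrow out of the low-word subtraction: a borrow occurred exactly when the low-word result wrapped above the original low word. A reference sketch (illustrative only):

    #include <stdint.h>

    /* Mirrors "subu v0, a0, a2; subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0". */
    static inline void sub_long(uint32_t alo, uint32_t ahi,
                                uint32_t blo, uint32_t bhi,
                                uint32_t* rlo, uint32_t* rhi) {
        uint32_t v0 = alo - blo;          /* low-word difference          */
        uint32_t v1 = ahi - bhi;          /* high-word difference         */
        uint32_t borrow = (alo < v0);     /* sltu: wrapped => borrow of 1 */
        *rlo = v0;
        *rhi = v1 - borrow;
    }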
diff --git a/runtime/interpreter/mterp/mips/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
new file mode 100644
index 0000000..aa256c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
@@ -0,0 +1,4 @@
+/*
+ * See op_sub_long.S for more details
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/runtime/interpreter/mterp/mips/op_throw.S b/runtime/interpreter/mterp/mips/op_throw.S
new file mode 100644
index 0000000..adc8b04
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_throw.S
@@ -0,0 +1,11 @@
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC() # exception handler can throw
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a1, a2) # a1 <- vAA (exception object)
+ # null object?
+ beqz a1, common_errNullObject # yes, throw an NPE instead
+ sw a1, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/mips/op_unused_3e.S b/runtime/interpreter/mterp/mips/op_unused_3e.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_3e.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_3f.S b/runtime/interpreter/mterp/mips/op_unused_3f.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_3f.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_40.S b/runtime/interpreter/mterp/mips/op_unused_40.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_40.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_41.S b/runtime/interpreter/mterp/mips/op_unused_41.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_41.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_42.S b/runtime/interpreter/mterp/mips/op_unused_42.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_42.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_43.S b/runtime/interpreter/mterp/mips/op_unused_43.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_43.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_73.S b/runtime/interpreter/mterp/mips/op_unused_73.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_73.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_79.S b/runtime/interpreter/mterp/mips/op_unused_79.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_79.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_7a.S b/runtime/interpreter/mterp/mips/op_unused_7a.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_7a.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f3.S b/runtime/interpreter/mterp/mips/op_unused_f3.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f3.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f4.S b/runtime/interpreter/mterp/mips/op_unused_f4.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f4.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f5.S b/runtime/interpreter/mterp/mips/op_unused_f5.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f5.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f6.S b/runtime/interpreter/mterp/mips/op_unused_f6.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f6.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f7.S b/runtime/interpreter/mterp/mips/op_unused_f7.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f7.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f8.S b/runtime/interpreter/mterp/mips/op_unused_f8.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f8.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f9.S b/runtime/interpreter/mterp/mips/op_unused_f9.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_f9.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fa.S b/runtime/interpreter/mterp/mips/op_unused_fa.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fa.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fb.S b/runtime/interpreter/mterp/mips/op_unused_fb.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fb.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fc.S b/runtime/interpreter/mterp/mips/op_unused_fc.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fc.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fd.S b/runtime/interpreter/mterp/mips/op_unused_fd.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fd.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fe.S b/runtime/interpreter/mterp/mips/op_unused_fe.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_fe.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_ff.S b/runtime/interpreter/mterp/mips/op_unused_ff.S
new file mode 100644
index 0000000..99ef3cf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_unused_ff.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int.S b/runtime/interpreter/mterp/mips/op_ushr_int.S
new file mode 100644
index 0000000..b95472b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
new file mode 100644
index 0000000..fc17778
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
new file mode 100644
index 0000000..c82cfba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long.S b/runtime/interpreter/mterp/mips/op_ushr_long.S
new file mode 100644
index 0000000..2e227a9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_long.S
@@ -0,0 +1,31 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+ SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
new file mode 100644
index 0000000..ccf1f7e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ GET_OPA4(t3) # t3 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t0, rFP, t3) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
+%break
+
+.L${opcode}_finish:
+ SET_VREG64_GOTO(v1, zero, t3, t0) # vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_xor_int.S b/runtime/interpreter/mterp/mips/op_xor_int.S
new file mode 100644
index 0000000..6c23f1f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int.S
@@ -0,0 +1 @@
+%include "mips/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
new file mode 100644
index 0000000..5ee1667
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "mips/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
new file mode 100644
index 0000000..2af37a6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "mips/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
new file mode 100644
index 0000000..944ed69
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "mips/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long.S b/runtime/interpreter/mterp/mips/op_xor_long.S
new file mode 100644
index 0000000..93f8f70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_long.S
@@ -0,0 +1 @@
+%include "mips/binopWide.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
new file mode 100644
index 0000000..49f3fa4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "mips/binopWide2addr.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/unop.S b/runtime/interpreter/mterp/mips/unop.S
new file mode 100644
index 0000000..52a8f0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unop.S
@@ -0,0 +1,19 @@
+%default {"preinstr":"", "result0":"a0"}
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO($result0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/mips/unopNarrower.S b/runtime/interpreter/mterp/mips/unopNarrower.S
new file mode 100644
index 0000000..9c38bad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unopNarrower.S
@@ -0,0 +1,24 @@
+%default {"load":"LOAD64_F(fa0, fa0f, a3)"}
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter,
+ * except for long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ $load
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+
+.L${opcode}_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/unopWide.S b/runtime/interpreter/mterp/mips/unopWide.S
new file mode 100644
index 0000000..fd25dff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unopWide.S
@@ -0,0 +1,20 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double,
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- vB/vB+1
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64($result0, $result1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/unopWider.S b/runtime/interpreter/mterp/mips/unopWider.S
new file mode 100644
index 0000000..1c18837
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unopWider.S
@@ -0,0 +1,19 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64($result0, $result1, rOBJ) # vA/vA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/mips/unused.S b/runtime/interpreter/mterp/mips/unused.S
new file mode 100644
index 0000000..ffa00be
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
diff --git a/runtime/interpreter/mterp/mips/zcmp.S b/runtime/interpreter/mterp/mips/zcmp.S
new file mode 100644
index 0000000..1fa1385
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/zcmp.S
@@ -0,0 +1,32 @@
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ b${revcmp} a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li rINST, 2 # rINST <- BYTE branch dist for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a1, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 3f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+3:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
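Each if-*z opcode instantiates this template with the reverse of its own test, so the taken path keeps the fetched offset and the not-taken path overwrites it with 2, the width of the if instruction in code units; the offset is then doubled into bytes, and backward branches are routed through the suspend check. A C-level sketch of the offset selection, using if-lez (revcmp "gt") as the example (illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    static bool revcmp_gt(int32_t v) {       /* revcmp supplied by if-lez */
        return v > 0;
    }

    static int32_t zcmp_offset(int32_t vAA, int16_t branch_offset,
                               bool (*revcmp)(int32_t)) {
        int32_t offset = branch_offset;      /* FETCH_S(rINST, 1)           */
        if (revcmp(vAA)) {
            offset = 2;                      /* li rINST, 2: not-taken path */
        }
        return offset;                       /* doubled into bytes later    */
    }

For if-lez with vAA <= 0 this keeps the encoded offset (branch taken); otherwise it yields 2 and execution continues at the following instruction.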
diff --git a/runtime/interpreter/mterp/mips64/alt_stub.S b/runtime/interpreter/mterp/mips64/alt_stub.S
new file mode 100644
index 0000000..bd76a1b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/alt_stub.S
@@ -0,0 +1,14 @@
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (${opnum} * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
diff --git a/runtime/interpreter/mterp/mips64/bincmp.S b/runtime/interpreter/mterp/mips64/bincmp.S
new file mode 100644
index 0000000..d39c900
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/bincmp.S
@@ -0,0 +1,32 @@
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lh a4, 2(rPC) # a4 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+
+ b${condition}c a0, a1, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + CCCC * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # CCCC * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
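For instance, an if-eq handler would instantiate this template with the "eq" condition fragment, along the lines of the sketch below (illustrative only; the per-opcode files are defined elsewhere in the patch):

    %include "mips64/bincmp.S" { "condition":"eq" }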
diff --git a/runtime/interpreter/mterp/mips64/binop.S b/runtime/interpreter/mterp/mips64/binop.S
new file mode 100644
index 0000000..fab48b7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binop.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a4 # vAA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
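To see what the template expands to, take op_add_int.S (added later in this patch), which supplies instr = "addu a0, a0, a1" and keeps the defaults. After substitution, with the chkzero block elided, the generated handler is roughly:

    /* binop vAA, vBB, vCC */
    srl     a4, rINST, 8                # a4 <- AA
    lbu     a2, 2(rPC)                  # a2 <- BB
    lbu     a3, 3(rPC)                  # a3 <- CC
    GET_VREG a0, a2                     # a0 <- vBB
    GET_VREG a1, a3                     # a1 <- vCC
    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
    addu    a0, a0, a1                  # a0 <- vBB + vCC
    GET_INST_OPCODE v0                  # extract opcode from rINST
    SET_VREG a0, a4                     # vAA <- result
    GOTO_OPCODE v0                      # jump to next instruction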
diff --git a/runtime/interpreter/mterp/mips64/binop2addr.S b/runtime/interpreter/mterp/mips64/binop2addr.S
new file mode 100644
index 0000000..1ae73f5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binop2addr.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a2 # vA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopLit16.S b/runtime/interpreter/mterp/mips64/binopLit16.S
new file mode 100644
index 0000000..9257758
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopLit16.S
@@ -0,0 +1,28 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a2 # vA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
diff --git a/runtime/interpreter/mterp/mips64/binopLit8.S b/runtime/interpreter/mterp/mips64/binopLit8.S
new file mode 100644
index 0000000..f4a0bba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopLit8.S
@@ -0,0 +1,29 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a2 # vAA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
diff --git a/runtime/interpreter/mterp/mips64/binopWide.S b/runtime/interpreter/mterp/mips64/binopWide.S
new file mode 100644
index 0000000..732f0d6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopWide.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE $result, a4 # vAA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopWide2addr.S b/runtime/interpreter/mterp/mips64/binopWide2addr.S
new file mode 100644
index 0000000..45d8d82
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/binopWide2addr.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE $result, a2 # vA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/entry.S b/runtime/interpreter/mterp/mips64/entry.S
new file mode 100644
index 0000000..ae6c26b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/entry.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Interpreter entry point.
+ */
+
+ .set reorder
+
+ .text
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+ .balign 16
+/*
+ * On entry:
+ * a0 Thread* self
+ * a1 code_item
+ * a2 ShadowFrame
+ * a3 JValue* result_register
+ *
+ */
+ExecuteMterpImpl:
+ .cfi_startproc
+ .cpsetup t9, t8, ExecuteMterpImpl
+
+ .cfi_def_cfa sp, 0
+ daddu sp, sp, -STACK_SIZE
+ .cfi_adjust_cfa_offset STACK_SIZE
+
+ sd t8, STACK_OFFSET_GP(sp)
+ .cfi_rel_offset 28, STACK_OFFSET_GP
+ sd ra, STACK_OFFSET_RA(sp)
+ .cfi_rel_offset 31, STACK_OFFSET_RA
+
+ sd s0, STACK_OFFSET_S0(sp)
+ .cfi_rel_offset 16, STACK_OFFSET_S0
+ sd s1, STACK_OFFSET_S1(sp)
+ .cfi_rel_offset 17, STACK_OFFSET_S1
+ sd s2, STACK_OFFSET_S2(sp)
+ .cfi_rel_offset 18, STACK_OFFSET_S2
+ sd s3, STACK_OFFSET_S3(sp)
+ .cfi_rel_offset 19, STACK_OFFSET_S3
+ sd s4, STACK_OFFSET_S4(sp)
+ .cfi_rel_offset 20, STACK_OFFSET_S4
+ sd s5, STACK_OFFSET_S5(sp)
+ .cfi_rel_offset 21, STACK_OFFSET_S5
+
+ /* Remember the return register */
+ sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+ /* Remember the code_item */
+ sd a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+ /* set up "named" registers */
+ move rSELF, a0
+ daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
+ lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+ dlsa rREFS, v0, rFP, 2
+ daddu rPC, a1, CODEITEM_INSNS_OFFSET
+ lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
+ dlsa rPC, v0, rPC, 1
+ EXPORT_PC
+
+ /* Starting ibase */
+ REFRESH_IBASE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+ /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/mips64/fallback.S b/runtime/interpreter/mterp/mips64/fallback.S
new file mode 100644
index 0000000..560b994
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fallback.S
@@ -0,0 +1,2 @@
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/fbinop.S b/runtime/interpreter/mterp/mips64/fbinop.S
new file mode 100644
index 0000000..f19dd1c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinop.S
@@ -0,0 +1,18 @@
+%default {}
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinop2addr.S b/runtime/interpreter/mterp/mips64/fbinop2addr.S
new file mode 100644
index 0000000..2e2cd7e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinop2addr.S
@@ -0,0 +1,17 @@
+%default {}
+ /*
+ * Generic 32-bit "/2addr" floating-point operation.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f0, a2 # f0 <- vA
+ GET_VREG_FLOAT f1, a3 # f1 <- vB
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide.S b/runtime/interpreter/mterp/mips64/fbinopWide.S
new file mode 100644
index 0000000..8915c94
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinopWide.S
@@ -0,0 +1,18 @@
+%default {}
+ /*
+ * Generic 64-bit floating-point operation.
+ *
+ * For: add-double, sub-double, mul-double, div-double.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide2addr.S b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
new file mode 100644
index 0000000..a3f4eaa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
@@ -0,0 +1,17 @@
+%default {}
+ /*
+ * Generic 64-bit "/2addr" floating-point operation.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f0, a2 # f0 <- vA
+ GET_VREG_DOUBLE f1, a3 # f1 <- vB
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcmp.S b/runtime/interpreter/mterp/mips64/fcmp.S
new file mode 100644
index 0000000..2e1a3e4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcmp.S
@@ -0,0 +1,32 @@
+%default {}
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ cmp.eq.s f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.s f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcmpWide.S b/runtime/interpreter/mterp/mips64/fcmpWide.S
new file mode 100644
index 0000000..2a3a341
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcmpWide.S
@@ -0,0 +1,32 @@
+%default {}
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ cmp.eq.d f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.d f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtFooter.S b/runtime/interpreter/mterp/mips64/fcvtFooter.S
new file mode 100644
index 0000000..06e9507
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcvtFooter.S
@@ -0,0 +1,18 @@
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files;
+ * in those files its contents appear as an inline copy instead.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG$suffix $valreg, a1
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtHeader.S b/runtime/interpreter/mterp/mips64/fcvtHeader.S
new file mode 100644
index 0000000..8742e42
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/fcvtHeader.S
@@ -0,0 +1,15 @@
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG$suffix $valreg, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
new file mode 100644
index 0000000..1a2e22b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/footer.S
@@ -0,0 +1,154 @@
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+
+ .extern MterpLogDivideByZeroException
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+ .extern MterpLogArrayIndexException
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+ .extern MterpLogNullObjectException
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ld a0, THREAD_EXCEPTION_OFFSET(rSELF)
+ beqzc a0, MterpFallback # If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+ .extern MterpHandleException
+MterpException:
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpHandleException # (self, shadow_frame)
+ beqzc v0, MterpExceptionReturn # no local catch, back to caller.
+ ld a0, OFF_FP_CODE_ITEM(rFP)
+ lwu a1, OFF_FP_DEX_PC(rFP)
+ REFRESH_IBASE
+ daddu rPC, a0, CODEITEM_INSNS_OFFSET
+ dlsa rPC, a1, rPC, 1 # generate new dex_pc_ptr
+ sd rPC, OFF_FP_DEX_PC_PTR(rFP)
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a pending suspend request. Assumes rINST is already loaded and rPC has
+ * been advanced; the opcode still needs to be extracted and dispatched, and the
+ * thread flags are in ra.
+ */
+ .extern MterpSuspendCheck
+MterpCheckSuspendAndContinue:
+ REFRESH_IBASE
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ bnez ra, check1
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+check1:
+ EXPORT_PC
+ move a0, rSELF
+ jal MterpSuspendCheck # (self)
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/*
+ * Bail out to reference interpreter.
+ */
+ .extern MterpLogFallback
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogFallback
+#endif
+MterpCommonFallback:
+ li v0, 0 # signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA. Here we restore SP, restore the registers, and then restore
+ * RA to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ li v0, 1 # signal return to caller.
+ b MterpDone
+/*
+ * Returned value is expected in a0 and if it's not 64-bit, the 32 most
+ * significant bits of a0 must be 0.
+ */
+MterpReturn:
+ ld a2, OFF_FP_RESULT_REGISTER(rFP)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ sd a0, 0(a2)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, check2
+ jal MterpSuspendCheck # (self)
+check2:
+ li v0, 1 # signal return to caller.
+MterpDone:
+ ld s5, STACK_OFFSET_S5(sp)
+ .cfi_restore 21
+ ld s4, STACK_OFFSET_S4(sp)
+ .cfi_restore 20
+ ld s3, STACK_OFFSET_S3(sp)
+ .cfi_restore 19
+ ld s2, STACK_OFFSET_S2(sp)
+ .cfi_restore 18
+ ld s1, STACK_OFFSET_S1(sp)
+ .cfi_restore 17
+ ld s0, STACK_OFFSET_S0(sp)
+ .cfi_restore 16
+
+ ld ra, STACK_OFFSET_RA(sp)
+ .cfi_restore 31
+
+ ld t8, STACK_OFFSET_GP(sp)
+ .cpreturn
+ .cfi_restore 28
+
+ .set noreorder
+ jr ra
+ daddu sp, sp, STACK_SIZE
+ .cfi_adjust_cfa_offset -STACK_SIZE
+
+ .cfi_endproc
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S
new file mode 100644
index 0000000..4c3ca9e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/header.S
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <machine/regdef.h>
+
+/* TODO: add the missing file and use its FP register definitions. */
+/* #include <machine/fpregdef.h> */
+/* FP register definitions */
+#define f0 $$f0
+#define f1 $$f1
+#define f2 $$f2
+#define f3 $$f3
+#define f12 $$f12
+#define f13 $$f13
+
+/*
+ * It looks like the GNU assembler currently does not support the blec and bgtc
+ * idioms, which should translate into bgec and bltc respectively with swapped
+ * left and right register operands.
+ * TODO: remove these macros when the assembler is fixed.
+ */
+.macro blec lreg, rreg, target
+ bgec \rreg, \lreg, \target
+.endm
+.macro bgtc lreg, rreg, target
+ bltc \rreg, \lreg, \target
+.endm
+
+/*
+Mterp and MIPS64 notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rINST first 16-bit code unit of current instruction
+ s4 rIBASE interpreted instruction base pointer, used for computed goto
+ s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+*/
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rINST s3
+#define rIBASE s4
+#define rREFS s5
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+/*
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ sd rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ lhu rINST, 0(rPC)
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ daddu rPC, rPC, (\count) * 2
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ADVANCE \count
+ FETCH_INST
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ lhu rINST, ((\count) * 2)(rPC)
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, 255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+.macro GOTO_OPCODE reg
+ .set noat
+ sll AT, \reg, 7
+ daddu AT, rIBASE, AT
+ jic AT, 0
+ .set at
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ * Note, GET_VREG does sign extension to 64 bits while
+ * GET_VREG_U does zero extension to 64 bits.
+ * One is useful for arithmetic while the other is
+ * useful for storing the result value as 64-bit.
+ */
+.macro GET_VREG reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lw \reg, 0(AT)
+ .set at
+.endm
+.macro GET_VREG_U reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwu \reg, 0(AT)
+ .set at
+.endm
+.macro GET_VREG_FLOAT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwc1 \reg, 0(AT)
+ .set at
+.endm
+.macro SET_VREG reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ .set at
+.endm
+.macro SET_VREG_OBJECT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw \reg, 0(AT)
+ .set at
+.endm
+.macro SET_VREG_FLOAT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ swc1 \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ .set at
+.endm
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * Avoid unaligned memory accesses.
+ * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
+ * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
+ */
+.macro GET_VREG_WIDE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lw \reg, 0(AT)
+ lw AT, 4(AT)
+ dinsu \reg, AT, 32, 32
+ .set at
+.endm
+.macro GET_VREG_DOUBLE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwc1 \reg, 0(AT)
+ lw AT, 4(AT)
+ mthc1 AT, \reg
+ .set at
+.endm
+.macro SET_VREG_WIDE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ drotr32 \reg, \reg, 0
+ sw \reg, 4(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ sw zero, 4(AT)
+ .set at
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ sw zero, 4(AT)
+ dlsa AT, \vreg, rFP, 2
+ swc1 \reg, 0(AT)
+ mfhc1 \vreg, \reg
+ sw \vreg, 4(AT)
+ .set at
+.endm
+
+/*
+ * On-stack offsets for spilling/unspilling callee-saved registers
+ * and the frame size.
+ */
+#define STACK_OFFSET_RA 0
+#define STACK_OFFSET_GP 8
+#define STACK_OFFSET_S0 16
+#define STACK_OFFSET_S1 24
+#define STACK_OFFSET_S2 32
+#define STACK_OFFSET_S3 40
+#define STACK_OFFSET_S4 48
+#define STACK_OFFSET_S5 56
+#define STACK_SIZE 64
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN 0x80000000
+#define INT_MIN_AS_FLOAT 0xCF000000
+#define INT_MIN_AS_DOUBLE 0xC1E0000000000000
+#define LONG_MIN 0x8000000000000000
+#define LONG_MIN_AS_FLOAT 0xDF000000
+#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000
diff --git a/runtime/interpreter/mterp/mips64/invoke.S b/runtime/interpreter/mterp/mips64/invoke.S
new file mode 100644
index 0000000..4ae4fb1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/invoke.S
@@ -0,0 +1,17 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern $helper
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal $helper
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
diff --git a/runtime/interpreter/mterp/mips64/op_add_double.S b/runtime/interpreter/mterp/mips64/op_add_double.S
new file mode 100644
index 0000000..1520e32
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_double_2addr.S b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
new file mode 100644
index 0000000..c14382e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float.S b/runtime/interpreter/mterp/mips64/op_add_float.S
new file mode 100644
index 0000000..c6ed558
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float_2addr.S b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
new file mode 100644
index 0000000..4c20547
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int.S b/runtime/interpreter/mterp/mips64/op_add_int.S
new file mode 100644
index 0000000..6e569de
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_2addr.S b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
new file mode 100644
index 0000000..2a84124
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit16.S b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
new file mode 100644
index 0000000..94b053b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit8.S b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
new file mode 100644
index 0000000..3b6d734
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long.S b/runtime/interpreter/mterp/mips64/op_add_long.S
new file mode 100644
index 0000000..c8d702f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long_2addr.S b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
new file mode 100644
index 0000000..928ff54
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aget.S b/runtime/interpreter/mterp/mips64/op_aget.S
new file mode 100644
index 0000000..0472a06
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget.S
@@ -0,0 +1,29 @@
+%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if $shift
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $load a2, $data_offset(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aget_boolean.S b/runtime/interpreter/mterp/mips64/op_aget_boolean.S
new file mode 100644
index 0000000..d5be01b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_byte.S b/runtime/interpreter/mterp/mips64/op_aget_byte.S
new file mode 100644
index 0000000..084de8d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_char.S b/runtime/interpreter/mterp/mips64/op_aget_char.S
new file mode 100644
index 0000000..6c99ed5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_char.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_object.S b/runtime/interpreter/mterp/mips64/op_aget_object.S
new file mode 100644
index 0000000..6374a05
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_object.S
@@ -0,0 +1,21 @@
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ .extern artAGetObjectFromMterp
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ EXPORT_PC
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ jal artAGetObjectFromMterp # (array, index)
+ ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a4, rINST, 8 # a4 <- AA
+ PREFETCH_INST 2
+ bnez a1, MterpException
+ SET_VREG_OBJECT v0, a4 # vAA <- v0
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aget_short.S b/runtime/interpreter/mterp/mips64/op_aget_short.S
new file mode 100644
index 0000000..0158b0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_short.S
@@ -0,0 +1 @@
+%include "mips64/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_wide.S b/runtime/interpreter/mterp/mips64/op_aget_wide.S
new file mode 100644
index 0000000..0945aca
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aget_wide.S
@@ -0,0 +1,21 @@
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ */
+ /* aget-wide vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+ lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
+ dinsu a2, a3, 32, 32 # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_and_int.S b/runtime/interpreter/mterp/mips64/op_and_int.S
new file mode 100644
index 0000000..f0792a8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_2addr.S b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
new file mode 100644
index 0000000..08dc615
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit16.S b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
new file mode 100644
index 0000000..65d28ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit8.S b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
new file mode 100644
index 0000000..ab84bb7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long.S b/runtime/interpreter/mterp/mips64/op_and_long.S
new file mode 100644
index 0000000..e383ba0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long_2addr.S b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
new file mode 100644
index 0000000..f863bb9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aput.S b/runtime/interpreter/mterp/mips64/op_aput.S
new file mode 100644
index 0000000..9bfda97
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput.S
@@ -0,0 +1,29 @@
+%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if $shift
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a2, a4 # a2 <- vAA
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ $store a2, $data_offset(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_boolean.S b/runtime/interpreter/mterp/mips64/op_aput_boolean.S
new file mode 100644
index 0000000..6707a1f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_byte.S b/runtime/interpreter/mterp/mips64/op_aput_byte.S
new file mode 100644
index 0000000..7b9ce48
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_char.S b/runtime/interpreter/mterp/mips64/op_aput_char.S
new file mode 100644
index 0000000..82bc8f7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_char.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_object.S b/runtime/interpreter/mterp/mips64/op_aput_object.S
new file mode 100644
index 0000000..b132456
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_object.S
@@ -0,0 +1,14 @@
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ .extern MterpAputObject
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ jal MterpAputObject
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_short.S b/runtime/interpreter/mterp/mips64/op_aput_short.S
new file mode 100644
index 0000000..a7af294
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_short.S
@@ -0,0 +1 @@
+%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_wide.S b/runtime/interpreter/mterp/mips64/op_aput_wide.S
new file mode 100644
index 0000000..a1d7a3b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_aput_wide.S
@@ -0,0 +1,21 @@
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ */
+ /* aput-wide vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ GET_VREG_WIDE a2, a4 # a2 <- vAA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+ dsrl32 a2, a2, 0
+ sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_array_length.S b/runtime/interpreter/mterp/mips64/op_array_length.S
new file mode 100644
index 0000000..2d9e172
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_array_length.S
@@ -0,0 +1,12 @@
+ /*
+ * Return the length of an array.
+ */
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a0, a1 # a0 <- vB (object ref)
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a0, common_errNullObject # yup, fail
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a3, a2 # vB <- length
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_check_cast.S b/runtime/interpreter/mterp/mips64/op_check_cast.S
new file mode 100644
index 0000000..472595d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_check_cast.S
@@ -0,0 +1,17 @@
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class//BBBB */
+ .extern MterpCheckCast
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ dlsa a1, a1, rFP, 2 # a1 <- &object
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ jal MterpCheckCast # (index, &obj, method, self)
+ PREFETCH_INST 2
+ bnez v0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_cmp_long.S b/runtime/interpreter/mterp/mips64/op_cmp_long.S
new file mode 100644
index 0000000..6e9376c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmp_long.S
@@ -0,0 +1,13 @@
+ /* cmp-long vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ slt a2, a0, a1
+ slt a0, a1, a0
+ subu a0, a0, a2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- result
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_double.S b/runtime/interpreter/mterp/mips64/op_cmpg_double.S
new file mode 100644
index 0000000..a8e2ef9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpg_double.S
@@ -0,0 +1 @@
+%include "mips64/fcmpWide.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_float.S b/runtime/interpreter/mterp/mips64/op_cmpg_float.S
new file mode 100644
index 0000000..0c93eac
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpg_float.S
@@ -0,0 +1 @@
+%include "mips64/fcmp.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_double.S b/runtime/interpreter/mterp/mips64/op_cmpl_double.S
new file mode 100644
index 0000000..9111b06
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpl_double.S
@@ -0,0 +1 @@
+%include "mips64/fcmpWide.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_float.S b/runtime/interpreter/mterp/mips64/op_cmpl_float.S
new file mode 100644
index 0000000..b047451
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_cmpl_float.S
@@ -0,0 +1 @@
+%include "mips64/fcmp.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_const.S b/runtime/interpreter/mterp/mips64/op_const.S
new file mode 100644
index 0000000..4b0d69b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const.S
@@ -0,0 +1,9 @@
+ /* const vAA, #+BBBBbbbb */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_16.S b/runtime/interpreter/mterp/mips64/op_const_16.S
new file mode 100644
index 0000000..51e68a7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_16.S
@@ -0,0 +1,7 @@
+ /* const/16 vAA, #+BBBB */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- sign-extended BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBB
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_4.S b/runtime/interpreter/mterp/mips64/op_const_4.S
new file mode 100644
index 0000000..0a58bff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_4.S
@@ -0,0 +1,8 @@
+ /* const/4 vA, #+B */
+ ext a2, rINST, 8, 4 # a2 <- A
+ seh a0, rINST # sign extend B in rINST
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ sra a0, a0, 12 # shift B into its final position
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- +B
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_class.S b/runtime/interpreter/mterp/mips64/op_const_class.S
new file mode 100644
index 0000000..adf79df
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_class.S
@@ -0,0 +1,13 @@
+ /* const/class vAA, Class//BBBB */
+ .extern MterpConstClass
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstClass # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_high16.S b/runtime/interpreter/mterp/mips64/op_const_high16.S
new file mode 100644
index 0000000..43effb6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_high16.S
@@ -0,0 +1,8 @@
+ /* const/high16 vAA, #+BBBB0000 */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ sll a0, a0, 16 # a0 <- BBBB0000
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBB0000
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_string.S b/runtime/interpreter/mterp/mips64/op_const_string.S
new file mode 100644
index 0000000..4684c11
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_string.S
@@ -0,0 +1,13 @@
+ /* const/string vAA, String//BBBB */
+ .extern MterpConstString
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstString # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
new file mode 100644
index 0000000..47f2101
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
@@ -0,0 +1,15 @@
+ /* const/string vAA, String//BBBBBBBB */
+ .extern MterpConstString
+ EXPORT_PC
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a4, 4(rPC) # a4 <- BBBB (high)
+ srl a1, rINST, 8 # a1 <- AA
+ ins a0, a4, 16, 16 # a0 <- BBBBbbbb
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstString # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 3 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide.S b/runtime/interpreter/mterp/mips64/op_const_wide.S
new file mode 100644
index 0000000..f7eaf7c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide.S
@@ -0,0 +1,13 @@
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ srl a4, rINST, 8 # a4 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (low middle)
+ lh a2, 6(rPC) # a2 <- hhhh (high middle)
+ lh a3, 8(rPC) # a3 <- HHHH (high)
+ FETCH_ADVANCE_INST 5 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ ins a2, a3, 16, 16 # a2 = HHHHhhhh
+ dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_16.S b/runtime/interpreter/mterp/mips64/op_const_wide_16.S
new file mode 100644
index 0000000..3a70937
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide_16.S
@@ -0,0 +1,7 @@
+ /* const-wide/16 vAA, #+BBBB */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- sign-extended BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBB
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_32.S b/runtime/interpreter/mterp/mips64/op_const_wide_32.S
new file mode 100644
index 0000000..867197c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide_32.S
@@ -0,0 +1,9 @@
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_high16.S b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
new file mode 100644
index 0000000..d741631
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
@@ -0,0 +1,8 @@
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ dsll32 a0, a0, 16 # a0 <- BBBB000000000000
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_div_double.S b/runtime/interpreter/mterp/mips64/op_div_double.S
new file mode 100644
index 0000000..44998f0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_double_2addr.S b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
new file mode 100644
index 0000000..396af79
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float.S b/runtime/interpreter/mterp/mips64/op_div_float.S
new file mode 100644
index 0000000..7b09d52
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float_2addr.S b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
new file mode 100644
index 0000000..e74fdda
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int.S b/runtime/interpreter/mterp/mips64/op_div_int.S
new file mode 100644
index 0000000..fb04acb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_2addr.S b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
new file mode 100644
index 0000000..db29b84
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit16.S b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
new file mode 100644
index 0000000..e903dde
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit8.S b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
new file mode 100644
index 0000000..0559605
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long.S b/runtime/interpreter/mterp/mips64/op_div_long.S
new file mode 100644
index 0000000..01fc2b2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long_2addr.S b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
new file mode 100644
index 0000000..9627ab8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_float.S b/runtime/interpreter/mterp/mips64/op_double_to_float.S
new file mode 100644
index 0000000..2b2acee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_double_to_float.S
@@ -0,0 +1,8 @@
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+ cvt.s.d f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_int.S b/runtime/interpreter/mterp/mips64/op_double_to_int.S
new file mode 100644
index 0000000..aa2cbca
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_double_to_int.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ dli t0, INT_MIN_AS_DOUBLE
+ dmtc1 t0, f1
+ cmp.le.d f1, f1, f0
+ bc1nez f1, .L${opcode}_trunc
+ cmp.eq.d f1, f0, f0
+ li t0, INT_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .L${opcode}_done
+%break
+.L${opcode}_trunc:
+ trunc.w.d f0, f0
+ mfc1 t0, f0
+.L${opcode}_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
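
The branches above (and in the long/float variants that follow) reproduce Java's narrowing conversion by hand while the emulator lacks NAN2008: NaN becomes 0, values below the target minimum clamp to it, and in-range values are truncated toward zero. A hedged C sketch of those semantics (illustrative only, not the ART implementation):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    static int32_t java_d2i(double v) {
        if (isnan(v)) return 0;                        /* cmp.eq.d path: NaN -> 0 */
        if (v <= (double)INT32_MIN) return INT32_MIN;  /* below-range clamp       */
        if (v >= (double)INT32_MAX) return INT32_MAX;  /* above-range clamp       */
        return (int32_t)v;                             /* trunc.w.d path          */
    }

    int main(void) {
        printf("%d %d %d\n", java_d2i(NAN), java_d2i(-1e30), java_d2i(3.9));  /* 0 -2147483648 3 */
        return 0;
    }
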
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_long.S b/runtime/interpreter/mterp/mips64/op_double_to_long.S
new file mode 100644
index 0000000..777cfeb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_double_to_long.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ dli t0, LONG_MIN_AS_DOUBLE
+ dmtc1 t0, f1
+ cmp.le.d f1, f1, f0
+ bc1nez f1, .L${opcode}_trunc
+ cmp.eq.d f1, f0, f0
+ dli t0, LONG_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .L${opcode}_done
+%break
+.L${opcode}_trunc:
+ trunc.l.d f0, f0
+ dmfc1 t0, f0
+.L${opcode}_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_fill_array_data.S b/runtime/interpreter/mterp/mips64/op_fill_array_data.S
new file mode 100644
index 0000000..c90f0b9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_fill_array_data.S
@@ -0,0 +1,14 @@
+ /* fill-array-data vAA, +BBBBBBBB */
+ .extern MterpFillArrayData
+ EXPORT_PC
+ lh a1, 2(rPC) # a1 <- bbbb (lo)
+ lh a0, 4(rPC) # a0 <- BBBB (hi)
+ srl a3, rINST, 8 # a3 <- AA
+ ins a1, a0, 16, 16 # a1 <- BBBBbbbb
+ GET_VREG_U a0, a3 # a0 <- vAA (array object)
+ dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.)
+ jal MterpFillArrayData # (obj, payload)
+ beqzc v0, MterpPossibleException # exception?
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array.S b/runtime/interpreter/mterp/mips64/op_filled_new_array.S
new file mode 100644
index 0000000..35f55c2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_filled_new_array.S
@@ -0,0 +1,18 @@
+%default { "helper":"MterpFilledNewArray" }
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern $helper
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rSELF
+ jal $helper
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
new file mode 100644
index 0000000..a4e18f6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "mips64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_double.S b/runtime/interpreter/mterp/mips64/op_float_to_double.S
new file mode 100644
index 0000000..6accfee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_float_to_double.S
@@ -0,0 +1,8 @@
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+ cvt.d.s f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_int.S b/runtime/interpreter/mterp/mips64/op_float_to_int.S
new file mode 100644
index 0000000..d957540
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_float_to_int.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, INT_MIN_AS_FLOAT
+ mtc1 t0, f1
+ cmp.le.s f1, f1, f0
+ bc1nez f1, .L${opcode}_trunc
+ cmp.eq.s f1, f0, f0
+ li t0, INT_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .L${opcode}_done
+%break
+.L${opcode}_trunc:
+ trunc.w.s f0, f0
+ mfc1 t0, f0
+.L${opcode}_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_long.S b/runtime/interpreter/mterp/mips64/op_float_to_long.S
new file mode 100644
index 0000000..5d036c8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_float_to_long.S
@@ -0,0 +1,23 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, LONG_MIN_AS_FLOAT
+ mtc1 t0, f1
+ cmp.le.s f1, f1, f0
+ bc1nez f1, .L${opcode}_trunc
+ cmp.eq.s f1, f0, f0
+ dli t0, LONG_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .L${opcode}_done
+%break
+.L${opcode}_trunc:
+ trunc.l.s f0, f0
+ dmfc1 t0, f0
+.L${opcode}_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_goto.S b/runtime/interpreter/mterp/mips64/op_goto.S
new file mode 100644
index 0000000..f2df3e4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_goto.S
@@ -0,0 +1,23 @@
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ srl a0, rINST, 8
+ seb a0, a0 # a0 <- sign-extended AA
+ dlsa rPC, a0, rPC, 1 # rPC <- rPC + AA * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a0, 1f # AA * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a0, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
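
A small self-contained C sketch of the offset math above (assumed values, not interpreter code): the AA byte is sign-extended, scaled by the 2-byte code-unit size, and a negative offset is what triggers the suspend check.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint16_t inst = (uint16_t)(0x28 | (0xFBu << 8));  /* goto -5: AA = 0xFB            */
        int8_t aa = (int8_t)(inst >> 8);                  /* srl + seb: sign-extend AA     */
        uint16_t code[16] = {0};
        const uint16_t* pc = &code[8];                    /* stand-in for rPC              */
        const uint16_t* target = pc + aa;                 /* dlsa: advance by AA * 2 bytes */
        int suspend_check = (aa < 0);                     /* bltz a0, MterpCheckSuspend... */
        printf("offset=%d delta_bytes=%ld suspend=%d\n",
               aa, (long)((const char*)target - (const char*)pc), suspend_check);
        return 0;
    }
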
diff --git a/runtime/interpreter/mterp/mips64/op_goto_16.S b/runtime/interpreter/mterp/mips64/op_goto_16.S
new file mode 100644
index 0000000..cbf8cf2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_goto_16.S
@@ -0,0 +1,22 @@
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ lh a0, 2(rPC) # a0 <- sign-extended AAAA
+ dlsa rPC, a0, rPC, 1 # rPC <- rPC + AAAA * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a0, 1f # AAAA * 2 >= 0 => no suspend check

+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a0, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_goto_32.S b/runtime/interpreter/mterp/mips64/op_goto_32.S
new file mode 100644
index 0000000..4a1feac
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_goto_32.S
@@ -0,0 +1,27 @@
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+ lh a0, 2(rPC) # a0 <- aaaa (low)
+ lh a1, 4(rPC) # a1 <- AAAA (high)
+ ins a0, a1, 16, 16 # a0 = sign-extended AAAAaaaa
+ dlsa rPC, a0, rPC, 1 # rPC <- rPC + AAAAAAAA * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgtz a0, 1f # AAAAAAAA * 2 > 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ blez a0, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_if_eq.S b/runtime/interpreter/mterp/mips64/op_if_eq.S
new file mode 100644
index 0000000..aa35cad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_eq.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_eqz.S b/runtime/interpreter/mterp/mips64/op_if_eqz.S
new file mode 100644
index 0000000..0fe3418
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_eqz.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ge.S b/runtime/interpreter/mterp/mips64/op_if_ge.S
new file mode 100644
index 0000000..59fdcc5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_ge.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gez.S b/runtime/interpreter/mterp/mips64/op_if_gez.S
new file mode 100644
index 0000000..57f1f66
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_gez.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gt.S b/runtime/interpreter/mterp/mips64/op_if_gt.S
new file mode 100644
index 0000000..26cc119
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_gt.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gtz.S b/runtime/interpreter/mterp/mips64/op_if_gtz.S
new file mode 100644
index 0000000..69fcacb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_gtz.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_le.S b/runtime/interpreter/mterp/mips64/op_if_le.S
new file mode 100644
index 0000000..a7fce17
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_le.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lez.S b/runtime/interpreter/mterp/mips64/op_if_lez.S
new file mode 100644
index 0000000..f3edcc6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_lez.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lt.S b/runtime/interpreter/mterp/mips64/op_if_lt.S
new file mode 100644
index 0000000..a975a31
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_lt.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ltz.S b/runtime/interpreter/mterp/mips64/op_if_ltz.S
new file mode 100644
index 0000000..c1d730d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_ltz.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ne.S b/runtime/interpreter/mterp/mips64/op_if_ne.S
new file mode 100644
index 0000000..f143ee9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_ne.S
@@ -0,0 +1 @@
+%include "mips64/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_nez.S b/runtime/interpreter/mterp/mips64/op_if_nez.S
new file mode 100644
index 0000000..1856b96
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_if_nez.S
@@ -0,0 +1 @@
+%include "mips64/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget.S b/runtime/interpreter/mterp/mips64/op_iget.S
new file mode 100644
index 0000000..ade4b31
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget.S
@@ -0,0 +1,26 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ .extern $helper
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal $helper
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ .if $is_object
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ .else
+ SET_VREG v0, a2 # fp[A] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean.S b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
new file mode 100644
index 0000000..cb2c8be
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
new file mode 100644
index 0000000..979dc70
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte.S b/runtime/interpreter/mterp/mips64/op_iget_byte.S
new file mode 100644
index 0000000..099d8d0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
new file mode 100644
index 0000000..cb35556
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char.S b/runtime/interpreter/mterp/mips64/op_iget_char.S
new file mode 100644
index 0000000..927b7af
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_char.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char_quick.S b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
new file mode 100644
index 0000000..6034567
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object.S b/runtime/interpreter/mterp/mips64/op_iget_object.S
new file mode 100644
index 0000000..c658556
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_object.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object_quick.S b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
new file mode 100644
index 0000000..171d543
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
@@ -0,0 +1,16 @@
+ /* For: iget-object-quick */
+ /* op vA, vB, offset//CCCC */
+ .extern artIGetObjectFromMterp
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ EXPORT_PC
+ GET_VREG_U a0, a2 # a0 <- object we're operating on
+ jal artIGetObjectFromMterp # (obj, offset)
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_quick.S b/runtime/interpreter/mterp/mips64/op_iget_quick.S
new file mode 100644
index 0000000..fee6ab7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_quick.S
@@ -0,0 +1,14 @@
+%default { "load":"lw" }
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a4, rINST, 8, 4 # a4 <- A
+ daddu a1, a1, a3
+ beqz a3, common_errNullObject # object was null
+ $load a0, 0(a1) # a0 <- obj.field
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG a0, a4 # fp[A] <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short.S b/runtime/interpreter/mterp/mips64/op_iget_short.S
new file mode 100644
index 0000000..28b5093
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_short.S
@@ -0,0 +1 @@
+%include "mips64/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short_quick.S b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
new file mode 100644
index 0000000..6e152db
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide.S b/runtime/interpreter/mterp/mips64/op_iget_wide.S
new file mode 100644
index 0000000..85cf670
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_wide.S
@@ -0,0 +1,21 @@
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ .extern artGet64InstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGet64InstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ SET_VREG_WIDE v0, a2 # fp[A] <- v0
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
new file mode 100644
index 0000000..2adc6ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
@@ -0,0 +1,14 @@
+ /* iget-wide-quick vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a4, 2(rPC) # a4 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ daddu a4, a3, a4 # create direct pointer
+ lw a0, 0(a4)
+ lw a1, 4(a4)
+ dinsu a0, a1, 32, 32
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG_WIDE a0, a2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
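
The two lw instructions plus dinsu above read a 64-bit field that is only guaranteed 4-byte alignment as two 32-bit halves and merge them. A C sketch of the same idea for a little-endian host (illustrative only; the helper name is made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int64_t load_wide_field(const void* obj, uint32_t byte_offset) {
        const uint8_t* p = (const uint8_t*)obj + byte_offset;
        uint32_t lo, hi;
        memcpy(&lo, p, 4);                             /* lw a0, 0(a4)         */
        memcpy(&hi, p + 4, 4);                         /* lw a1, 4(a4)         */
        return (int64_t)(((uint64_t)hi << 32) | lo);   /* dinsu a0, a1, 32, 32 */
    }

    int main(void) {
        uint8_t buf[12] = {0};
        int64_t v = 0x1122334455667788LL;
        memcpy(buf + 4, &v, sizeof v);                 /* field at a 4-byte-aligned offset */
        printf("%llx\n", (unsigned long long)load_wide_field(buf, 4));
        return 0;
    }
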
diff --git a/runtime/interpreter/mterp/mips64/op_instance_of.S b/runtime/interpreter/mterp/mips64/op_instance_of.S
new file mode 100644
index 0000000..39a5dc7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_instance_of.S
@@ -0,0 +1,23 @@
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * The most common situation is a non-null object being compared
+ * against an already-resolved class.
+ */
+ /* instance-of vA, vB, class//CCCC */
+ .extern MterpInstanceOf
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- CCCC
+ srl a1, rINST, 12 # a1 <- B
+ dlsa a1, a1, rFP, 2 # a1 <- &object
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ jal MterpInstanceOf # (index, &obj, method, self)
+ ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a1, MterpException
+ ADVANCE 2 # advance rPC
+ SET_VREG v0, a2 # vA <- v0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_byte.S b/runtime/interpreter/mterp/mips64/op_int_to_byte.S
new file mode 100644
index 0000000..1993e07
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"seb a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_char.S b/runtime/interpreter/mterp/mips64/op_int_to_char.S
new file mode 100644
index 0000000..8f03acd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_char.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"and a0, a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_double.S b/runtime/interpreter/mterp/mips64/op_int_to_double.S
new file mode 100644
index 0000000..6df71be
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_double.S
@@ -0,0 +1,8 @@
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+ cvt.d.w f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_float.S b/runtime/interpreter/mterp/mips64/op_int_to_float.S
new file mode 100644
index 0000000..77e9eba
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_float.S
@@ -0,0 +1,8 @@
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+ cvt.s.w f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_long.S b/runtime/interpreter/mterp/mips64/op_int_to_long.S
new file mode 100644
index 0000000..7b9ad86
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_long.S
@@ -0,0 +1,8 @@
+ /* int-to-long vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits)
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- vB
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_short.S b/runtime/interpreter/mterp/mips64/op_int_to_short.S
new file mode 100644
index 0000000..4a3f234
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_int_to_short.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"seh a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct.S b/runtime/interpreter/mterp/mips64/op_invoke_direct.S
new file mode 100644
index 0000000..5047118
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
new file mode 100644
index 0000000..5c9b95f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface.S b/runtime/interpreter/mterp/mips64/op_invoke_interface.S
new file mode 100644
index 0000000..ed148ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_interface.S
@@ -0,0 +1,8 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeInterface" }
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
new file mode 100644
index 0000000..91c231e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static.S b/runtime/interpreter/mterp/mips64/op_invoke_static.S
new file mode 100644
index 0000000..44f5cb7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_static.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static_range.S b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
new file mode 100644
index 0000000..289e5aa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super.S b/runtime/interpreter/mterp/mips64/op_invoke_super.S
new file mode 100644
index 0000000..b13fffe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_super.S
@@ -0,0 +1,8 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeSuper" }
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super_range.S b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
new file mode 100644
index 0000000..350b975
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
new file mode 100644
index 0000000..0d26cda
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
@@ -0,0 +1,8 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtual" }
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
new file mode 100644
index 0000000..f39562c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
new file mode 100644
index 0000000..0bb43f8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000..c448851
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput.S b/runtime/interpreter/mterp/mips64/op_iput.S
new file mode 100644
index 0000000..a906a0f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput.S
@@ -0,0 +1,21 @@
+%default { "helper":"artSet32InstanceFromMterp" }
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern $helper
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG a2, a2 # a2 <- fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal $helper
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean.S b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
new file mode 100644
index 0000000..3034fa5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
new file mode 100644
index 0000000..df99948
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte.S b/runtime/interpreter/mterp/mips64/op_iput_byte.S
new file mode 100644
index 0000000..3034fa5
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
new file mode 100644
index 0000000..df99948
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char.S b/runtime/interpreter/mterp/mips64/op_iput_char.S
new file mode 100644
index 0000000..4c2fa28
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_char.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char_quick.S b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
new file mode 100644
index 0000000..a6286b7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object.S b/runtime/interpreter/mterp/mips64/op_iput_object.S
new file mode 100644
index 0000000..9a42f54
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_object.S
@@ -0,0 +1,11 @@
+ .extern MterpIputObject
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ jal MterpIputObject
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object_quick.S b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
new file mode 100644
index 0000000..658ef42
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
@@ -0,0 +1,10 @@
+ .extern MterpIputObjectQuick
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ jal MterpIputObjectQuick
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_quick.S b/runtime/interpreter/mterp/mips64/op_iput_quick.S
new file mode 100644
index 0000000..b95adfc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_quick.S
@@ -0,0 +1,14 @@
+%default { "store":"sw" }
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ GET_VREG a0, a2 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a1, a3
+ $store a0, 0(a1) # obj.field <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short.S b/runtime/interpreter/mterp/mips64/op_iput_short.S
new file mode 100644
index 0000000..4c2fa28
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_short.S
@@ -0,0 +1 @@
+%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short_quick.S b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
new file mode 100644
index 0000000..a6286b7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide.S b/runtime/interpreter/mterp/mips64/op_iput_wide.S
new file mode 100644
index 0000000..9b790f8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_wide.S
@@ -0,0 +1,15 @@
+ /* iput-wide vA, vB, field//CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ dlsa a2, a2, rFP, 2 # a2 <- &fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal artSet64InstanceFromMterp
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
new file mode 100644
index 0000000..95a8ad8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
@@ -0,0 +1,14 @@
+ /* iput-wide-quick vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a3, 2(rPC) # a3 <- field byte offset
+ GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer
+ ext a0, rINST, 8, 4 # a0 <- A
+ beqz a2, common_errNullObject # object was null
+ GET_VREG_WIDE a0, a0 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a2, a3 # create a direct pointer
+ sw a0, 0(a1)
+ dsrl32 a0, a0, 0
+ sw a0, 4(a1)
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_double.S b/runtime/interpreter/mterp/mips64/op_long_to_double.S
new file mode 100644
index 0000000..8503e76
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_long_to_double.S
@@ -0,0 +1,8 @@
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+ cvt.d.l f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_float.S b/runtime/interpreter/mterp/mips64/op_long_to_float.S
new file mode 100644
index 0000000..31f5c0e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_long_to_float.S
@@ -0,0 +1,8 @@
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+ cvt.s.l f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_int.S b/runtime/interpreter/mterp/mips64/op_long_to_int.S
new file mode 100644
index 0000000..4ef4b51
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "mips64/op_move.S"
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_enter.S b/runtime/interpreter/mterp/mips64/op_monitor_enter.S
new file mode 100644
index 0000000..36ae503
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_monitor_enter.S
@@ -0,0 +1,14 @@
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ .extern artLockObjectFromCode
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ jal artLockObjectFromCode
+ bnezc v0, MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_exit.S b/runtime/interpreter/mterp/mips64/op_monitor_exit.S
new file mode 100644
index 0000000..9945952
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_monitor_exit.S
@@ -0,0 +1,18 @@
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ .extern artUnlockObjectFromCode
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj)
+ bnezc v0, MterpException
+ FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move.S b/runtime/interpreter/mterp/mips64/op_move.S
new file mode 100644
index 0000000..c79f6cd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vA <- vB
+ .else
+ SET_VREG a0, a2 # vA <- vB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_16.S b/runtime/interpreter/mterp/mips64/op_move_16.S
new file mode 100644
index 0000000..9d5c4dc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ lhu a3, 4(rPC) # a3 <- BBBB
+ lhu a2, 2(rPC) # a2 <- AAAA
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAAAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_exception.S b/runtime/interpreter/mterp/mips64/op_move_exception.S
new file mode 100644
index 0000000..d226718
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_exception.S
@@ -0,0 +1,8 @@
+ /* move-exception vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ SET_VREG_OBJECT a0, a2 # vAA <- exception obj
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_from16.S b/runtime/interpreter/mterp/mips64/op_move_from16.S
new file mode 100644
index 0000000..6d6bde0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_from16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ lhu a3, 2(rPC) # a3 <- BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_object.S b/runtime/interpreter/mterp/mips64/op_move_object.S
new file mode 100644
index 0000000..47e0272
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_object.S
@@ -0,0 +1 @@
+%include "mips64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_16.S b/runtime/interpreter/mterp/mips64/op_move_object_16.S
new file mode 100644
index 0000000..a777dcd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_object_16.S
@@ -0,0 +1 @@
+%include "mips64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_from16.S b/runtime/interpreter/mterp/mips64/op_move_object_from16.S
new file mode 100644
index 0000000..ab55ebd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "mips64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result.S b/runtime/interpreter/mterp/mips64/op_move_result.S
new file mode 100644
index 0000000..1ec28cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_result.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vAA <- result
+ .else
+ SET_VREG a0, a2 # vAA <- result
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_object.S b/runtime/interpreter/mterp/mips64/op_move_result_object.S
new file mode 100644
index 0000000..e76bc22
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_result_object.S
@@ -0,0 +1 @@
+%include "mips64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_wide.S b/runtime/interpreter/mterp/mips64/op_move_result_wide.S
new file mode 100644
index 0000000..3ba0d72
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_result_wide.S
@@ -0,0 +1,9 @@
+ /* for: move-result-wide */
+ /* op vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ ld a0, 0(a0) # a0 <- result.j
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- result
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide.S b/runtime/interpreter/mterp/mips64/op_move_wide.S
new file mode 100644
index 0000000..ea23f87
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_wide.S
@@ -0,0 +1,9 @@
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ ext a3, rINST, 12, 4 # a3 <- B
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG_WIDE a0, a3 # a0 <- vB
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- vB
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_16.S b/runtime/interpreter/mterp/mips64/op_move_wide_16.S
new file mode 100644
index 0000000..8ec6068
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_wide_16.S
@@ -0,0 +1,9 @@
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lhu a3, 4(rPC) # a3 <- BBBB
+ lhu a2, 2(rPC) # a2 <- AAAA
+ GET_VREG_WIDE a0, a3 # a0 <- vBBBB
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_from16.S b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
new file mode 100644
index 0000000..11d5603
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
@@ -0,0 +1,9 @@
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lhu a3, 2(rPC) # a3 <- BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_WIDE a0, a3 # a0 <- vBBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- vBBBB
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double.S b/runtime/interpreter/mterp/mips64/op_mul_double.S
new file mode 100644
index 0000000..e7e17f7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
new file mode 100644
index 0000000..f404d46
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float.S b/runtime/interpreter/mterp/mips64/op_mul_float.S
new file mode 100644
index 0000000..9a695fc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
new file mode 100644
index 0000000..a134a34
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int.S b/runtime/interpreter/mterp/mips64/op_mul_int.S
new file mode 100644
index 0000000..e1b90ff
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
new file mode 100644
index 0000000..c0c4063
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
new file mode 100644
index 0000000..bb4fff8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
new file mode 100644
index 0000000..da11ea9
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long.S b/runtime/interpreter/mterp/mips64/op_mul_long.S
new file mode 100644
index 0000000..ec32850
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
new file mode 100644
index 0000000..eb50cda
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_double.S b/runtime/interpreter/mterp/mips64/op_neg_double.S
new file mode 100644
index 0000000..a135d61
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_double.S
@@ -0,0 +1,3 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
+ neg.d f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_float.S b/runtime/interpreter/mterp/mips64/op_neg_float.S
new file mode 100644
index 0000000..78019f0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_float.S
@@ -0,0 +1,3 @@
+%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
+ neg.s f0, f0
+%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_int.S b/runtime/interpreter/mterp/mips64/op_neg_int.S
new file mode 100644
index 0000000..31538c0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_int.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"subu a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_long.S b/runtime/interpreter/mterp/mips64/op_neg_long.S
new file mode 100644
index 0000000..bc80d06
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_neg_long.S
@@ -0,0 +1 @@
+%include "mips64/unopWide.S" {"instr":"dsubu a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_new_array.S b/runtime/interpreter/mterp/mips64/op_new_array.S
new file mode 100644
index 0000000..d78b4ac
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_new_array.S
@@ -0,0 +1,19 @@
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class//CCCC */
+ .extern MterpNewArray
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ jal MterpNewArray
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_new_instance.S b/runtime/interpreter/mterp/mips64/op_new_instance.S
new file mode 100644
index 0000000..cc5e13e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_new_instance.S
@@ -0,0 +1,14 @@
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class//BBBB */
+ .extern MterpNewInstance
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rSELF
+ move a2, rINST
+ jal MterpNewInstance # (shadow_frame, self, inst_data)
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_nop.S b/runtime/interpreter/mterp/mips64/op_nop.S
new file mode 100644
index 0000000..cc803a7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_nop.S
@@ -0,0 +1,3 @@
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_not_int.S b/runtime/interpreter/mterp/mips64/op_not_int.S
new file mode 100644
index 0000000..5954095
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_not_int.S
@@ -0,0 +1 @@
+%include "mips64/unop.S" {"instr":"nor a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_not_long.S b/runtime/interpreter/mterp/mips64/op_not_long.S
new file mode 100644
index 0000000..c8f5da7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_not_long.S
@@ -0,0 +1 @@
+%include "mips64/unopWide.S" {"instr":"nor a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int.S b/runtime/interpreter/mterp/mips64/op_or_int.S
new file mode 100644
index 0000000..0102355
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_2addr.S b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
new file mode 100644
index 0000000..eed8900
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit16.S b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
new file mode 100644
index 0000000..16a0f3e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit8.S b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
new file mode 100644
index 0000000..dbbf790
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long.S b/runtime/interpreter/mterp/mips64/op_or_long.S
new file mode 100644
index 0000000..e6f8639
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long_2addr.S b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
new file mode 100644
index 0000000..ad5e6c8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_packed_switch.S b/runtime/interpreter/mterp/mips64/op_packed_switch.S
new file mode 100644
index 0000000..cdbdf75
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_packed_switch.S
@@ -0,0 +1,31 @@
+%default { "func":"MterpDoPackedSwitch" }
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBBBBBB */
+ .extern $func
+ lh a0, 2(rPC) # a0 <- bbbb (lo)
+ lh a1, 4(rPC) # a1 <- BBBB (hi)
+ srl a3, rINST, 8 # a3 <- AA
+ ins a0, a1, 16, 16 # a0 <- BBBBbbbb
+ GET_VREG a1, a3 # a1 <- vAA
+ dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
+ jal $func # v0 <- code-unit branch offset
+ dlsa rPC, v0, rPC, 1 # rPC <- rPC + offset * 2
+ FETCH_INST # load rINST
+#if MTERP_SUSPEND
+ bgtz v0, 1f # offset * 2 > 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ blez v0, MterpCheckSuspendAndContinue
+#endif
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double.S b/runtime/interpreter/mterp/mips64/op_rem_double.S
new file mode 100644
index 0000000..ba61cfd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_double.S
@@ -0,0 +1,12 @@
+ /* rem-double vAA, vBB, vCC */
+ .extern fmod
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f12, a2 # f12 <- vBB
+ GET_VREG_DOUBLE f13, a3 # f13 <- vCC
+ jal fmod # f0 <- f12 op f13
+ srl a4, rINST, 8 # a4 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
new file mode 100644
index 0000000..c649f0d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
@@ -0,0 +1,12 @@
+ /* rem-double/2addr vA, vB */
+ .extern fmod
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f12, a2 # f12 <- vA
+ GET_VREG_DOUBLE f13, a3 # f13 <- vB
+ jal fmod # f0 <- f12 op f13
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float.S b/runtime/interpreter/mterp/mips64/op_rem_float.S
new file mode 100644
index 0000000..3967b0b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_float.S
@@ -0,0 +1,12 @@
+ /* rem-float vAA, vBB, vCC */
+ .extern fmodf
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f12, a2 # f12 <- vBB
+ GET_VREG_FLOAT f13, a3 # f13 <- vCC
+ jal fmodf # f0 <- f12 op f13
+ srl a4, rINST, 8 # a4 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
new file mode 100644
index 0000000..3fed41e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
@@ -0,0 +1,12 @@
+ /* rem-float/2addr vA, vB */
+ .extern fmodf
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f12, a2 # f12 <- vA
+ GET_VREG_FLOAT f13, a3 # f13 <- vB
+ jal fmodf # f0 <- f12 op f13
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
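
The rem-float/rem-double handlers above delegate to libc because Dalvik's floating-point remainder has the same semantics as C's fmod family: the result takes the sign of the dividend and the implied quotient is rounded toward zero. A trivial C++ sketch, for illustration only:

    #include <cmath>

    // rem-float/rem-double follow C's fmod semantics (result takes the sign of
    // the dividend, implied quotient rounded toward zero), which is why the
    // handlers can call fmodf/fmod directly.
    float  RemFloatSketch(float vbb, float vcc)    { return std::fmod(vbb, vcc); }
    double RemDoubleSketch(double vbb, double vcc) { return std::fmod(vbb, vcc); }
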
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int.S b/runtime/interpreter/mterp/mips64/op_rem_int.S
new file mode 100644
index 0000000..c05e9c4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
new file mode 100644
index 0000000..a4e162d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
new file mode 100644
index 0000000..3284f14
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
new file mode 100644
index 0000000..1e6a584
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long.S b/runtime/interpreter/mterp/mips64/op_rem_long.S
new file mode 100644
index 0000000..32b2d19
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
new file mode 100644
index 0000000..ad658e1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S
new file mode 100644
index 0000000..ec986b8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return.S
@@ -0,0 +1,18 @@
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_object.S b/runtime/interpreter/mterp/mips64/op_return_object.S
new file mode 100644
index 0000000..67f1871
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_object.S
@@ -0,0 +1 @@
+%include "mips64/op_return.S"
diff --git a/runtime/interpreter/mterp/mips64/op_return_void.S b/runtime/interpreter/mterp/mips64/op_return_void.S
new file mode 100644
index 0000000..05253ae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_void.S
@@ -0,0 +1,11 @@
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ li a0, 0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
new file mode 100644
index 0000000..f67e811
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
@@ -0,0 +1,9 @@
+ .extern MterpSuspendCheck
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ li a0, 0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_wide.S b/runtime/interpreter/mterp/mips64/op_return_wide.S
new file mode 100644
index 0000000..544e027
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_return_wide.S
@@ -0,0 +1,17 @@
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_WIDE a0, a2 # a0 <- vAA
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int.S b/runtime/interpreter/mterp/mips64/op_rsub_int.S
new file mode 100644
index 0000000..fa31a0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rsub_int.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
new file mode 100644
index 0000000..c31ff32
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S
new file mode 100644
index 0000000..bd2cfe3
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget.S
@@ -0,0 +1,26 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" }
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+ .extern $helper
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal $helper
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a2, rINST, 8 # a2 <- AA
+ $extend
+ PREFETCH_INST 2
+ bnez a3, MterpException # bail out
+ .if $is_object
+ SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
+ .else
+ SET_VREG v0, a2 # fp[AA] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0
diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
new file mode 100644
index 0000000..e7b1844
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"and v0, v0, 0xff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S
new file mode 100644
index 0000000..52a2e4a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"seb v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S
new file mode 100644
index 0000000..873d82a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_char.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"and v0, v0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S
new file mode 100644
index 0000000..3108417
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_object.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S
new file mode 100644
index 0000000..fed4e76
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_short.S
@@ -0,0 +1 @@
+%include "mips64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"seh v0, v0"}
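
The per-type "extend" fragments used by the sget variants above narrow the 32-bit value returned by the entrypoint before it is written to the vreg: boolean and char are zero-extended, byte and short are sign-extended. Expressed in C++ (illustrative only):

    #include <cstdint>

    // C++ equivalents of the "extend" fragments used above.
    int32_t ExtendBoolean(int32_t v) { return v & 0xff;                }  // and v0, v0, 0xff
    int32_t ExtendByte(int32_t v)    { return static_cast<int8_t>(v);  }  // seb v0, v0
    int32_t ExtendChar(int32_t v)    { return v & 0xffff;              }  // and v0, v0, 0xffff
    int32_t ExtendShort(int32_t v)   { return static_cast<int16_t>(v); }  // seh v0, v0
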
diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S
new file mode 100644
index 0000000..77124d1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sget_wide.S
@@ -0,0 +1,18 @@
+ /*
+ * SGET_WIDE handler wrapper.
+ *
+ */
+ /* sget-wide vAA, field//BBBB */
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGet64StaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a4, rINST, 8 # a4 <- AA
+ bnez a3, MterpException # bail out
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG_WIDE v0, a4
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int.S b/runtime/interpreter/mterp/mips64/op_shl_int.S
new file mode 100644
index 0000000..784481f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
new file mode 100644
index 0000000..a6c8a78
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
new file mode 100644
index 0000000..36ef207
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long.S b/runtime/interpreter/mterp/mips64/op_shl_long.S
new file mode 100644
index 0000000..225a2cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
new file mode 100644
index 0000000..c04d882
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int.S b/runtime/interpreter/mterp/mips64/op_shr_int.S
new file mode 100644
index 0000000..eded037
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
new file mode 100644
index 0000000..5b4d96f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
new file mode 100644
index 0000000..175eb86
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long.S b/runtime/interpreter/mterp/mips64/op_shr_long.S
new file mode 100644
index 0000000..0db38c8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
new file mode 100644
index 0000000..48131ad
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sparse_switch.S b/runtime/interpreter/mterp/mips64/op_sparse_switch.S
new file mode 100644
index 0000000..b065aaa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "mips64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S
new file mode 100644
index 0000000..142f18f
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput.S
@@ -0,0 +1,20 @@
+%default { "helper":"artSet32StaticFromCode" }
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ .extern $helper
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ srl a3, rINST, 8 # a3 <- AA
+ GET_VREG a1, a3 # a1 <- fp[AA]
+ ld a2, OFF_FP_METHOD(rFP)
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal $helper
+ bnezc v0, MterpException # 0 on success
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
new file mode 100644
index 0000000..f5b8dbf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S
new file mode 100644
index 0000000..f5b8dbf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_byte.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S
new file mode 100644
index 0000000..c4d195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_char.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_object.S b/runtime/interpreter/mterp/mips64/op_sput_object.S
new file mode 100644
index 0000000..ef4c685
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_object.S
@@ -0,0 +1,11 @@
+ .extern MterpSputObject
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ jal MterpSputObject
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S
new file mode 100644
index 0000000..c4d195c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_short.S
@@ -0,0 +1 @@
+%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S
new file mode 100644
index 0000000..828ddc1
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sput_wide.S
@@ -0,0 +1,18 @@
+ /*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field//BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ srl a2, rINST, 8 # a2 <- AA
+ dlsa a2, a2, rFP, 2
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal artSet64IndirectStaticFromMterp
+ bnezc v0, MterpException # 0 on success, -1 on failure
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
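
Unlike the narrower sput variants, sput-wide does not load the value itself; `dlsa a2, a2, rFP, 2` forms &fp[AA] and the helper reads the 64-bit value through that pointer (a non-zero return means an exception is pending, hence the branch to MterpException). A hypothetical C++ sketch of that pointer-based read, for illustration only:

    #include <cstdint>
    #include <cstring>

    // Hypothetical helper name, for illustration only: the vreg file is an
    // array of 32-bit slots, so the handler passes &vregs[AA] and the callee
    // reads the 64-bit value (vAA/vAA+1) through that pointer.
    uint64_t ReadWideVRegSketch(const uint32_t* vregs, uint32_t vAA) {
      uint64_t value;
      std::memcpy(&value, &vregs[vAA], sizeof(value));
      return value;
    }
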
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double.S b/runtime/interpreter/mterp/mips64/op_sub_double.S
new file mode 100644
index 0000000..40a6c89
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_double.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
new file mode 100644
index 0000000..984737e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinopWide2addr.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float.S b/runtime/interpreter/mterp/mips64/op_sub_float.S
new file mode 100644
index 0000000..9010592
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_float.S
@@ -0,0 +1 @@
+%include "mips64/fbinop.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
new file mode 100644
index 0000000..e7d4ffe
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "mips64/fbinop2addr.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int.S b/runtime/interpreter/mterp/mips64/op_sub_int.S
new file mode 100644
index 0000000..609ea05
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
new file mode 100644
index 0000000..ba2f1e8
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long.S b/runtime/interpreter/mterp/mips64/op_sub_long.S
new file mode 100644
index 0000000..09a6afd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
new file mode 100644
index 0000000..b9ec82a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_throw.S b/runtime/interpreter/mterp/mips64/op_throw.S
new file mode 100644
index 0000000..6418d57
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_throw.S
@@ -0,0 +1,10 @@
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (exception object)
+ beqzc a0, common_errNullObject
+ sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3e.S b/runtime/interpreter/mterp/mips64/op_unused_3e.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_3e.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3f.S b/runtime/interpreter/mterp/mips64/op_unused_3f.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_3f.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_40.S b/runtime/interpreter/mterp/mips64/op_unused_40.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_40.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_41.S b/runtime/interpreter/mterp/mips64/op_unused_41.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_41.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_42.S b/runtime/interpreter/mterp/mips64/op_unused_42.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_42.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_43.S b/runtime/interpreter/mterp/mips64/op_unused_43.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_43.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_79.S b/runtime/interpreter/mterp/mips64/op_unused_79.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_79.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_7a.S b/runtime/interpreter/mterp/mips64/op_unused_7a.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_7a.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f4.S b/runtime/interpreter/mterp/mips64/op_unused_f4.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f4.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fa.S b/runtime/interpreter/mterp/mips64/op_unused_fa.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fa.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fb.S b/runtime/interpreter/mterp/mips64/op_unused_fb.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fb.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fc.S b/runtime/interpreter/mterp/mips64/op_unused_fc.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fc.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fd.S b/runtime/interpreter/mterp/mips64/op_unused_fd.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fd.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fe.S b/runtime/interpreter/mterp/mips64/op_unused_fe.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_fe.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_ff.S b/runtime/interpreter/mterp/mips64/op_unused_ff.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_ff.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int.S b/runtime/interpreter/mterp/mips64/op_ushr_int.S
new file mode 100644
index 0000000..37c90cb
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
new file mode 100644
index 0000000..d6bf413
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
new file mode 100644
index 0000000..2a2d843
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long.S b/runtime/interpreter/mterp/mips64/op_ushr_long.S
new file mode 100644
index 0000000..e724405
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
new file mode 100644
index 0000000..d2cf135
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int.S b/runtime/interpreter/mterp/mips64/op_xor_int.S
new file mode 100644
index 0000000..ee25ebc
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int.S
@@ -0,0 +1 @@
+%include "mips64/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
new file mode 100644
index 0000000..0f04967
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
new file mode 100644
index 0000000..ecb21ae
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "mips64/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
new file mode 100644
index 0000000..115ae99
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "mips64/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long.S b/runtime/interpreter/mterp/mips64/op_xor_long.S
new file mode 100644
index 0000000..7ebabc2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_long.S
@@ -0,0 +1 @@
+%include "mips64/binopWide.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
new file mode 100644
index 0000000..0f1919a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "mips64/binopWide2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/unop.S b/runtime/interpreter/mterp/mips64/unop.S
new file mode 100644
index 0000000..e3f7ea0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/unop.S
@@ -0,0 +1,18 @@
+%default {"preinstr":""}
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * not-int, neg-int
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ $preinstr # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $instr # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/unopWide.S b/runtime/interpreter/mterp/mips64/unopWide.S
new file mode 100644
index 0000000..c0dd1aa
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/unopWide.S
@@ -0,0 +1,17 @@
+%default {"preinstr":""}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * For: not-long, neg-long
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ $preinstr # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $instr # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/unused.S b/runtime/interpreter/mterp/mips64/unused.S
new file mode 100644
index 0000000..30d38bd
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/zcmp.S b/runtime/interpreter/mterp/mips64/zcmp.S
new file mode 100644
index 0000000..d7ad894
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/zcmp.S
@@ -0,0 +1,30 @@
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lh a4, 2(rPC) # a4 <- sign-extended BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a2 # a0 <- vAA
+
+ b${condition}zc a0, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + BBBB * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # BBBB * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
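
The compare-and-branch fragment above folds the taken and not-taken paths into one offset: the taken path keeps the sign-extended BBBB, the not-taken path overwrites it with 2 (the width of the if-*z instruction), and only backward branches (negative offsets) poll the suspend flags. A C++ sketch of that selection, shown for if-eqz (illustrative only):

    #include <cstdint>

    // Illustrative only, condition shown for if-eqz. The returned offset is in
    // 16-bit code units; the handler multiplies it by 2 before adding to rPC.
    int32_t IfEqzOffsetSketch(int32_t vAA, int16_t bbbb, bool* check_suspend) {
      const int32_t offset = (vAA == 0) ? bbbb : 2;  // 2 = width of if-*z
      *check_suspend = (offset < 0);                 // poll only on backward branches
      return offset;
    }
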
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index b443c69..ca727f4 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -149,7 +149,9 @@
Runtime::Current()->GetInstrumentation();
bool unhandled_instrumentation;
// TODO: enable for other targets after more extensive testing.
- if ((kRuntimeISA == kArm64) || (kRuntimeISA == kArm) || (kRuntimeISA == kX86)) {
+ if ((kRuntimeISA == kArm64) || (kRuntimeISA == kArm) ||
+ (kRuntimeISA == kX86_64) || (kRuntimeISA == kX86) ||
+ (kRuntimeISA == kMips)) {
unhandled_instrumentation = instrumentation->NonJitProfilingActive();
} else {
unhandled_instrumentation = instrumentation->IsActive();
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
new file mode 100644
index 0000000..7ae1ab1
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -0,0 +1,13157 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: mips/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+#include "asm_support.h"
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32REVGE2 /* mips32r2 and greater */
+#if (__mips==32) && (__mips_isa_rev>=5)
+#define FPU64 /* 64 bit FPU */
+#if (__mips==32) && (__mips_isa_rev>=6)
+#define MIPS32REVGE6 /* mips32r6 and greater */
+#endif
+#endif
+#endif
+
+/* MIPS definitions and declarations
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rIBASE interpreted instruction base pointer, used for computed goto
+ s4 rINST first 16-bit code unit of current instruction
+ s6 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rREFS s6
+#define rTEMP s7
+
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+
+/* GP register definitions */
+#define zero $0 /* always zero */
+#define AT $at /* assembler temp */
+#define v0 $2 /* return value */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define t0 $8 /* temp registers (not saved across subroutine calls) */
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+#define ta0 $12 /* alias */
+#define ta1 $13
+#define ta2 $14
+#define ta3 $15
+#define s0 $16 /* saved across subroutine calls (callee saved) */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* two more temp registers */
+#define t9 $25
+#define k0 $26 /* kernel temporary */
+#define k1 $27
+#define gp $28 /* global pointer */
+#define sp $29 /* stack pointer */
+#define s8 $30 /* one more callee saved */
+#define ra $31 /* return address */
+
+/* FP register definitions */
+#define fv0 $f0
+#define fv0f $f1
+#define fv1 $f2
+#define fv1f $f3
+#define fa0 $f12
+#define fa0f $f13
+#define fa1 $f14
+#define fa1f $f15
+#define ft0 $f4
+#define ft0f $f5
+#define ft1 $f6
+#define ft1f $f7
+#define ft2 $f8
+#define ft2f $f9
+#define ft3 $f10
+#define ft3f $f11
+#define ft4 $f16
+#define ft4f $f17
+#define ft5 $f18
+#define ft5f $f19
+#define fs0 $f20
+#define fs0f $f21
+#define fs1 $f22
+#define fs1f $f23
+#define fs2 $f24
+#define fs2f $f25
+#define fs3 $f26
+#define fs3f $f27
+#define fs4 $f28
+#define fs4f $f29
+#define fs5 $f30
+#define fs5f $f31
+
+#ifndef MIPS32REVGE6
+#define fcc0 $fcc0
+#define fcc1 $fcc1
+#endif
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+#define EXPORT_PC() \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP)
+
+#define EXPORT_DEX_PC(tmp) \
+ lw tmp, OFF_FP_CODE_ITEM(rFP) \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP) \
+ addu tmp, CODEITEM_INSNS_OFFSET \
+ subu tmp, rPC, tmp \
+ sra tmp, tmp, 1 \
+ sw tmp, OFF_FP_DEX_PC(rFP)
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() lhu rINST, (rPC)
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+ addu rPC, rPC, ((_count) * 2)
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+ lhu _dreg, ((_count)*2)(_sreg) ; \
+ addu _sreg, _sreg, (_count)*2
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
+
+/* Advance rPC by some number of code units. */
+#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
+
+/*
+ * Fetch the next instruction from an offset specified by rd. Updates
+ * rPC to point to the next instruction. "rd" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ */
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+ lhu rINST, (rPC)
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+#define GET_PREFETCHED_OPCODE(dreg, sreg) andi dreg, sreg, 255
+
+/*
+ * Begin executing the opcode in rd.
+ */
+#define GOTO_OPCODE(rd) sll rd, rd, 7; \
+ addu rd, rIBASE, rd; \
+ jalr zero, rd
+
+#define GOTO_OPCODE_BASE(_base, rd) sll rd, rd, 7; \
+ addu rd, _base, rd; \
+ jalr zero, rd
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+ .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+
+#define SET_VREG64(rlo, rhi, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+
+#ifdef FPU64
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rREFS, AT; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8); \
+ addu t8, rFP, AT; \
+ mfhc1 AT, rlo; \
+ sw AT, 4(t8); \
+ .set at; \
+ s.s rlo, 0(t8)
+#else
+#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rlo, 0(t8); \
+ s.s rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#endif
+
+#define SET_VREG_OBJECT(rd, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw rd, 0(t8)
+
+/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+ sll dst, dst, 7; \
+ addu dst, rIBASE, dst; \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+
+/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
+ sll dst, dst, 7; \
+ addu dst, rIBASE, dst; \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+
+#define SET_VREG_F(rd, rix) .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifdef MIPS32REVGE2
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#else
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+ sll AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+ srl AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+ sw rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+ lw rhi, (off+4)(rbase)
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#ifdef FPU64
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ sw AT, (off+4)(rbase); \
+ .set at
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+ .set noat; \
+ lw AT, (off+4)(rbase); \
+ mthc1 AT, rlo; \
+ .set at
+#else
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+ s.s rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+ l.s rhi, (off+4)(rbase)
+#endif
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+
+#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_GP 84
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+/*
+ * FP register usage restrictions:
+ * 1) We don't use the callee save FP registers so we don't have to save them.
+ * 2) We don't use the odd FP registers so we can share code with mips32r6.
+ */
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+ STACK_STORE(ra, 124); \
+ STACK_STORE(s8, 120); \
+ STACK_STORE(s0, 116); \
+ STACK_STORE(s1, 112); \
+ STACK_STORE(s2, 108); \
+ STACK_STORE(s3, 104); \
+ STACK_STORE(s4, 100); \
+ STACK_STORE(s5, 96); \
+ STACK_STORE(s6, 92); \
+ STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+ STACK_LOAD(s7, 88); \
+ STACK_LOAD(s6, 92); \
+ STACK_LOAD(s5, 96); \
+ STACK_LOAD(s4, 100); \
+ STACK_LOAD(s3, 104); \
+ STACK_LOAD(s2, 108); \
+ STACK_LOAD(s1, 112); \
+ STACK_LOAD(s0, 116); \
+ STACK_LOAD(s8, 120); \
+ STACK_LOAD(ra, 124); \
+ DELETE_STACK(STACK_SIZE)
+
+/* File: mips/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+ .global ExecuteMterpImpl
+ .ent ExecuteMterpImpl
+ .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ * a0 Thread* self
+ * a1 code_item
+ * a2 ShadowFrame
+ * a3 JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+ .set noreorder
+ .cpload t9
+ .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+ STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+ .cprestore STACK_OFFSET_GP
+
+ /* Remember the return register */
+ sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+ /* Remember the code_item */
+ sw a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+ /* set up "named" registers */
+ move rSELF, a0
+ lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+ addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs[] (i.e. - the Dalvik registers).
+ EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
+ lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
+ addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[]
+ EAS1(rPC, rPC, a0) # Create direct pointer to 1st dex opcode
+
+ EXPORT_PC()
+
+ /* Starting ibase */
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+ /* start executing the instruction at rPC */
+ FETCH_INST() # load rINST from rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* NOTE: no fallthrough */
+
+
+ .global artMterpAsmInstructionStart
+ .type artMterpAsmInstructionStart, %function
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+/* ------------------------------ */
+ .balign 128
+.L_op_nop: /* 0x00 */
+/* File: mips/op_nop.S */
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move: /* 0x01 */
+/* File: mips/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ .if 0
+ SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[A] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: mips/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT(a2, a0) # fp[AA] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[AA] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: mips/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(a1, 2) # a1 <- BBBB
+ FETCH(a0, 1) # a0 <- AAAA
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT(a2, a0) # fp[AAAA] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[AAAA] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: mips/op_move_wide.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ GET_OPA4(a2) # a2 <- A(+)
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: mips/op_move_wide_from16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 1) # a3 <- BBBB
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: mips/op_move_wide_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 2) # a3 <- BBBB
+ FETCH(a2, 1) # a2 <- AAAA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[AAAA] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: mips/op_move_object.S */
+/* File: mips/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ .if 1
+ SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[A] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: mips/op_move_object_from16.S */
+/* File: mips/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT(a2, a0) # fp[AA] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[AA] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: mips/op_move_object_16.S */
+/* File: mips/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(a1, 2) # a1 <- BBBB
+ FETCH(a0, 1) # a0 <- AAAA
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT(a2, a0) # fp[AAAA] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[AAAA] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: mips/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT(a0, a2) # fp[AA] <- a0
+ .else
+ SET_VREG(a0, a2) # fp[AA] <- a0
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: mips/op_move_result_wide.S */
+ /* move-result-wide vAA */
+ GET_OPA(a2) # a2 <- AA
+ lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ LOAD64(a0, a1, a3) # a0/a1 <- retval.j
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: mips/op_move_result_object.S */
+/* File: mips/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT(a0, a2) # fp[AA] <- a0
+ .else
+ SET_VREG(a0, a2) # fp[AA] <- a0
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: mips/op_move_exception.S */
+ /* move-exception vAA */
+ GET_OPA(a2) # a2 <- AA
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: mips/op_return_void.S */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ move v0, zero
+ move v1, zero
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return: /* 0x0f */
+/* File: mips/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(v0, a2) # v0 <- vAA
+ move v1, zero
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: mips/op_return_wide.S */
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ LOAD64(v0, v1, a2) # v0/v1 <- vAA/vAA+1
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: mips/op_return_object.S */
+/* File: mips/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(v0, a2) # v0 <- vAA
+ move v1, zero
+ b MterpReturn
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: mips/op_const_4.S */
+ # const/4 vA, /* +B */
+ sll a1, rINST, 16 # a1 <- Bxxx0000
+ GET_OPA(a0) # a0 <- A+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
+ and a0, a0, 15
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
+
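+ /*
+ * Illustrative example (hypothetical values, not emitted code): for
+ * "const/4 v0, #-1" the code unit is 0xF012 (opcode 0x12, A=0, B=0xF).
+ * "sll 16" gives 0xF0120000 and "sra 28" gives 0xFFFFFFFF, i.e. B
+ * sign-extended to -1, while GET_OPA plus "and 15" select register v0.
+ */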
+/* ------------------------------ */
+ .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: mips/op_const_16.S */
+ # const/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const: /* 0x14 */
+/* File: mips/op_const.S */
+ # const vAA, /* +BBBBbbbb */
+ GET_OPA(a3) # a3 <- AA
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a1, a1, 16
+ or a0, a1, a0 # a0 <- BBBBbbbb
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: mips/op_const_high16.S */
+ # const/high16 vAA, /* +BBBB0000 */
+ FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ sll a0, a0, 16 # a0 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: mips/op_const_wide_16.S */
+ # const-wide/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ sra a1, a0, 31 # a1 <- ssssssss
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: mips/op_const_wide_32.S */
+ # const-wide/32 vAA, /* +BBBBbbbb */
+ FETCH(a0, 1) # a0 <- 0000bbbb (low)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a2, a2, 16
+ or a0, a0, a2 # a0 <- BBBBbbbb
+ sra a1, a0, 31 # a1 <- ssssssss
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: mips/op_const_wide.S */
+ # const-wide vAA, /* +HHHHhhhhBBBBbbbb */
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (low middle)
+ FETCH(a2, 3) # a2 <- hhhh (high middle)
+ sll a1, 16 #
+ or a0, a1 # a0 <- BBBBbbbb (low word)
+ FETCH(a3, 4) # a3 <- HHHH (high)
+ GET_OPA(t1) # t1 <- AA
+ sll a3, 16
+ or a1, a3, a2 # a1 <- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, t1) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
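+ /*
+ * Illustrative example (hypothetical literal): for
+ * "const-wide v0, #0x1122334455667788" the literal is stored as the four
+ * code units bbbb=0x7788, BBBB=0x5566, hhhh=0x3344, HHHH=0x1122, so the
+ * code above assembles a0 = 0x55667788 (low word) and a1 = 0x11223344
+ * (high word) before SET_VREG64 stores them to v0/v1.
+ */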
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: mips/op_const_wide_high16.S */
+ # const-wide/high16 vAA, /* +BBBB000000000000 */
+ FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ li a0, 0 # a0 <- 00000000
+ sll a1, 16 # a1 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: mips/op_const_string.S */
+ # const/string vAA, String /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: mips/op_const_string_jumbo.S */
+ # const/string vAA, String /* BBBBBBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a2, 2) # a2 <- BBBB (high)
+ GET_OPA(a1) # a1 <- AA
+ sll a2, a2, 16
+ or a0, a0, a2 # a0 <- BBBBbbbb
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(3) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(3) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: mips/op_const_class.S */
+ # const/class vAA, Class /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstClass)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: mips/op_monitor_enter.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC()
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ JAL(artLockObjectFromCode) # v0 <- artLockObject(obj, self)
+ bnez v0, MterpException
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: mips/op_monitor_exit.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC()
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ JAL(artUnlockObjectFromCode) # v0 <- artUnlockObject(obj, self)
+ bnez v0, MterpException
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: mips/op_check_cast.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ # check-cast vAA, class /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ EAS2(a1, rFP, a1) # a1 <- &object
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ JAL(MterpCheckCast) # v0 <- CheckCast(index, &obj, method, self)
+ PREFETCH_INST(2)
+ bnez v0, MterpPossibleException
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: mips/op_instance_of.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ # instance-of vA, vB, class /* CCCC */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- CCCC
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &object
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ JAL(MterpInstanceOf) # v0 <- Mterp(index, &obj, method, self)
+ lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ PREFETCH_INST(2) # load rINST
+ bnez a1, MterpException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vA <- v0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: mips/op_array_length.S */
+ /*
+ * Return the length of an array.
+ */
+ GET_OPB(a1) # a1 <- B
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a0, a1) # a0 <- vB (object ref)
+ # is object null?
+ beqz a0, common_errNullObject # yup, fail
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- array length
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a3, a2, t0) # vA <- length
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: mips/op_new_instance.S */
+ /*
+ * Create a new instance of a class.
+ */
+ # new-instance vAA, class /* BBBB */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rSELF
+ move a2, rINST
+ JAL(MterpNewInstance)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: mips/op_new_array.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ JAL(MterpNewArray)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: mips/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ .extern MterpFilledNewArray
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
+ move a1, rPC
+ move a2, rSELF
+ JAL(MterpFilledNewArray) # v0 <- helper(shadow_frame, pc, self)
+ beqz v0, MterpPossibleException # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: mips/op_filled_new_array_range.S */
+/* File: mips/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ .extern MterpFilledNewArrayRange
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
+ move a1, rPC
+ move a2, rSELF
+ JAL(MterpFilledNewArrayRange) # v0 <- helper(shadow_frame, pc, self)
+ beqz v0, MterpPossibleException # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: mips/op_fill_array_data.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll a1, a1, 16 # a1 <- BBBB0000
+ or a1, a0, a1 # a1 <- BBBBbbbb
+ GET_VREG(a0, a3) # a0 <- vAA (array object)
+ EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
+ JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
+ beqz v0, MterpPossibleException # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_throw: /* 0x27 */
+/* File: mips/op_throw.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC() # exception handler can throw
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a1, a2) # a1 <- vAA (exception object)
+ # null object?
+ beqz a1, common_errNullObject # yes, throw an NPE instead
+ sw a1, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
+ b MterpException
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto: /* 0x28 */
+/* File: mips/op_goto.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+#if MTERP_PROFILE_BRANCHES
+ sll a0, rINST, 16 # a0 <- AAxx0000
+ sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a2, rINST, rINST # a2 <- byte offset
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ /* If backwards branch refresh rIBASE */
+ bgez a2, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ sll a0, rINST, 16 # a0 <- AAxx0000
+ sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
+ addu a2, rINST, rINST # a2 <- byte offset
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ /* If backwards branch refresh rIBASE */
+ bgez a2, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
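+ /*
+ * Illustrative example (hypothetical offset): for "goto -3" rINST holds
+ * 0xFD28; the sll/sra pair sign-extends AA to -3 and doubling gives a byte
+ * offset of -6.  A negative (backward) offset fails the bgez test, so
+ * control goes through MterpCheckSuspendAndContinue, which performs the
+ * suspend check and the rIBASE refresh noted in the comment above.
+ */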
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: mips/op_goto_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_PROFILE_BRANCHES
+ FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a1, rINST, rINST # a1 <- byte offset, flags set
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
+ addu a1, rINST, rINST # a1 <- byte offset, flags set
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: mips/op_goto_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_PROFILE_BRANCHES
+ FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ sll a1, a1, 16
+ or rINST, a0, a1 # rINST <- AAAAaaaa
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ sll a1, a1, 16
+ or rINST, a0, a1 # rINST <- AAAAaaaa
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: mips/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_PROFILE_BRANCHES
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL(MterpDoPackedSwitch) # a0 <- code-unit branch offset
+ move rINST, v0
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, .Lop_packed_switch_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+#else
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL(MterpDoPackedSwitch) # a0 <- code-unit branch offset
+ move rINST, v0
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: mips/op_sparse_switch.S */
+/* File: mips/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_PROFILE_BRANCHES
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL(MterpDoSparseSwitch) # a0 <- code-unit branch offset
+ move rINST, v0
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, .Lop_sparse_switch_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+#else
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL(MterpDoSparseSwitch) # a0 <- code-unit branch offset
+ move rINST, v0
+ addu a1, rINST, rINST # a1 <- byte offset
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a1, 1f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: mips/op_cmpl_float.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register rTEMP based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1 or 1}; // one or both operands were NaN
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+
+ /* "clasic" form */
+ FETCH(a0, 1) # a0 <- CCBB
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8
+ GET_VREG_F(ft0, a2)
+ GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+ cmp.ult.s ft2, ft0, ft1 # Is ft0 < ft1
+ li rTEMP, -1
+ bc1nez ft2, .Lop_cmpl_float_finish
+ cmp.ult.s ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, .Lop_cmpl_float_finish
+ cmp.eq.s ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, .Lop_cmpl_float_finish
+ b .Lop_cmpl_float_nan
+#else
+ c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
+ li rTEMP, -1
+ bc1t fcc0, .Lop_cmpl_float_finish
+ c.olt.s fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, .Lop_cmpl_float_finish
+ c.eq.s fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, .Lop_cmpl_float_finish
+ b .Lop_cmpl_float_nan
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: mips/op_cmpg_float.S */
+/* File: mips/op_cmpl_float.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register rTEMP based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1 or 1}; // one or both operands were NaN
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+
+ /* "clasic" form */
+ FETCH(a0, 1) # a0 <- CCBB
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8
+ GET_VREG_F(ft0, a2)
+ GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+ cmp.ult.s ft2, ft0, ft1 # Is ft0 < ft1
+ li rTEMP, -1
+ bc1nez ft2, .Lop_cmpg_float_finish
+ cmp.ult.s ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, .Lop_cmpg_float_finish
+ cmp.eq.s ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, .Lop_cmpg_float_finish
+ b .Lop_cmpg_float_nan
+#else
+ c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
+ li rTEMP, -1
+ bc1t fcc0, .Lop_cmpg_float_finish
+ c.olt.s fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, .Lop_cmpg_float_finish
+ c.eq.s fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, .Lop_cmpg_float_finish
+ b .Lop_cmpg_float_nan
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: mips/op_cmpl_double.S */
+ /*
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register (rTEMP) based on the comparison results.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See op_cmpl_float for more details.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and rOBJ, a0, 255 # s5 <- BB
+ srl t0, a0, 8 # t0 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[BB]
+ EAS2(t0, rFP, t0) # t0 <- &fp[CC]
+ LOAD64_F(ft0, ft0f, rOBJ)
+ LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+ cmp.ult.d ft2, ft0, ft1
+ li rTEMP, -1
+ bc1nez ft2, .Lop_cmpl_double_finish
+ cmp.ult.d ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, .Lop_cmpl_double_finish
+ cmp.eq.d ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, .Lop_cmpl_double_finish
+ b .Lop_cmpl_double_nan
+#else
+ c.olt.d fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, .Lop_cmpl_double_finish
+ c.olt.d fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, .Lop_cmpl_double_finish
+ c.eq.d fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, .Lop_cmpl_double_finish
+ b .Lop_cmpl_double_nan
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: mips/op_cmpg_double.S */
+/* File: mips/op_cmpl_double.S */
+ /*
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register (rTEMP) based on the comparison results.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See op_cmpl_float for more details.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and rOBJ, a0, 255 # s5 <- BB
+ srl t0, a0, 8 # t0 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[BB]
+ EAS2(t0, rFP, t0) # t0 <- &fp[CC]
+ LOAD64_F(ft0, ft0f, rOBJ)
+ LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+ cmp.ult.d ft2, ft0, ft1
+ li rTEMP, -1
+ bc1nez ft2, .Lop_cmpg_double_finish
+ cmp.ult.d ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, .Lop_cmpg_double_finish
+ cmp.eq.d ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, .Lop_cmpg_double_finish
+ b .Lop_cmpg_double_nan
+#else
+ c.olt.d fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, .Lop_cmpg_double_finish
+ c.olt.d fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, .Lop_cmpg_double_finish
+ c.eq.d fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, .Lop_cmpg_double_finish
+ b .Lop_cmpg_double_nan
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: mips/op_cmp_long.S */
+ /*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * I think I can improve on the ARM code by the following observation
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+ * sgt t1, x.hi, y.hi; # (x.hi > y.hi) ? 1:0
+ * subu v0, t1, t0 # v0 <- -1:1:0 for [ < > = ]
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(a3, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ slt t0, a1, a3 # compare hi
+ sgt t1, a1, a3
+ subu v0, t1, t0 # v0 <- (-1, 1, 0)
+ bnez v0, .Lop_cmp_long_finish
+ # at this point x.hi==y.hi
+ sltu t0, a0, a2 # compare lo
+ sgtu t1, a0, a2
+ subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
+
+.Lop_cmp_long_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: mips/op_if_eq.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ bne a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a2, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ bgez a2, .L_op_if_eq_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+
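+ /*
+ * Note on the not-taken path above: an if-cmp instruction is 2 code units
+ * wide, so loading rINST with 2 makes the shared tail simply fall through
+ * to the next instruction; on the taken path rINST instead holds the
+ * signed CCCC offset, which is doubled into a byte offset before
+ * FETCH_ADVANCE_INST_RB.
+ */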
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: mips/op_if_ne.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ beq a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a2, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ bgez a2, .L_op_if_ne_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: mips/op_if_lt.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ bge a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a2, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ bgez a2, .L_op_if_lt_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: mips/op_if_ge.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ blt a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a2, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ bgez a2, .L_op_if_ge_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: mips/op_if_gt.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ ble a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a2, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ bgez a2, .L_op_if_gt_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: mips/op_if_le.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ bgt a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a2, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ bgez a2, .L_op_if_le_finish
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: mips/op_if_eqz.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ bne a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a1, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 3f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+3:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: mips/op_if_nez.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ beq a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a1, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 3f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+3:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: mips/op_if_ltz.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ bge a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a1, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 3f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+3:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: mips/op_if_gez.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ blt a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a1, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 3f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+3:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: mips/op_if_gtz.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ ble a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a1, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 3f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+3:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: mips/op_if_lez.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ bgt a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li rINST, 2 # rINST <- branch dist (in code units) for not-taken
+2:
+#if MTERP_PROFILE_BRANCHES
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpProfileBranch) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
+#endif
+ addu a1, rINST, rINST # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 3f
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ b MterpCheckSuspendAndContinue
+3:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: mips/op_unused_3e.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: mips/op_unused_3f.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: mips/op_unused_40.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: mips/op_unused_41.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: mips/op_unused_42.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: mips/op_unused_43.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget: /* 0x44 */
+/* File: mips/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 2
+ EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
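+ /*
+ * Note: the ".if 2 / EASN(..., 2)" pair reflects the element-size shift
+ * substituted when this handler was expanded from mips/op_aget.S; a shift
+ * of 2 scales the index by 4 for int-sized elements, while the byte-sized
+ * variants below use ".if 0" and fall through to a plain addu.
+ */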
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: mips/op_aget_wide.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a2, a3, rOBJ, t0) # vAA/vAA+1 <- a2/a3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: mips/op_aget_object.S */
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ EXPORT_PC()
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ JAL(artAGetObjectFromMterp) # v0 <- GetObj(array, index)
+ lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ PREFETCH_INST(2) # load rINST
+ bnez a1, MterpException
+ SET_VREG_OBJECT(v0, rOBJ) # vAA <- v0
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: mips/op_aget_boolean.S */
+/* File: mips/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 0
+ EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: mips/op_aget_byte.S */
+/* File: mips/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 0
+ EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: mips/op_aget_char.S */
+/* File: mips/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 1
+ EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: mips/op_aget_short.S */
+/* File: mips/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 1
+ EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput: /* 0x4b */
+/* File: mips/op_aput.S */
+
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 2
+ EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: mips/op_aput_wide.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so a single 64-bit store is okay.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t0) # t0 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
+ # compare unsigned index, length
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # vBB[vCC] <- a2/a3
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: mips/op_aput_object.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ *
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ JAL(MterpAputObject)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: mips/op_aput_boolean.S */
+/* File: mips/op_aput.S */
+
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 0
+ EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: mips/op_aput_byte.S */
+/* File: mips/op_aput.S */
+
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 0
+ EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: mips/op_aput_char.S */
+/* File: mips/op_aput.S */
+
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 1
+ EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: mips/op_aput_short.S */
+/* File: mips/op_aput.S */
+
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ .if 1
+ EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget: /* 0x52 */
+/* File: mips/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGet32InstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ .else
+ SET_VREG(v0, a2) # fp[A] <- v0
+ .endif
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: mips/op_iget_wide.S */
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field byte offset
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGet64InstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpException # bail out
+ SET_VREG64(v0, v1, a2) # fp[A] <- v0/v1
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: mips/op_iget_object.S */
+/* File: mips/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGetObjInstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ .if 1
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ .else
+ SET_VREG(v0, a2) # fp[A] <- v0
+ .endif
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: mips/op_iget_boolean.S */
+/* File: mips/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGetBooleanInstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ .else
+ SET_VREG(v0, a2) # fp[A] <- v0
+ .endif
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: mips/op_iget_byte.S */
+/* File: mips/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGetByteInstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ .else
+ SET_VREG(v0, a2) # fp[A] <- v0
+ .endif
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: mips/op_iget_char.S */
+/* File: mips/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGetCharInstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ .else
+ SET_VREG(v0, a2) # fp[A] <- v0
+ .endif
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: mips/op_iget_short.S */
+/* File: mips/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ JAL(artGetShortInstanceFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ .else
+ SET_VREG(v0, a2) # fp[A] <- v0
+ .endif
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput: /* 0x59 */
+/* File: mips/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ # op vA, vB, field /* CCCC */
+ .extern artSet32InstanceFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a2, a2) # a2 <- fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet32InstanceFromMterp)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: mips/op_iput_wide.S */
+ # iput-wide vA, vB, field /* CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ EAS2(a2, rFP, a2) # a2 <- &fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet64InstanceFromMterp)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: mips/op_iput_object.S */
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ # op vA, vB, field /* CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ JAL(MterpIputObject)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: mips/op_iput_boolean.S */
+/* File: mips/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ # op vA, vB, field /* CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a2, a2) # a2 <- fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet8InstanceFromMterp)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: mips/op_iput_byte.S */
+/* File: mips/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ # op vA, vB, field /* CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a2, a2) # a2 <- fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet8InstanceFromMterp)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: mips/op_iput_char.S */
+/* File: mips/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ # op vA, vB, field /* CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a2, a2) # a2 <- fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet16InstanceFromMterp)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: mips/op_iput_short.S */
+/* File: mips/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ # op vA, vB, field /* CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a2, a2) # a2 <- fp[A]
+ lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet16InstanceFromMterp)
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget: /* 0x60 */
+/* File: mips/op_sget.S */
+ /*
+ * General SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ .extern artGet32StaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGet32StaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA(a2) # a2 <- AA
+ PREFETCH_INST(2)
+ bnez a3, MterpException # bail out
+.if 0
+ SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
+.else
+ SET_VREG(v0, a2) # fp[AA] <- v0
+.endif
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: mips/op_sget_wide.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ # sget-wide vAA, field /* BBBB */
+ .extern artGet64StaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGet64StaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ bnez a3, MterpException
+ GET_OPA(a1) # a1 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ SET_VREG64(v0, v1, a1) # vAA/vAA+1 <- v0/v1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: mips/op_sget_object.S */
+/* File: mips/op_sget.S */
+ /*
+ * General SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ .extern artGetObjStaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGetObjStaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA(a2) # a2 <- AA
+ PREFETCH_INST(2)
+ bnez a3, MterpException # bail out
+.if 1
+ SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
+.else
+ SET_VREG(v0, a2) # fp[AA] <- v0
+.endif
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: mips/op_sget_boolean.S */
+/* File: mips/op_sget.S */
+ /*
+ * General SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ .extern artGetBooleanStaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGetBooleanStaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA(a2) # a2 <- AA
+ PREFETCH_INST(2)
+ bnez a3, MterpException # bail out
+.if 0
+ SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
+.else
+ SET_VREG(v0, a2) # fp[AA] <- v0
+.endif
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: mips/op_sget_byte.S */
+/* File: mips/op_sget.S */
+ /*
+ * General SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ .extern artGetByteStaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGetByteStaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA(a2) # a2 <- AA
+ PREFETCH_INST(2)
+ bnez a3, MterpException # bail out
+.if 0
+ SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
+.else
+ SET_VREG(v0, a2) # fp[AA] <- v0
+.endif
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: mips/op_sget_char.S */
+/* File: mips/op_sget.S */
+ /*
+ * General SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ .extern artGetCharStaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGetCharStaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA(a2) # a2 <- AA
+ PREFETCH_INST(2)
+ bnez a3, MterpException # bail out
+.if 0
+ SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
+.else
+ SET_VREG(v0, a2) # fp[AA] <- v0
+.endif
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: mips/op_sget_short.S */
+/* File: mips/op_sget.S */
+ /*
+ * General SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ .extern artGetShortStaticFromCode
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ move a2, rSELF # a2 <- self
+ JAL(artGetShortStaticFromCode)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA(a2) # a2 <- AA
+ PREFETCH_INST(2)
+ bnez a3, MterpException # bail out
+.if 0
+ SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
+.else
+ SET_VREG(v0, a2) # fp[AA] <- v0
+.endif
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput: /* 0x67 */
+/* File: mips/op_sput.S */
+ /*
+ * General SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ GET_OPA(a3) # a3 <- AA
+ GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet32StaticFromCode)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: mips/op_sput_wide.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ # sput-wide vAA, field /* BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref CCCC
+ lw a1, OFF_FP_METHOD(rFP) # a1 <- method
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet64IndirectStaticFromMterp)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: mips/op_sput_object.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput-object,
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ JAL(MterpSputObject)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: mips/op_sput_boolean.S */
+/* File: mips/op_sput.S */
+ /*
+ * General SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ GET_OPA(a3) # a3 <- AA
+ GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet8StaticFromCode)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: mips/op_sput_byte.S */
+/* File: mips/op_sput.S */
+ /*
+ * General SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ GET_OPA(a3) # a3 <- AA
+ GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet8StaticFromCode)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: mips/op_sput_char.S */
+/* File: mips/op_sput.S */
+ /*
+ * General SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ GET_OPA(a3) # a3 <- AA
+ GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet16StaticFromCode)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: mips/op_sput_short.S */
+/* File: mips/op_sput.S */
+ /*
+ * General SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- field ref BBBB
+ GET_OPA(a3) # a3 <- AA
+ GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ PREFETCH_INST(2) # load rINST
+ JAL(artSet16StaticFromCode)
+ bnez v0, MterpException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: mips/op_invoke_virtual.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeVirtual
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeVirtual)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: mips/op_invoke_super.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeSuper
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeSuper)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: mips/op_invoke_direct.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeDirect
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeDirect)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: mips/op_invoke_static.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeStatic
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeStatic)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: mips/op_invoke_interface.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeInterface
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeInterface)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: mips/op_return_void_no_barrier.S */
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ move v0, zero
+ move v1, zero
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: mips/op_invoke_virtual_range.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeVirtualRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeVirtualRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: mips/op_invoke_super_range.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeSuperRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeSuperRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: mips/op_invoke_direct_range.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeDirectRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeDirectRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: mips/op_invoke_static_range.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeStaticRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeStaticRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: mips/op_invoke_interface_range.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeInterfaceRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeInterfaceRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: mips/op_unused_79.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: mips/op_unused_7a.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: mips/op_neg_int.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ negu a0, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: mips/op_not_int.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ not a0, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: mips/op_neg_long.S */
+/* File: mips/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double,
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ negu v0, a0 # optional op
+ negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0 # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(v0, v1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: mips/op_not_long.S */
+/* File: mips/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double,
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ not a0, a0 # optional op
+ not a1, a1 # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: mips/op_neg_float.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ addu a0, a0, 0x80000000 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: mips/op_neg_double.S */
+/* File: mips/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double,
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ addu a1, a1, 0x80000000 # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: mips/op_int_to_long.S */
+/* File: mips/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ sra a1, a0, 31 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, rOBJ) # vA/vA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: mips/op_int_to_float.S */
+/* File: mips/funop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.s.w fv0, fa0
+
+.Lop_int_to_float_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GOTO_OPCODE(t1) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: mips/op_int_to_double.S */
+/* File: mips/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.d.w fv0, fa0
+
+.Lop_int_to_double_set_vreg:
+ SET_VREG64_F(fv0, fv0f, rOBJ) # vA/vA+1 <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: mips/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: mips/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ .if 0
+ SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ .else
+ SET_VREG(a2, a0) # fp[A] <- a2
+ .endif
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: mips/op_long_to_float.S */
+/* File: mips/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter,
+ * except for long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(rARG0, rARG1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(__floatdisf)
+
+.Lop_long_to_float_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: mips/op_long_to_double.S */
+/* File: mips/funopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * For: long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(rARG0, rARG1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
+
+.Lop_long_to_double_set_vreg:
+ SET_VREG64_F(fv0, fv0f, rOBJ) # vAA <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: mips/op_float_to_int.S */
+/* File: mips/funop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ b f2i_doconv
+
+.Lop_float_to_int_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GOTO_OPCODE(t1) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: mips/op_float_to_long.S */
+/* File: mips/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ b f2l_doconv
+
+.Lop_float_to_long_set_vreg:
+ SET_VREG64(rRESULT0, rRESULT1, rOBJ) # vA/vA+1 <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: mips/op_float_to_double.S */
+/* File: mips/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.d.s fv0, fa0
+
+.Lop_float_to_double_set_vreg:
+ SET_VREG64_F(fv0, fv0f, rOBJ) # vA/vA+1 <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: mips/op_double_to_int.S */
+/* File: mips/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter,
+ * except for long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64_F(fa0, fa0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ b d2i_doconv
+
+.Lop_double_to_int_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: mips/op_double_to_long.S */
+/* File: mips/funopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * For: long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64_F(fa0, fa0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ b d2l_doconv # a0/a1 <- op, a2-a3 changed
+
+.Lop_double_to_long_set_vreg:
+ SET_VREG64(rRESULT0, rRESULT1, rOBJ) # vAA <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: mips/op_double_to_float.S */
+/* File: mips/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter,
+ * except for long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64_F(fa0, fa0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.s.d fv0, fa0
+
+.Lop_double_to_float_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: mips/op_int_to_byte.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sll a0, a0, 24 # optional op
+ sra a0, a0, 24 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: mips/op_int_to_char.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ and a0, 0xffff # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: mips/op_int_to_short.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sll a0, 16 # optional op
+ sra a0, 16 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: mips/op_add_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: mips/op_sub_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ subu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: mips/op_mul_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: mips/op_div_int.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+#else
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ div zero, a0, a1 # optional op
+ mflo a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: mips/op_rem_int.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+#else
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ div zero, a0, a1 # optional op
+ mfhi a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: mips/op_and_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: mips/op_or_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: mips/op_xor_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: mips/op_shl_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: mips/op_shr_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: mips/op_ushr_int.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: mips/op_add_long.S */
+/*
+ * The compiler generates the following sequence for
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu a1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,a1
+ */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ addu v0, a2, a0 # optional op
+ addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
+ /* 14-17 instructions */
+
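+/*
+ * In C terms, the carry propagation above is roughly the following
+ * (a sketch only; a/b and res_* are made-up names for vBB, vCC and vAA):
+ *
+ *     uint32_t res_lo = b_lo + a_lo;           // addu v0, a2, a0
+ *     uint32_t carry  = (res_lo < b_lo);       // sltu v1, v0, a2
+ *     uint32_t res_hi = b_hi + a_hi + carry;   // addu a1, a3, a1; addu v1, v1, a1
+ */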
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: mips/op_sub_long.S */
+/*
+ * For little endian the code sequence looks as follows:
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * sltu a0,a0,v0
+ * subu v1,v1,a0
+ */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ subu v0, a0, a2 # optional op
+ subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: mips/op_mul_long.S */
+ /*
+ * Signed 64-bit integer multiply.
+ * a1 a0
+ * x a3 a2
+ * -------------
+ * a2a1 a2a0
+ * a3a0
+ * a3a1 (<= unused)
+ * ---------------
+ * v1 v0
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+    and       t0, a0, 255                 #  t0 <- BB
+    srl       t1, a0, 8                   #  t1 <- CC
+ EAS2(t0, rFP, t0) # t0 <- &fp[BB]
+ LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
+
+    EAS2(t1, rFP, t1)                     #  t1 <- &fp[CC]
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+
+ mul v1, a3, a0 # v1= a3a0
+#ifdef MIPS32REVGE6
+ mulu v0, a2, a0 # v0= a2a0
+ muhu t1, a2, a0
+#else
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+#endif
+ mul t0, a2, a1 # t0= a2a1
+ addu v1, v1, t1 # v1+= hi(a2a0)
+ addu v1, v1, t0 # v1= a3a0 + a2a1;
+
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ b .Lop_mul_long_finish
+
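+/*
+ * The partial-product sum above is the usual 32x32 schoolbook multiply;
+ * a rough C equivalent (illustrative names only, a = vBB, b = vCC):
+ *
+ *     uint64_t p  = (uint64_t)a_lo * b_lo;      // multu / mulu+muhu
+ *     uint32_t lo = (uint32_t)p;                // v0
+ *     uint32_t hi = (uint32_t)(p >> 32)         // hi(a2a0)
+ *                 + b_hi * a_lo                 // a3a0
+ *                 + b_lo * a_hi;                // a2a1
+ *     // [hi:lo] is the low 64 bits of the product; the a3a1 term only
+ *     // affects bits above 63 and is dropped.
+ */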
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: mips/op_div_long.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ JAL(__divdi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
+ /* 14-17 instructions */
+
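+/*
+ * Sketch of the guard and helper call above (names illustrative; a = vBB,
+ * b = vCC as 64-bit values):
+ *
+ *     if ((b_lo | b_hi) == 0)                   // or t0, a2, a3; beqz ...
+ *         goto common_errDivideByZero;
+ *     int64_t q = __divdi3(a, b);               // libgcc signed 64-bit divide,
+ *                                               // result returned in v0/v1
+ */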
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: mips/op_rem_long.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ JAL(__moddi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: mips/op_and_long.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ and a0, a0, a2 # optional op
+ and a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: mips/op_or_long.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ or a0, a0, a2 # optional op
+ or a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: mips/op_xor_long.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ xor a0, a0, a2 # optional op
+ xor a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: mips/op_shl_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t2) # t2 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+    andi    v1, a2, 0x20                  #  v1 <- shift & 0x20
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ bnez v1, .Lop_shl_long_finish
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+    SET_VREG64_GOTO(v0, v1, t2, t0)       #  vAA/vAA+1 <- v0/v1
+
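+/*
+ * The shift split above corresponds to this C sketch for shl-long
+ * (illustrative; s is the shift distance, masked to 6 bits per Dalvik):
+ *
+ *     uint32_t s = shift & 63;
+ *     if (s & 0x20) {                            // shift of 32..63,
+ *         res_hi = a_lo << (s & 31);             // completed at _finish
+ *         res_lo = 0;
+ *     } else {
+ *         res_lo = a_lo << s;
+ *         res_hi = (a_hi << s) | ((a_lo >> 1) >> (31 - s));
+ *     }
+ */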
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: mips/op_shr_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t3) # t3 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .Lop_shr_long_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+    SET_VREG64_GOTO(v0, v1, t3, t0)       #  vAA/vAA+1 <- v0/v1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: mips/op_ushr_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .Lop_ushr_long_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
+
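+/*
+ * shr-long and ushr-long differ only in whether the high word shifts in
+ * sign bits or zeroes; roughly (sketch, s = shift & 63, s < 32 shown):
+ *
+ *     res_hi = (int32_t)a_hi >> (s & 31);   // shr-long: arithmetic (sra)
+ *     res_hi = a_hi >> (s & 31);            // ushr-long: logical (srl)
+ *     // both build the low word the same way:
+ *     res_lo = (a_lo >> s) | ((a_hi << 1) << (31 - s));
+ *     // the s >= 32 case is completed at the _finish labels.
+ */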
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: mips/op_add_float.S */
+/* File: mips/fbinop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG_F(fa1, a3) # a1 <- vCC
+ GET_VREG_F(fa0, a2) # a0 <- vBB
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ add.s fv0, fa0, fa1 # f0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: mips/op_sub_float.S */
+/* File: mips/fbinop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG_F(fa1, a3) # a1 <- vCC
+ GET_VREG_F(fa0, a2) # a0 <- vBB
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ sub.s fv0, fa0, fa1 # f0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: mips/op_mul_float.S */
+/* File: mips/fbinop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG_F(fa1, a3) # a1 <- vCC
+ GET_VREG_F(fa0, a2) # a0 <- vBB
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)     #  vAA/vAA+1 <- v0/v1
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: mips/op_div_float.S */
+/* File: mips/fbinop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG_F(fa1, a3) # a1 <- vCC
+ GET_VREG_F(fa0, a2) # a0 <- vBB
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ div.s fv0, fa0, fa1 # f0 = result
+    SET_VREG64_GOTO(v0, v1, t2, t0)       #  vAA/vAA+1 <- v0/v1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: mips/op_rem_float.S */
+/* File: mips/fbinop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG_F(fa1, a3) # a1 <- vCC
+ GET_VREG_F(fa0, a2) # a0 <- vBB
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ JAL(fmodf) # f0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+    SET_VREG64_GOTO(v0, v1, t3, t0)       #  vAA/vAA+1 <- v0/v1
+
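+/*
+ * Unlike the other float ops, rem-float has no single FPU instruction and
+ * is routed through libm; in effect (sketch, names illustrative):
+ *
+ *     float result = fmodf(vBB, vCC);   // JAL(fmodf); result in fv0
+ *     vAA = result;                     // C fmod semantics: sign follows
+ *                                       // the dividend, NaN if vCC == 0
+ */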
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double: /* 0xab */
+/* File: mips/op_add_double.S */
+/* File: mips/fbinopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * for: add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ add.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ b .Lop_add_double_finish
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: mips/op_sub_double.S */
+/* File: mips/fbinopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * for: add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ sub.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ b .Lop_sub_double_finish
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: mips/op_mul_double.S */
+/* File: mips/fbinopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * for: add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ mul.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ b .Lop_mul_double_finish
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double: /* 0xae */
+/* File: mips/op_div_double.S */
+/* File: mips/fbinopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * for: add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ div.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ b .Lop_div_double_finish
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: mips/op_rem_double.S */
+/* File: mips/fbinopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * for: add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ JAL(fmod)
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ b .Lop_rem_double_finish
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: mips/op_add_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
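+/*
+ * The "/2addr" forms pack both register indices into the opcode unit
+ * itself (Dalvik format 12x), so only one code unit is consumed; in effect:
+ *
+ *     // A = bits 8-11 of inst, B = bits 12-15   (GET_OPA4 / GET_OPB)
+ *     vA = vA + vB;      // first operand doubles as the destination
+ */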
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: mips/op_sub_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ subu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: mips/op_mul_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: mips/op_div_int_2addr.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#else
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ div zero, a0, a1 # optional op
+ mflo a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: mips/op_rem_int_2addr.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#else
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ div zero, a0, a1 # optional op
+ mfhi a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: mips/op_and_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: mips/op_or_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: mips/op_xor_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: mips/op_shl_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: mips/op_shr_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: mips/op_ushr_int_2addr.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: mips/op_add_long_2addr.S */
+/*
+ * See op_add_long.S for details
+ */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ addu v0, a2, a0 # optional op
+ addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: mips/op_sub_long_2addr.S */
+/*
+ * See op_sub_long.S for more details
+ */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ subu v0, a0, a2 # optional op
+ subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: mips/op_mul_long_2addr.S */
+ /*
+ * See op_mul_long.S for more details
+ */
+ /* mul-long/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # vAA.low / high
+
+ GET_OPB(t1) # t1 <- B
+ EAS2(t1, rFP, t1) # t1 <- &fp[B]
+ LOAD64(a2, a3, t1) # vBB.low / high
+
+ mul v1, a3, a0 # v1= a3a0
+#ifdef MIPS32REVGE6
+ mulu v0, a2, a0 # v0= a2a0
+ muhu t1, a2, a0
+#else
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+#endif
+ mul t2, a2, a1 # t2= a2a1
+ addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
+ addu v1, v1, t2 # v1= v1 + a2a1;
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ # vAA <- v0 (low)
+ SET_VREG64(v0, v1, rOBJ) # vAA+1 <- v1 (high)
+ GOTO_OPCODE(t1) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: mips/op_div_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ JAL(__divdi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: mips/op_rem_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ JAL(__moddi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: mips/op_and_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ and a0, a0, a2 # optional op
+ and a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: mips/op_or_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ or a0, a0, a2 # optional op
+ or a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: mips/op_xor_long_2addr.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ xor a0, a0, a2 # optional op
+ xor a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: mips/op_shl_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
+ LOAD64(a0, a1, t2) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+    andi    v1, a2, 0x20                  #  v1 <- shift & 0x20
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ bnez v1, .Lop_shl_long_2addr_finish
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: mips/op_shr_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ GET_OPA4(t2) # t2 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t0, rFP, t2) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .Lop_shr_long_2addr_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- a0/a1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips/op_ushr_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ GET_OPA4(t3) # t3 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t0, rFP, t3) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .Lop_ushr_long_2addr_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/vAA+1 <- a0/a1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: mips/op_add_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr"
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ add.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: mips/op_sub_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr"
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ sub.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: mips/op_mul_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr"
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ mul.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: mips/op_div_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr"
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ div.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: mips/op_rem_float_2addr.S */
+/* File: mips/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr"
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ JAL(fmodf)
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: mips/op_add_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64_F(fa0, fa0f, t0)
+ LOAD64_F(fa1, fa1f, a1)
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ add.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: mips/op_sub_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64_F(fa0, fa0f, t0)
+ LOAD64_F(fa1, fa1f, a1)
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sub.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: mips/op_mul_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64_F(fa0, fa0f, t0)
+ LOAD64_F(fa1, fa1f, a1)
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ mul.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: mips/op_div_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64_F(fa0, fa0f, t0)
+ LOAD64_F(fa1, fa1f, a1)
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ div.d fv0, fa0, fa1
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: mips/op_rem_double_2addr.S */
+/* File: mips/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64_F(fa0, fa0f, t0)
+ LOAD64_F(fa1, fa1f, a1)
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(fmod)
+ SET_VREG64_F(fv0, fv0f, rOBJ)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: mips/op_add_int_lit16.S */
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 0
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
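+/*
+ * Rough C sketch of what the binopLit16 template above does.  The names
+ * (binop_lit16, vregs) are illustrative assumptions, not symbols from this
+ * file; "+" stands for whichever operation the template plugs in.
+ *
+ *     #include <stdint.h>
+ *     static void binop_lit16(uint32_t* vregs, uint16_t inst, int16_t cccc) {
+ *         uint32_t a = (inst >> 8) & 0x0f;      // destination register vA
+ *         uint32_t b = (inst >> 12) & 0x0f;     // source register vB
+ *         vregs[a] = (uint32_t)((int32_t)vregs[b] + (int32_t)cccc);
+ *     }
+ *
+ * cccc is the second code unit, already sign-extended (the FETCH_S above).
+ */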
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: mips/op_rsub_int.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 0
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ subu a0, a1, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: mips/op_mul_int_lit16.S */
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 0
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: mips/op_div_int_lit16.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 1
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#else
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 1
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ div zero, a0, a1 # optional op
+ mflo a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: mips/op_rem_int_lit16.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 1
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#else
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 1
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ div zero, a0, a1 # optional op
+ mfhi a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: mips/op_and_int_lit16.S */
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 0
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: mips/op_or_int_lit16.S */
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 0
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: mips/op_xor_int_lit16.S */
+/* File: mips/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if 0
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: mips/op_add_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
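+/*
+ * The lit8 form packs both the source register and the literal into the
+ * second code unit (ssssCCBB after sign extension).  A minimal C sketch of
+ * the unpacking done above, with illustrative names only:
+ *
+ *     #include <stdint.h>
+ *     static void binop_lit8(uint32_t* vregs, uint16_t inst, uint16_t unit1) {
+ *         uint32_t aa = (inst >> 8) & 0xff;             // destination register vAA
+ *         uint32_t bb = unit1 & 0xff;                   // source register vBB
+ *         int32_t  cc = (int32_t)(int16_t)unit1 >> 8;   // ssssssCC (arithmetic shift, as in the sra above)
+ *         vregs[aa] = (uint32_t)((int32_t)vregs[bb] + cc);
+ *     }
+ */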
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips/op_rsub_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ subu a0, a1, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: mips/op_mul_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: mips/op_div_int_lit8.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+#else
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ div zero, a0, a1 # optional op
+ mflo a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: mips/op_rem_int_lit8.S */
+#ifdef MIPS32REVGE6
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+#else
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ div zero, a0, a1 # optional op
+ mfhi a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: mips/op_and_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: mips/op_or_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: mips/op_xor_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: mips/op_shl_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: mips/op_shr_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips/op_ushr_int_lit8.S */
+/* File: mips/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: mips/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1
+ lw a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
+
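+/*
+ * The "quick" field accessors rely on a field byte offset that was resolved
+ * ahead of time and embedded directly in the instruction stream.  As an
+ * illustrative C sketch only (the real handler branches to
+ * common_errNullObject rather than dereferencing a null object):
+ *
+ *     #include <stdint.h>
+ *     static int32_t iget_quick(const uint8_t* obj, uint32_t byte_offset) {
+ *         return *(const int32_t*)(obj + byte_offset);
+ *     }
+ */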
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: mips/op_iget_wide_quick.S */
+ # iget-wide-quick vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 # t0 <- a3 + a1
+ LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: mips/op_iget_object_quick.S */
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ GET_OPB(a2) # a2 <- B
+ FETCH(a1, 1) # a1 <- field byte offset
+ EXPORT_PC()
+ GET_VREG(a0, a2) # a0 <- object we're operating on
+ JAL(artIGetObjectFromMterp) # v0 <- GetObj(obj, offset)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: mips/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sw a0, 0(t0) # obj.field (8/16/32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: mips/op_iput_wide_quick.S */
+ # iput-wide-quick vA, vB, offset /* CCCC */
+ GET_OPA4(a0) # a0 <- A(+)
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
+ # check object for null
+ beqz a2, common_errNullObject # object was null
+ EAS2(a3, rFP, a0) # a3 <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
+ FETCH(a3, 1) # a3 <- field byte offset
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu a2, a2, a3 # a2 <- &obj.field (64 bits, aligned)
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: mips/op_iput_object_quick.S */
+ /* For: iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ JAL(MterpIputObjectQuick)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips/op_invoke_virtual_quick.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeVirtualQuick
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeVirtualQuick)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips/op_invoke_virtual_range_quick.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ .extern MterpInvokeVirtualQuickRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeVirtualQuickRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: mips/op_iput_boolean_quick.S */
+/* File: mips/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: mips/op_iput_byte_quick.S */
+/* File: mips/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: mips/op_iput_char_quick.S */
+/* File: mips/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: mips/op_iput_short_quick.S */
+/* File: mips/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: mips/op_iget_boolean_quick.S */
+/* File: mips/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1
+ lbu a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: mips/op_iget_byte_quick.S */
+/* File: mips/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1
+ lb a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: mips/op_iget_char_quick.S */
+/* File: mips/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1
+ lhu a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: mips/op_iget_short_quick.S */
+/* File: mips/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1
+ lh a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: mips/op_unused_f4.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: mips/op_unused_fa.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: mips/op_unused_fb.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: mips/op_unused_fc.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: mips/op_unused_fd.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: mips/op_unused_fe.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: mips/op_unused_ff.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+ .balign 128
+ .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global artMterpAsmSisterStart
+ .type artMterpAsmSisterStart, %function
+ .text
+ .balign 4
+artMterpAsmSisterStart:
+
+/* continuation for op_packed_switch */
+
+.Lop_packed_switch_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_sparse_switch */
+
+.Lop_sparse_switch_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_cmpl_float */
+
+.Lop_cmpl_float_nan:
+ li rTEMP, -1
+
+.Lop_cmpl_float_finish:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
+
+/* continuation for op_cmpg_float */
+
+.Lop_cmpg_float_nan:
+ li rTEMP, 1
+
+.Lop_cmpg_float_finish:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
+
+/* continuation for op_cmpl_double */
+
+.Lop_cmpl_double_nan:
+ li rTEMP, -1
+
+.Lop_cmpl_double_finish:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
+
+/* continuation for op_cmpg_double */
+
+.Lop_cmpg_double_nan:
+ li rTEMP, 1
+
+.Lop_cmpg_double_finish:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
+
+/* continuation for op_if_eq */
+
+.L_op_if_eq_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_if_ne */
+
+.L_op_if_ne_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_if_lt */
+
+.L_op_if_lt_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_if_ge */
+
+.L_op_if_ge_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_if_gt */
+
+.L_op_if_gt_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_if_le */
+
+.L_op_if_le_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_float_to_int */
+
+/*
+ * Not an entry point; this code is used from only one place.
+ */
+f2i_doconv:
+#ifdef MIPS32REVGE6
+ l.s fa1, .LFLOAT_TO_INT_max
+ cmp.ule.s ft2, fa1, fa0
+ l.s fv0, .LFLOAT_TO_INT_ret_max
+ bc1nez ft2, .Lop_float_to_int_set_vreg_f
+
+ l.s fa1, .LFLOAT_TO_INT_min
+ cmp.ule.s ft2, fa0, fa1
+ l.s fv0, .LFLOAT_TO_INT_ret_min
+ bc1nez ft2, .Lop_float_to_int_set_vreg_f
+
+ mov.s fa1, fa0
+ cmp.un.s ft2, fa0, fa1
+ li.s fv0, 0
+ bc1nez ft2, .Lop_float_to_int_set_vreg_f
+#else
+ l.s fa1, .LFLOAT_TO_INT_max
+ c.ole.s fcc0, fa1, fa0
+ l.s fv0, .LFLOAT_TO_INT_ret_max
+ bc1t .Lop_float_to_int_set_vreg_f
+
+ l.s fa1, .LFLOAT_TO_INT_min
+ c.ole.s fcc0, fa0, fa1
+ l.s fv0, .LFLOAT_TO_INT_ret_min
+ bc1t .Lop_float_to_int_set_vreg_f
+
+ mov.s fa1, fa0
+ c.un.s fcc0, fa0, fa1
+ li.s fv0, 0
+ bc1t .Lop_float_to_int_set_vreg_f
+#endif
+
+ trunc.w.s fv0, fa0
+ b .Lop_float_to_int_set_vreg_f
+
+.LFLOAT_TO_INT_max:
+ .word 0x4f000000
+.LFLOAT_TO_INT_min:
+ .word 0xcf000000
+.LFLOAT_TO_INT_ret_max:
+ .word 0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+ .word 0x80000000
+
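+/*
+ * f2i_doconv above implements the Java narrowing rules for float-to-int:
+ * NaN becomes 0, out-of-range values saturate to maxint/minint, and
+ * everything else truncates toward zero.  A minimal C sketch of the same
+ * semantics (java_f2i is an illustrative name):
+ *
+ *     #include <stdint.h>
+ *     static int32_t java_f2i(float f) {
+ *         if (f != f) return 0;                        // NaN
+ *         if (f >= 2147483648.0f) return INT32_MAX;    // >= 2^31, as tested above
+ *         if (f <= -2147483648.0f) return INT32_MIN;   // <= -2^31
+ *         return (int32_t)f;                           // in range: truncate
+ *     }
+ *
+ * The long and double continuations below follow the same pattern, using
+ * 64-bit bounds and, for the long results, __fixsfdi/__fixdfdi for the
+ * in-range case.
+ */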
+/* continuation for op_float_to_long */
+
+f2l_doconv:
+#ifdef MIPS32REVGE6
+ l.s fa1, .LLONG_TO_max
+ cmp.ule.s ft2, fa1, fa0
+ li rRESULT0, ~0
+ li rRESULT1, ~0x80000000
+ bc1nez ft2, .Lop_float_to_long_set_vreg
+
+ l.s fa1, .LLONG_TO_min
+ cmp.ule.s ft2, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0x80000000
+ bc1nez ft2, .Lop_float_to_long_set_vreg
+
+ mov.s fa1, fa0
+ cmp.un.s ft2, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1nez ft2, .Lop_float_to_long_set_vreg
+#else
+ l.s fa1, .LLONG_TO_max
+ c.ole.s fcc0, fa1, fa0
+ li rRESULT0, ~0
+ li rRESULT1, ~0x80000000
+ bc1t .Lop_float_to_long_set_vreg
+
+ l.s fa1, .LLONG_TO_min
+ c.ole.s fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0x80000000
+ bc1t .Lop_float_to_long_set_vreg
+
+ mov.s fa1, fa0
+ c.un.s fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1t .Lop_float_to_long_set_vreg
+#endif
+
+ JAL(__fixsfdi)
+
+ b .Lop_float_to_long_set_vreg
+
+.LLONG_TO_max:
+ .word 0x5f000000
+
+.LLONG_TO_min:
+ .word 0xdf000000
+
+/* continuation for op_double_to_int */
+
+d2i_doconv:
+#ifdef MIPS32REVGE6
+ la t0, .LDOUBLE_TO_INT_max
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa1, fa0
+ l.s fv0, .LDOUBLE_TO_INT_maxret
+ bc1nez ft2, .Lop_double_to_int_set_vreg_f
+
+ la t0, .LDOUBLE_TO_INT_min
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa0, fa1
+ l.s fv0, .LDOUBLE_TO_INT_minret
+ bc1nez ft2, .Lop_double_to_int_set_vreg_f
+
+ mov.d fa1, fa0
+ cmp.un.d ft2, fa0, fa1
+ li.s fv0, 0
+ bc1nez ft2, .Lop_double_to_int_set_vreg_f
+#else
+ la t0, .LDOUBLE_TO_INT_max
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa1, fa0
+ l.s fv0, .LDOUBLE_TO_INT_maxret
+ bc1t .Lop_double_to_int_set_vreg_f
+
+ la t0, .LDOUBLE_TO_INT_min
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa0, fa1
+ l.s fv0, .LDOUBLE_TO_INT_minret
+ bc1t .Lop_double_to_int_set_vreg_f
+
+ mov.d fa1, fa0
+ c.un.d fcc0, fa0, fa1
+ li.s fv0, 0
+ bc1t .Lop_double_to_int_set_vreg_f
+#endif
+
+ trunc.w.d fv0, fa0
+ b .Lop_double_to_int_set_vreg_f
+
+.LDOUBLE_TO_INT_max:
+ .dword 0x41dfffffffc00000
+.LDOUBLE_TO_INT_min:
+ .dword 0xc1e0000000000000 # minint, as a double
+.LDOUBLE_TO_INT_maxret:
+ .word 0x7fffffff
+.LDOUBLE_TO_INT_minret:
+ .word 0x80000000
+
+/* continuation for op_double_to_long */
+
+d2l_doconv:
+#ifdef MIPS32REVGE6
+ la t0, .LDOUBLE_TO_LONG_max
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa1, fa0
+ la t0, .LDOUBLE_TO_LONG_ret_max
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1nez ft2, .Lop_double_to_long_set_vreg
+
+ la t0, .LDOUBLE_TO_LONG_min
+ LOAD64_F(fa1, fa1f, t0)
+ cmp.ule.d ft2, fa0, fa1
+ la t0, .LDOUBLE_TO_LONG_ret_min
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1nez ft2, .Lop_double_to_long_set_vreg
+
+ mov.d fa1, fa0
+ cmp.un.d ft2, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1nez ft2, .Lop_double_to_long_set_vreg
+#else
+ la t0, .LDOUBLE_TO_LONG_max
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa1, fa0
+ la t0, .LDOUBLE_TO_LONG_ret_max
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1t .Lop_double_to_long_set_vreg
+
+ la t0, .LDOUBLE_TO_LONG_min
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa0, fa1
+ la t0, .LDOUBLE_TO_LONG_ret_min
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1t .Lop_double_to_long_set_vreg
+
+ mov.d fa1, fa0
+ c.un.d fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1t .Lop_double_to_long_set_vreg
+#endif
+ JAL(__fixdfdi)
+ b .Lop_double_to_long_set_vreg
+
+.LDOUBLE_TO_LONG_max:
+ .dword 0x43e0000000000000 # maxlong, as a double
+.LDOUBLE_TO_LONG_min:
+ .dword 0xc3e0000000000000 # minlong, as a double
+.LDOUBLE_TO_LONG_ret_max:
+ .dword 0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+ .dword 0x8000000000000000
+
+/* continuation for op_mul_long */
+
+.Lop_mul_long_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64(v0, v1, a0) # vAA::vAA+1 <- v0(low) :: v1(high)
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_shl_long */
+
+.Lop_shl_long_finish:
+ SET_VREG64_GOTO(zero, v0, t2, t0) # vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_shr_long */
+
+.Lop_shr_long_finish:
+ sra a3, a1, 31 # a3<- sign(ah)
+ SET_VREG64_GOTO(v1, a3, t3, t0) # vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_ushr_long */
+
+.Lop_ushr_long_finish:
+ SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_add_double */
+
+.Lop_add_double_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_sub_double */
+
+.Lop_sub_double_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_mul_double */
+
+.Lop_mul_double_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_div_double */
+
+.Lop_div_double_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_rem_double */
+
+.Lop_rem_double_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for op_shl_long_2addr */
+
+.Lop_shl_long_2addr_finish:
+ SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_shr_long_2addr */
+
+.Lop_shr_long_2addr_finish:
+ sra a3, a1, 31 # a3<- sign(ah)
+ SET_VREG64_GOTO(v1, a3, t2, t0) # vAA/vAA+1 <- rlo/rhi
+
+/* continuation for op_ushr_long_2addr */
+
+.Lop_ushr_long_2addr_finish:
+ SET_VREG64_GOTO(v1, zero, t3, t0) # vAA/vAA+1 <- rlo/rhi
+
+ .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
+ .global artMterpAsmSisterEnd
+artMterpAsmSisterEnd:
+
+
+ .global artMterpAsmAltInstructionStart
+ .type artMterpAsmAltInstructionStart, %function
+ .text
+
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
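+ # Alternate ("check before") handlers, one per opcode. Each stub is padded to
+ # 128 bytes (.balign 128) so the table is indexed the same way as the primary
+ # handler table: base + opcode * 128.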
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (0 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
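+ # MterpCheckBefore returns through ra, which was preloaded above with the
+ # address of this opcode's primary handler, so execution resumes there.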
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (1 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (2 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (3 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (4 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (5 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (6 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (7 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (8 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (9 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (10 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (11 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (12 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (13 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (14 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (15 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (16 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (17 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (18 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (19 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (20 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (21 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (22 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (23 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (24 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (25 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (26 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (27 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (28 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (29 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (30 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (31 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (32 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (33 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (34 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (35 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (36 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (37 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (38 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (39 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (40 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (41 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (42 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (43 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (44 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (45 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (46 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (47 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (48 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (49 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (50 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (51 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (52 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (53 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (54 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (55 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (56 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (57 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (58 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (59 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (60 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (61 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (62 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (63 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (64 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (65 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (66 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (67 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (68 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (69 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (70 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (71 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (72 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (73 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (74 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (75 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (76 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (77 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (78 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (79 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (80 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (81 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (82 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (83 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (84 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (85 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (86 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (87 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (88 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (89 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (90 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (91 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (92 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (93 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (94 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (95 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (96 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (97 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (98 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (99 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (100 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (101 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (102 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (103 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (104 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (105 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (106 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (107 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (108 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (109 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (110 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (111 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (112 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (113 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (114 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (115 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (116 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (117 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (118 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (119 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (120 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (121 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (122 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (123 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (124 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (125 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (126 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (127 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (128 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (129 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (130 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (131 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (132 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (133 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (134 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (135 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (136 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (137 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (138 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (139 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (140 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (141 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (142 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (143 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (144 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (145 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (146 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (147 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (148 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (149 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (150 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (151 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (152 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (153 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (154 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (155 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (156 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (157 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (158 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (159 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (160 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (161 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (162 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (163 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (164 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (165 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (166 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (167 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (168 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (169 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (170 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (171 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (172 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (173 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (174 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (175 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (176 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (177 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (178 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (179 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (180 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (181 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (182 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (183 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (184 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (185 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (186 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (187 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (188 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (189 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (190 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (191 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (192 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (193 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (194 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (195 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (196 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (197 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (198 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (199 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (200 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (201 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (202 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (203 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (204 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (205 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (206 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (207 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (208 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (209 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (210 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (211 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (212 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (213 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (214 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (215 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (216 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (217 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (218 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (219 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (220 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (221 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (222 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (223 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (224 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (225 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (226 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (227 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (228 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (229 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (230 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (231 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (232 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (233 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (234 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (235 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (236 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (237 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (238 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (239 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (240 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (241 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (242 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (243 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (244 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (245 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (246 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (247 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (248 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (249 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (250 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (251 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (252 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (253 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (254 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC()
+ la ra, artMterpAsmInstructionStart + (255 * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ la a2, MterpCheckBefore
+ jalr zero, a2 # Tail call to Mterp(self, shadow_frame)
+
+ .balign 128
+ .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
+ .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+/* File: mips/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogDivideByZeroException)
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogArrayIndexException)
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNegativeArraySizeException)
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNoSuchMethodException)
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNullObjectException)
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogExceptionThrownException)
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ lw a2, THREAD_FLAGS_OFFSET(rSELF)
+ JAL(MterpLogSuspendFallback)
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ lw a0, THREAD_EXCEPTION_OFFSET(rSELF)
+ beqz a0, MterpFallback # If no pending exception, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ */
+MterpException:
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpHandleException) # (self, shadow_frame)
+ beqz v0, MterpExceptionReturn # no local catch, back to caller.
+ lw a0, OFF_FP_CODE_ITEM(rFP)
+ lw a1, OFF_FP_DEX_PC(rFP)
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+ addu rPC, a0, CODEITEM_INSNS_OFFSET
+ sll a1, a1, 1
+ addu rPC, rPC, a1 # generate new dex_pc_ptr
+ /* Do we need to switch interpreters? */
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ /* resume execution at catch block */
+ EXPORT_PC()
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a pending suspend request.  Assumes rINST already loaded and rPC advanced;
+ * the opcode still needs to be extracted and dispatched, and the thread flags are in ra.
+ */
+MterpCheckSuspendAndContinue:
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh rIBASE
+ and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ bnez ra, 1f
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+1:
+ EXPORT_PC()
+ move a0, rSELF
+ JAL(MterpSuspendCheck) # (self)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpLogOSR)
+#endif
+ li v0, 1 # Signal normal return
+ b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+ move v0, zero # signal retry with reference interpreter.
+ b MterpDone
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA.  Here we restore SP, restore the registers, and then return
+ * through RA.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ li v0, 1 # signal return to caller.
+ b MterpDone
+MterpReturn:
+ lw a2, OFF_FP_RESULT_REGISTER(rFP)
+ sw v0, 0(a2)
+ sw v1, 4(a2)
+ li v0, 1 # signal return to caller.
+MterpDone:
+/* Restore from the stack and return. Frame size = STACK_SIZE */
+ STACK_LOAD_FULL()
+ jalr zero, ra
+
+ .end ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
new file mode 100644
index 0000000..7cef823
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -0,0 +1,12362 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips64'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: mips64/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <machine/regdef.h>
+
+/* TODO: add the missing file and use its FP register definitions. */
+/* #include <machine/fpregdef.h> */
+/* FP register definitions */
+#define f0 $f0
+#define f1 $f1
+#define f2 $f2
+#define f3 $f3
+#define f12 $f12
+#define f13 $f13
+
+/*
+ * It looks like the GNU assembler currently does not support the blec and bgtc
+ * idioms, which should translate into bgec and bltc respectively with swapped
+ * left and right register operands.
+ * TODO: remove these macros when the assembler is fixed.
+ */
+.macro blec lreg, rreg, target
+ bgec \rreg, \lreg, \target
+.endm
+.macro bgtc lreg, rreg, target
+ bltc \rreg, \lreg, \target
+.endm
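+
+/*
+ * For example, under these macros "blec a0, a1, 1f" assembles as "bgec a1, a0, 1f"
+ * and "bgtc a0, a1, 1f" assembles as "bltc a1, a0, 1f", so the emitted branch tests
+ * exactly the condition the missing idiom would have expressed.
+ */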
+
+/*
+Mterp and MIPS64 notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rINST first 16-bit code unit of current instruction
+ s4 rIBASE interpreted instruction base pointer, used for computed goto
+ s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+*/
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rINST s3
+#define rIBASE s4
+#define rREFS s5
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
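+
+/*
+ * Worked example of the backwards-offset scheme: the entry code below sets
+ * rFP = shadow_frame + SHADOWFRAME_VREGS_OFFSET, so
+ *     ld      a0, OFF_FP_METHOD(rFP)
+ * resolves to shadow_frame + SHADOWFRAME_METHOD_OFFSET (the method field), and
+ *     daddu   a0, rFP, OFF_FP_SHADOWFRAME
+ * recovers the address of the ShadowFrame itself.
+ */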
+
+/*
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+#define MTERP_LOGGING 0
+
+/*
+ * "Export" the PC to the dex_pc field in the shadow frame, for the benefit of future
+ * exception objects.  Must be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ sd rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
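+
+/*
+ * Typical use: handlers that call into a C helper which may throw (see, e.g.,
+ * op_const_string below) issue
+ *     EXPORT_PC
+ *     jal     MterpConstString
+ * so that exception delivery sees an up-to-date dex pc.
+ */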
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ lhu rINST, 0(rPC)
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ daddu rPC, rPC, (\count) * 2
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ADVANCE \count
+ FETCH_INST
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ lhu rINST, ((\count) * 2)(rPC)
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, 255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+.macro GOTO_OPCODE reg
+ .set noat
+ sll AT, \reg, 7
+ daddu AT, rIBASE, AT
+ jic AT, 0
+ .set at
+.endm
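+
+/*
+ * Dispatch arithmetic: every handler below is padded to 128 bytes (.balign 128),
+ * so "sll AT, opcode, 7" computes opcode * 128 and the jump lands at
+ * rIBASE + opcode * 128, the start of that opcode's handler.
+ */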
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ * Note, GET_VREG does sign extension to 64 bits while
+ * GET_VREG_U does zero extension to 64 bits.
+ * One is useful for arithmetic while the other is
+ * useful for storing the result value as 64-bit.
+ */
+.macro GET_VREG reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lw \reg, 0(AT)
+ .set at
+.endm
+.macro GET_VREG_U reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwu \reg, 0(AT)
+ .set at
+.endm
+.macro GET_VREG_FLOAT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwc1 \reg, 0(AT)
+ .set at
+.endm
+.macro SET_VREG reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ .set at
+.endm
+.macro SET_VREG_OBJECT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw \reg, 0(AT)
+ .set at
+.endm
+.macro SET_VREG_FLOAT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ swc1 \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ .set at
+.endm
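+
+/*
+ * Concretely, GET_VREG uses lw (sign extension) while GET_VREG_U uses lwu
+ * (zero extension): a vreg holding 0xFFFFFFFF reads as -1 with the former and as
+ * 0x00000000FFFFFFFF with the latter; object references are read with GET_VREG_U
+ * in the handlers below.
+ */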
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * Avoid unaligned memory accesses.
+ * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
+ * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
+ */
+.macro GET_VREG_WIDE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lw \reg, 0(AT)
+ lw AT, 4(AT)
+ dinsu \reg, AT, 32, 32
+ .set at
+.endm
+.macro GET_VREG_DOUBLE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwc1 \reg, 0(AT)
+ lw AT, 4(AT)
+ mthc1 AT, \reg
+ .set at
+.endm
+.macro SET_VREG_WIDE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ drotr32 \reg, \reg, 0
+ sw \reg, 4(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ sw zero, 4(AT)
+ .set at
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ sw zero, 4(AT)
+ dlsa AT, \vreg, rFP, 2
+ swc1 \reg, 0(AT)
+ mfhc1 \vreg, \reg
+ sw \vreg, 4(AT)
+ .set at
+.endm
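+
+/*
+ * The wide accessors split each 64-bit Dalvik register pair into two aligned
+ * 32-bit transfers: GET_VREG_WIDE loads the low word at offset 0 and the high
+ * word at offset 4, then merges them with dinsu, so no potentially unaligned
+ * 64-bit memory access is ever issued.
+ */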
+
+/*
+ * On-stack offsets for spilling/unspilling callee-saved registers
+ * and the frame size.
+ */
+#define STACK_OFFSET_RA 0
+#define STACK_OFFSET_GP 8
+#define STACK_OFFSET_S0 16
+#define STACK_OFFSET_S1 24
+#define STACK_OFFSET_S2 32
+#define STACK_OFFSET_S3 40
+#define STACK_OFFSET_S4 48
+#define STACK_OFFSET_S5 56
+#define STACK_SIZE 64
+
+/* Constants for float/double to int/long conversions */
+#define INT_MIN 0x80000000
+#define INT_MIN_AS_FLOAT 0xCF000000
+#define INT_MIN_AS_DOUBLE 0xC1E0000000000000
+#define LONG_MIN 0x8000000000000000
+#define LONG_MIN_AS_FLOAT 0xDF000000
+#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000
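+
+/*
+ * These are the raw IEEE-754 bit patterns of INT_MIN and LONG_MIN: 0xCF000000
+ * encodes -2^31 as a single-precision float, 0xC1E0000000000000 encodes it as a
+ * double, and the LONG_MIN variants encode -2^63 the same way.
+ */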
+
+/* File: mips64/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Interpreter entry point.
+ */
+
+ .set reorder
+
+ .text
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+ .balign 16
+/*
+ * On entry:
+ * a0 Thread* self
+ * a1 code_item
+ * a2 ShadowFrame
+ * a3 JValue* result_register
+ *
+ */
+ExecuteMterpImpl:
+ .cfi_startproc
+ .cpsetup t9, t8, ExecuteMterpImpl
+
+ .cfi_def_cfa sp, 0
+ daddu sp, sp, -STACK_SIZE
+ .cfi_adjust_cfa_offset STACK_SIZE
+
+ sd t8, STACK_OFFSET_GP(sp)
+ .cfi_rel_offset 28, STACK_OFFSET_GP
+ sd ra, STACK_OFFSET_RA(sp)
+ .cfi_rel_offset 31, STACK_OFFSET_RA
+
+ sd s0, STACK_OFFSET_S0(sp)
+ .cfi_rel_offset 16, STACK_OFFSET_S0
+ sd s1, STACK_OFFSET_S1(sp)
+ .cfi_rel_offset 17, STACK_OFFSET_S1
+ sd s2, STACK_OFFSET_S2(sp)
+ .cfi_rel_offset 18, STACK_OFFSET_S2
+ sd s3, STACK_OFFSET_S3(sp)
+ .cfi_rel_offset 19, STACK_OFFSET_S3
+ sd s4, STACK_OFFSET_S4(sp)
+ .cfi_rel_offset 20, STACK_OFFSET_S4
+ sd s5, STACK_OFFSET_S5(sp)
+ .cfi_rel_offset 21, STACK_OFFSET_S5
+
+ /* Remember the return register */
+ sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+ /* Remember the code_item */
+ sd a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+
+ /* set up "named" registers */
+ move rSELF, a0
+ daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
+ lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+ dlsa rREFS, v0, rFP, 2
+ daddu rPC, a1, CODEITEM_INSNS_OFFSET
+ lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
+ dlsa rPC, v0, rPC, 1
+ EXPORT_PC
+
+ /* Starting ibase */
+ REFRESH_IBASE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+ /* NOTE: no fallthrough */
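+
+/*
+ * Address arithmetic in the setup above: "dlsa rREFS, v0, rFP, 2" computes
+ * rFP + number_of_vregs * 4, placing the reference slots right after the vregs,
+ * and "dlsa rPC, v0, rPC, 1" computes insns + dex_pc * 2, since each code unit
+ * is 16 bits wide.
+ */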
+
+
+ .global artMterpAsmInstructionStart
+ .type artMterpAsmInstructionStart, %function
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+/* ------------------------------ */
+ .balign 128
+.L_op_nop: /* 0x00 */
+/* File: mips64/op_nop.S */
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move: /* 0x01 */
+/* File: mips64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT a0, a2 # vA <- vB
+ .else
+ SET_VREG a0, a2 # vA <- vB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: mips64/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ lhu a3, 2(rPC) # a3 <- BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: mips64/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ lhu a3, 4(rPC) # a3 <- BBBB
+ lhu a2, 2(rPC) # a2 <- AAAA
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAAAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: mips64/op_move_wide.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ ext a3, rINST, 12, 4 # a3 <- B
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG_WIDE a0, a3 # a0 <- vB
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- vB
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: mips64/op_move_wide_from16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lhu a3, 2(rPC) # a3 <- BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_WIDE a0, a3 # a0 <- vBBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- vBBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: mips64/op_move_wide_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lhu a3, 4(rPC) # a3 <- BBBB
+ lhu a2, 2(rPC) # a2 <- AAAA
+ GET_VREG_WIDE a0, a3 # a0 <- vBBBB
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: mips64/op_move_object.S */
+/* File: mips64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT a0, a2 # vA <- vB
+ .else
+ SET_VREG a0, a2 # vA <- vB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: mips64/op_move_object_from16.S */
+/* File: mips64/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ lhu a3, 2(rPC) # a3 <- BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: mips64/op_move_object_16.S */
+/* File: mips64/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ lhu a3, 4(rPC) # a3 <- BBBB
+ lhu a2, 2(rPC) # a2 <- AAAA
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAAAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: mips64/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT a0, a2 # vAA <- result
+ .else
+ SET_VREG a0, a2 # vAA <- result
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: mips64/op_move_result_wide.S */
+ /* for: move-result-wide */
+ /* op vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ ld a0, 0(a0) # a0 <- result.j
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- result
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: mips64/op_move_result_object.S */
+/* File: mips64/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT a0, a2 # vAA <- result
+ .else
+ SET_VREG a0, a2 # vAA <- result
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: mips64/op_move_exception.S */
+ /* move-exception vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ SET_VREG_OBJECT a0, a2 # vAA <- exception obj
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: mips64/op_return_void.S */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ li a0, 0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return: /* 0x0f */
+/* File: mips64/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: mips64/op_return_wide.S */
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_WIDE a0, a2 # a0 <- vAA
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: mips64/op_return_object.S */
+/* File: mips64/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA
+ b MterpReturn
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: mips64/op_const_4.S */
+ /* const/4 vA, #+B */
+ ext a2, rINST, 8, 4 # a2 <- A
+ seh a0, rINST # sign extend B in rINST
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ sra a0, a0, 12 # shift B into its final position
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- +B
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: mips64/op_const_16.S */
+ /* const/16 vAA, #+BBBB */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- sign-extended BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const: /* 0x14 */
+/* File: mips64/op_const.S */
+ /* const vAA, #+BBBBbbbb */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: mips64/op_const_high16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ sll a0, a0, 16 # a0 <- BBBB0000
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBB0000
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: mips64/op_const_wide_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- sign-extended BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: mips64/op_const_wide_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: mips64/op_const_wide.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ srl a4, rINST, 8 # a4 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (low middle)
+ lh a2, 6(rPC) # a2 <- hhhh (high middle)
+ lh a3, 8(rPC) # a3 <- HHHH (high)
+ FETCH_ADVANCE_INST 5 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ ins a2, a3, 16, 16 # a2 = HHHHhhhh
+ dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: mips64/op_const_wide_high16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ dsll32 a0, a0, 16 # a0 <- BBBB000000000000
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: mips64/op_const_string.S */
+ /* const/string vAA, String//BBBB */
+ .extern MterpConstString
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstString # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: mips64/op_const_string_jumbo.S */
+ /* const/string vAA, String//BBBBBBBB */
+ .extern MterpConstString
+ EXPORT_PC
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a4, 4(rPC) # a4 <- BBBB (high)
+ srl a1, rINST, 8 # a1 <- AA
+ ins a0, a4, 16, 16 # a0 <- BBBBbbbb
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstString # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 3 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: mips64/op_const_class.S */
+ /* const/class vAA, Class//BBBB */
+ .extern MterpConstClass
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstClass # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: mips64/op_monitor_enter.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ .extern artLockObjectFromCode
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ jal artLockObjectFromCode
+ bnezc v0, MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: mips64/op_monitor_exit.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ .extern artUnlockObjectFromCode
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj)
+ bnezc v0, MterpException
+ FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: mips64/op_check_cast.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class//BBBB */
+ .extern MterpCheckCast
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ dlsa a1, a1, rFP, 2 # a1 <- &object
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ jal MterpCheckCast # (index, &obj, method, self)
+ PREFETCH_INST 2
+ bnez v0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: mips64/op_instance_of.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class//CCCC */
+ .extern MterpInstanceOf
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- CCCC
+ srl a1, rINST, 12 # a1 <- B
+ dlsa a1, a1, rFP, 2 # a1 <- &object
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ jal MterpInstanceOf # (index, &obj, method, self)
+ ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a1, MterpException
+ ADVANCE 2 # advance rPC
+ SET_VREG v0, a2 # vA <- v0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: mips64/op_array_length.S */
+ /*
+ * Return the length of an array.
+ */
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a0, a1 # a0 <- vB (object ref)
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a0, common_errNullObject # yup, fail
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a3, a2 # vA <- length
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: mips64/op_new_instance.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class//BBBB */
+ .extern MterpNewInstance
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rSELF
+ move a2, rINST
+ jal MterpNewInstance # (shadow_frame, self, inst_data)
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: mips64/op_new_array.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class//CCCC */
+ .extern MterpNewArray
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ jal MterpNewArray
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: mips64/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern MterpFilledNewArray
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rSELF
+ jal MterpFilledNewArray
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: mips64/op_filled_new_array_range.S */
+/* File: mips64/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern MterpFilledNewArrayRange
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rSELF
+ jal MterpFilledNewArrayRange
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: mips64/op_fill_array_data.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ .extern MterpFillArrayData
+ EXPORT_PC
+ lh a1, 2(rPC) # a1 <- bbbb (lo)
+ lh a0, 4(rPC) # a0 <- BBBB (hi)
+ srl a3, rINST, 8 # a3 <- AA
+ ins a1, a0, 16, 16 # a1 <- BBBBbbbb
+ GET_VREG_U a0, a3 # a0 <- vAA (array object)
+ dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.)
+ jal MterpFillArrayData # (obj, payload)
+ beqzc v0, MterpPossibleException # exception?
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_throw: /* 0x27 */
+/* File: mips64/op_throw.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (exception object)
+ beqzc a0, common_errNullObject
+ sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
+ b MterpException
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto: /* 0x28 */
+/* File: mips64/op_goto.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ srl a0, rINST, 8
+ seb a0, a0 # a0 <- sign-extended AA
+ dlsa rPC, a0, rPC, 1 # rPC <- rPC + AA * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a0, 1f # AA * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a0, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: mips64/op_goto_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ lh a0, 2(rPC) # a0 <- sign-extended AAAA
+ dlsa rPC, a0, rPC, 1 # rPC <- rPC + AAAA * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a0, 1f # AAAA * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a0, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: mips64/op_goto_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+ lh a0, 2(rPC) # a0 <- aaaa (low)
+ lh a1, 4(rPC) # a1 <- AAAA (high)
+ ins a0, a1, 16, 16 # a0 = sign-extended AAAAaaaa
+ dlsa rPC, a0, rPC, 1 # rPC <- rPC + AAAAAAAA * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgtz a0, 1f # AAAAaaaa * 2 > 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ blez a0, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: mips64/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBBBBBB */
+ .extern MterpDoPackedSwitch
+ lh a0, 2(rPC) # a0 <- bbbb (lo)
+ lh a1, 4(rPC) # a1 <- BBBB (hi)
+ srl a3, rINST, 8 # a3 <- AA
+ ins a0, a1, 16, 16 # a0 <- BBBBbbbb
+ GET_VREG a1, a3 # a1 <- vAA
+ dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
+ jal MterpDoPackedSwitch # v0 <- code-unit branch offset
+ dlsa rPC, v0, rPC, 1 # rPC <- rPC + offset * 2
+ FETCH_INST # load rINST
+#if MTERP_SUSPEND
+ bgtz v0, 1f # offset * 2 > 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ blez v0, MterpCheckSuspendAndContinue
+#endif
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: mips64/op_sparse_switch.S */
+/* File: mips64/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBBBBBB */
+ .extern MterpDoSparseSwitch
+ lh a0, 2(rPC) # a0 <- bbbb (lo)
+ lh a1, 4(rPC) # a1 <- BBBB (hi)
+ srl a3, rINST, 8 # a3 <- AA
+ ins a0, a1, 16, 16 # a0 <- BBBBbbbb
+ GET_VREG a1, a3 # a1 <- vAA
+ dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
+ jal MterpDoSparseSwitch # v0 <- code-unit branch offset
+ dlsa rPC, v0, rPC, 1 # rPC <- rPC + offset * 2
+ FETCH_INST # load rINST
+#if MTERP_SUSPEND
+ bgtz v0, 1f # offset * 2 > 0 => no suspend check
+ REFRESH_IBASE
+1:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ blez v0, MterpCheckSuspendAndContinue
+#endif
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: mips64/op_cmpl_float.S */
+/* File: mips64/fcmp.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ cmp.eq.s f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if 0
+ cmp.lt.s f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: mips64/op_cmpg_float.S */
+/* File: mips64/fcmp.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ cmp.eq.s f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if 1
+ cmp.lt.s f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: mips64/op_cmpl_double.S */
+/* File: mips64/fcmpWide.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ cmp.eq.d f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if 0
+ cmp.lt.d f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: mips64/op_cmpg_double.S */
+/* File: mips64/fcmpWide.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ cmp.eq.d f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if 1
+ cmp.lt.d f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: mips64/op_cmp_long.S */
+ /* cmp-long vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ slt a2, a0, a1
+ slt a0, a1, a0
+ subu a0, a0, a2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- result
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: mips64/op_if_eq.S */
+/* File: mips64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lh a4, 2(rPC) # a4 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+
+ beqc a0, a1, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + CCCC * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # CCCC * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: mips64/op_if_ne.S */
+/* File: mips64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lh a4, 2(rPC) # a4 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+
+ bnec a0, a1, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + CCCC * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # CCCC * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: mips64/op_if_lt.S */
+/* File: mips64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lh a4, 2(rPC) # a4 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+
+ bltc a0, a1, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + CCCC * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # CCCC * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: mips64/op_if_ge.S */
+/* File: mips64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lh a4, 2(rPC) # a4 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+
+ bgec a0, a1, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + CCCC * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # CCCC * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: mips64/op_if_gt.S */
+/* File: mips64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lh a4, 2(rPC) # a4 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+
+ bgtc a0, a1, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + CCCC * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # CCCC * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: mips64/op_if_le.S */
+/* File: mips64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lh a4, 2(rPC) # a4 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+
+ blec a0, a1, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + CCCC * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # CCCC * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: mips64/op_if_eqz.S */
+/* File: mips64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lh a4, 2(rPC) # a4 <- sign-extended BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a2 # a0 <- vAA
+
+ beqzc a0, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + BBBB * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # BBBB * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: mips64/op_if_nez.S */
+/* File: mips64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lh a4, 2(rPC) # a4 <- sign-extended BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a2 # a0 <- vAA
+
+ bnezc a0, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + BBBB * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # BBBB * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: mips64/op_if_ltz.S */
+/* File: mips64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lh a4, 2(rPC) # a4 <- sign-extended BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a2 # a0 <- vAA
+
+ bltzc a0, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + BBBB * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # BBBB * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: mips64/op_if_gez.S */
+/* File: mips64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lh a4, 2(rPC) # a4 <- sign-extended BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a2 # a0 <- vAA
+
+ bgezc a0, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + BBBB * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # BBBB * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: mips64/op_if_gtz.S */
+/* File: mips64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lh a4, 2(rPC) # a4 <- sign-extended BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a2 # a0 <- vAA
+
+ bgtzc a0, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + BBBB * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # BBBB * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: mips64/op_if_lez.S */
+/* File: mips64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lh a4, 2(rPC) # a4 <- sign-extended BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a2 # a0 <- vAA
+
+ blezc a0, 1f
+ li a4, 2 # offset if branch not taken
+1:
+
+ dlsa rPC, a4, rPC, 1 # rPC <- rPC + BBBB * 2
+ FETCH_INST # load rINST
+
+#if MTERP_SUSPEND
+ bgez a4, 2f # BBBB * 2 >= 0 => no suspend check
+ REFRESH_IBASE
+2:
+#else
+ lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
+ bltz a4, MterpCheckSuspendAndContinue
+#endif
+
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: mips64/op_unused_3e.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: mips64/op_unused_3f.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: mips64/op_unused_40.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: mips64/op_unused_41.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: mips64/op_unused_42.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: mips64/op_unused_43.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget: /* 0x44 */
+/* File: mips64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 2
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
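+/*
+ * Note: the ".if"/".else" blocks in the aget/aput templates are resolved
+ * when the per-type handlers are generated: the constant is log2 of the
+ * element width, and daddu is substituted when the width is one byte
+ * because [d]lsa cannot encode a shift count of 0. The element address
+ * is formed before the bounds check, but no memory access happens until
+ * the unsigned "index < length" test has passed.
+ */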
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: mips64/op_aget_wide.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ */
+ /* aget-wide vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+ lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
+ dinsu a2, a3, 32, 32 # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: mips64/op_aget_object.S */
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ .extern artAGetObjectFromMterp
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ EXPORT_PC
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ jal artAGetObjectFromMterp # (array, index)
+ ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a4, rINST, 8 # a4 <- AA
+ PREFETCH_INST 2
+ bnez a1, MterpException
+ SET_VREG_OBJECT v0, a4 # vAA <- v0
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
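+/*
+ * Note: unlike the primitive aget variants, aget-object delegates to the
+ * artAGetObjectFromMterp helper and relies on it for the null and bounds
+ * checks; failure is reported through the thread's pending exception,
+ * which is checked here via THREAD_EXCEPTION_OFFSET before the result is
+ * stored with SET_VREG_OBJECT.
+ */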
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: mips64/op_aget_boolean.S */
+/* File: mips64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 0
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: mips64/op_aget_byte.S */
+/* File: mips64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 0
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: mips64/op_aget_char.S */
+/* File: mips64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 1
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: mips64/op_aget_short.S */
+/* File: mips64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 1
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput: /* 0x4b */
+/* File: mips64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 2
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a2, a4 # a2 <- vAA
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: mips64/op_aput_wide.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ */
+ /* aput-wide vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ GET_VREG_WIDE a2, a4 # a2 <- vAA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+ dsrl32 a2, a2, 0
+ sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
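+/*
+ * Note: aput-wide stores the 64-bit value as two 32-bit sw stores (low
+ * word, then the high word obtained with dsrl32), mirroring the paired
+ * lw/dinsu sequence in aget-wide; presumably this keeps the accesses
+ * safe when the wide array data is only 32-bit aligned.
+ */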
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: mips64/op_aput_object.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ .extern MterpAputObject
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ jal MterpAputObject
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: mips64/op_aput_boolean.S */
+/* File: mips64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 0
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a2, a4 # a2 <- vAA
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: mips64/op_aput_byte.S */
+/* File: mips64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 0
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a2, a4 # a2 <- vAA
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: mips64/op_aput_char.S */
+/* File: mips64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 1
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a2, a4 # a2 <- vAA
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: mips64/op_aput_short.S */
+/* File: mips64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if 1
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a2, a4 # a2 <- vAA
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget: /* 0x52 */
+/* File: mips64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ .extern artGet32InstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGet32InstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ .else
+ SET_VREG v0, a2 # fp[A] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
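+/*
+ * Note: the iget family passes the field index, object, referrer and
+ * self to an artGet*InstanceFromCode helper; the ".if 0"/".if 1" blocks
+ * in the generated variants select SET_VREG_OBJECT only for iget-object,
+ * so reference results are stored through the object path while all
+ * other widths use plain SET_VREG.
+ */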
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: mips64/op_iget_wide.S */
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ .extern artGet64InstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGet64InstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ SET_VREG_WIDE v0, a2 # fp[A] <- v0
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: mips64/op_iget_object.S */
+/* File: mips64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ .extern artGetObjInstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGetObjInstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ .if 1
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ .else
+ SET_VREG v0, a2 # fp[A] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: mips64/op_iget_boolean.S */
+/* File: mips64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ .extern artGetBooleanInstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGetBooleanInstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ .else
+ SET_VREG v0, a2 # fp[A] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: mips64/op_iget_byte.S */
+/* File: mips64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ .extern artGetByteInstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGetByteInstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ .else
+ SET_VREG v0, a2 # fp[A] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: mips64/op_iget_char.S */
+/* File: mips64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ .extern artGetCharInstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGetCharInstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ .else
+ SET_VREG v0, a2 # fp[A] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: mips64/op_iget_short.S */
+/* File: mips64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ .extern artGetShortInstanceFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
+ move a3, rSELF # a3 <- self
+ jal artGetShortInstanceFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ .else
+ SET_VREG v0, a2 # fp[A] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput: /* 0x59 */
+/* File: mips64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet32InstanceFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG a2, a2 # a2 <- fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal artSet32InstanceFromMterp
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: mips64/op_iput_wide.S */
+ /* iput-wide vA, vB, field//CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ dlsa a2, a2, rFP, 2 # a2 <- &fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal artSet64InstanceFromMterp
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: mips64/op_iput_object.S */
+ .extern MterpIputObject
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ jal MterpIputObject
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: mips64/op_iput_boolean.S */
+/* File: mips64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG a2, a2 # a2 <- fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal artSet8InstanceFromMterp
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: mips64/op_iput_byte.S */
+/* File: mips64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG a2, a2 # a2 <- fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal artSet8InstanceFromMterp
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: mips64/op_iput_char.S */
+/* File: mips64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG a2, a2 # a2 <- fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal artSet16InstanceFromMterp
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: mips64/op_iput_short.S */
+/* File: mips64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref CCCC
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG a2, a2 # a2 <- fp[A]
+ ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
+ PREFETCH_INST 2
+ jal artSet16InstanceFromMterp
+ bnez v0, MterpPossibleException # bail out
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget: /* 0x60 */
+/* File: mips64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artGet32StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGet32StaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a2, rINST, 8 # a2 <- AA
+
+ PREFETCH_INST 2
+ bnez a3, MterpException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
+ .else
+ SET_VREG v0, a2 # fp[AA] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: mips64/op_sget_wide.S */
+ /*
+ * SGET_WIDE handler wrapper.
+ *
+ */
+ /* sget-wide vAA, field//BBBB */
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGet64StaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a4, rINST, 8 # a4 <- AA
+ bnez a3, MterpException # bail out
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG_WIDE v0, a4
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: mips64/op_sget_object.S */
+/* File: mips64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artGetObjStaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGetObjStaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a2, rINST, 8 # a2 <- AA
+
+ PREFETCH_INST 2
+ bnez a3, MterpException # bail out
+ .if 1
+ SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
+ .else
+ SET_VREG v0, a2 # fp[AA] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: mips64/op_sget_boolean.S */
+/* File: mips64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artGetBooleanStaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGetBooleanStaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a2, rINST, 8 # a2 <- AA
+ and v0, v0, 0xff
+ PREFETCH_INST 2
+ bnez a3, MterpException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
+ .else
+ SET_VREG v0, a2 # fp[AA] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: mips64/op_sget_byte.S */
+/* File: mips64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artGetByteStaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGetByteStaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a2, rINST, 8 # a2 <- AA
+ seb v0, v0
+ PREFETCH_INST 2
+ bnez a3, MterpException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
+ .else
+ SET_VREG v0, a2 # fp[AA] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: mips64/op_sget_char.S */
+/* File: mips64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artGetCharStaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGetCharStaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a2, rINST, 8 # a2 <- AA
+ and v0, v0, 0xffff
+ PREFETCH_INST 2
+ bnez a3, MterpException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
+ .else
+ SET_VREG v0, a2 # fp[AA] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: mips64/op_sget_short.S */
+/* File: mips64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artGetShortStaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal artGetShortStaticFromCode
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a2, rINST, 8 # a2 <- AA
+ seh v0, v0
+ PREFETCH_INST 2
+ bnez a3, MterpException # bail out
+ .if 0
+ SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
+ .else
+ SET_VREG v0, a2 # fp[AA] <- v0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput: /* 0x67 */
+/* File: mips64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artSet32StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ srl a3, rINST, 8 # a3 <- AA
+ GET_VREG a1, a3 # a1 <- fp[AA]
+ ld a2, OFF_FP_METHOD(rFP)
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal artSet32StaticFromCode
+ bnezc v0, MterpException # 0 on success
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
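+/*
+ * Note: the artSet*StaticFromCode helpers return 0 on success, so a
+ * non-zero v0 routes to MterpException. PREFETCH_INST runs before the
+ * call and ADVANCE only afterwards, so rPC still points at the faulting
+ * sput if the helper raises an exception.
+ */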
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: mips64/op_sput_wide.S */
+ /*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field//BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ ld a1, OFF_FP_METHOD(rFP)
+ srl a2, rINST, 8 # a2 <- AA
+ dlsa a2, a2, rFP, 2
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal artSet64IndirectStaticFromMterp
+ bnezc v0, MterpException # 0 on success, -1 on failure
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: mips64/op_sput_object.S */
+ .extern MterpSputObject
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ jal MterpSputObject
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: mips64/op_sput_boolean.S */
+/* File: mips64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artSet8StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ srl a3, rINST, 8 # a3 <- AA
+ GET_VREG a1, a3 # a1 <- fp[AA]
+ ld a2, OFF_FP_METHOD(rFP)
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal artSet8StaticFromCode
+ bnezc v0, MterpException # 0 on success
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: mips64/op_sput_byte.S */
+/* File: mips64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artSet8StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ srl a3, rINST, 8 # a3 <- AA
+ GET_VREG a1, a3 # a1 <- fp[AA]
+ ld a2, OFF_FP_METHOD(rFP)
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal artSet8StaticFromCode
+ bnezc v0, MterpException # 0 on success
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: mips64/op_sput_char.S */
+/* File: mips64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artSet16StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ srl a3, rINST, 8 # a3 <- AA
+ GET_VREG a1, a3 # a1 <- fp[AA]
+ ld a2, OFF_FP_METHOD(rFP)
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal artSet16StaticFromCode
+ bnezc v0, MterpException # 0 on success
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: mips64/op_sput_short.S */
+/* File: mips64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ .extern artSet16StaticFromCode
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- field ref BBBB
+ srl a3, rINST, 8 # a3 <- AA
+ GET_VREG a1, a3 # a1 <- fp[AA]
+ ld a2, OFF_FP_METHOD(rFP)
+ move a3, rSELF
+ PREFETCH_INST 2 # Get next inst, but don't advance rPC
+ jal artSet16StaticFromCode
+ bnezc v0, MterpException # 0 on success
+ ADVANCE 2 # Past exception point - now advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: mips64/op_invoke_virtual.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtual
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeVirtual
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
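+/*
+ * Note: every invoke handler follows this pattern: the MterpInvoke*
+ * helper is called with self, the shadow frame, rPC and rINST in a0-a3,
+ * a zero return value means a pending exception, and on success rPC is
+ * advanced by 3 code units, the size of an invoke instruction.
+ */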
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: mips64/op_invoke_super.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuper
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeSuper
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: mips64/op_invoke_direct.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirect
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeDirect
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: mips64/op_invoke_static.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStatic
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeStatic
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: mips64/op_invoke_interface.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterface
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeInterface
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: mips64/op_return_void_no_barrier.S */
+ .extern MterpSuspendCheck
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ li a0, 0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: mips64/op_invoke_virtual_range.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualRange
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeVirtualRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: mips64/op_invoke_super_range.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuperRange
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeSuperRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: mips64/op_invoke_direct_range.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirectRange
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeDirectRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: mips64/op_invoke_static_range.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStaticRange
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeStaticRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: mips64/op_invoke_interface_range.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterfaceRange
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeInterfaceRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: mips64/op_unused_79.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: mips64/op_unused_7a.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: mips64/op_neg_int.S */
+/* File: mips64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * not-int, neg-int
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ subu a0, zero, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: mips64/op_not_int.S */
+/* File: mips64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * not-int, neg-int
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ nor a0, zero, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: mips64/op_neg_long.S */
+/* File: mips64/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * For: not-long, neg-long
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ dsubu a0, zero, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: mips64/op_not_long.S */
+/* File: mips64/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * For: not-long, neg-long
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ nor a0, zero, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: mips64/op_neg_float.S */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_FLOAT f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ neg.s f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: mips64/op_neg_double.S */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_DOUBLE f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ neg.d f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: mips64/op_int_to_long.S */
+ /* int-to-long vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits)
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- vB
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: mips64/op_int_to_float.S */
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_FLOAT f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ cvt.s.w f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: mips64/op_int_to_double.S */
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_FLOAT f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ cvt.d.w f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: mips64/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: mips64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT a0, a2 # vA <- vB
+ .else
+ SET_VREG a0, a2 # vA <- vB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: mips64/op_long_to_float.S */
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_DOUBLE f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ cvt.s.l f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: mips64/op_long_to_double.S */
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_DOUBLE f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ cvt.d.l f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: mips64/op_float_to_int.S */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_FLOAT f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, INT_MIN_AS_FLOAT
+ mtc1 t0, f1
+ cmp.le.s f1, f1, f0
+ bc1nez f1, .Lop_float_to_int_trunc
+ cmp.eq.s f1, f0, f0
+ li t0, INT_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .Lop_float_to_int_done
+
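The hand-written compare/mask sequence above (and its float-to-long, double-to-int and double-to-long siblings) exists because, as the TODO notes, the MIPS64R6 emulator does not yet support NAN2008=1, so the truncating conversion alone cannot be relied on for NaN and out-of-range inputs. What it implements is the Java-defined saturating conversion, roughly this in C (illustrative sketch, not ART code):

    #include <limits.h>
    #include <math.h>
    #include <stdint.h>

    /* Java f2i semantics: NaN -> 0, saturate at INT_MIN/INT_MAX, otherwise truncate. */
    static int32_t f2i_sketch(float f) {
        if (isnan(f)) return 0;                 /* cmp.eq.s f0, f0 masking INT_MIN down to 0 */
        if (f >= (float)INT_MAX) return INT_MAX;
        if (f <= (float)INT_MIN) return INT_MIN;
        return (int32_t)f;                      /* trunc on the .Lop_float_to_int_trunc path */
    }

The cmp.le.s against INT_MIN_AS_FLOAT picks between the truncating path and the NaN/underflow fixup shown inline.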
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: mips64/op_float_to_long.S */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_FLOAT f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, LONG_MIN_AS_FLOAT
+ mtc1 t0, f1
+ cmp.le.s f1, f1, f0
+ bc1nez f1, .Lop_float_to_long_trunc
+ cmp.eq.s f1, f0, f0
+ dli t0, LONG_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .Lop_float_to_long_done
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: mips64/op_float_to_double.S */
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_FLOAT f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ cvt.d.s f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: mips64/op_double_to_int.S */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_DOUBLE f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ dli t0, INT_MIN_AS_DOUBLE
+ dmtc1 t0, f1
+ cmp.le.d f1, f1, f0
+ bc1nez f1, .Lop_double_to_int_trunc
+ cmp.eq.d f1, f0, f0
+ li t0, INT_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .Lop_double_to_int_done
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: mips64/op_double_to_long.S */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_DOUBLE f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ dli t0, LONG_MIN_AS_DOUBLE
+ dmtc1 t0, f1
+ cmp.le.d f1, f1, f0
+ bc1nez f1, .Lop_double_to_long_trunc
+ cmp.eq.d f1, f0, f0
+ dli t0, LONG_MIN
+ mfc1 t1, f1
+ and t0, t0, t1
+ b .Lop_double_to_long_done
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: mips64/op_double_to_float.S */
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+/* File: mips64/fcvtHeader.S */
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+ * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG_DOUBLE f0, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+ cvt.s.d f0, f0
+/* File: mips64/fcvtFooter.S */
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: mips64/op_int_to_byte.S */
+/* File: mips64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * not-int, neg-int
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ seb a0, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
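The unop template above is just a vA/vB decode around one ALU instruction; int-to-byte uses seb, int-to-short uses seh, and int-to-char masks with 0xffff. For example, the seb step corresponds to a sign-extending cast (hypothetical C, not ART code):

    #include <stdint.h>

    /* int-to-byte vA, vB: vA = (int32_t)(int8_t)vB, i.e. what seb does to a0. */
    static int32_t int_to_byte_sketch(int32_t vb) {
        return (int32_t)(int8_t)vb;
    }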
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: mips64/op_int_to_char.S */
+/* File: mips64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * not-int, neg-int
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ and a0, a0, 0xffff # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: mips64/op_int_to_short.S */
+/* File: mips64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * not-int, neg-int
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ seh a0, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: mips64/op_add_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
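The binop expansions in this stretch all share the decode above and differ only in the one ALU instruction and in the .if 0/.if 1 guard, which emits the divide-by-zero branch only for div-int and rem-int (chkzero=1). The binopWide variants further down are the same shape with GET/SET_VREG_WIDE and the 64-bit (d-prefixed) instructions. A C-level sketch of add-int, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* add-int vAA, vBB, vCC: three-operand 32-bit op over virtual registers. */
    static void add_int_sketch(uint32_t *vregs, const uint16_t *code, size_t *pc) {
        uint16_t inst = code[*pc];
        const uint8_t *pcb = (const uint8_t *)(code + *pc);
        unsigned aa = inst >> 8;                  /* srl a4, rINST, 8 */
        unsigned bb = pcb[2];                     /* lbu a2, 2(rPC) */
        unsigned cc = pcb[3];                     /* lbu a3, 3(rPC) */
        uint32_t b = vregs[bb];                   /* GET_VREG a0, a2 */
        uint32_t c = vregs[cc];                   /* GET_VREG a1, a3 */
        /* chkzero=1 handlers would branch to common_errDivideByZero if c == 0. */
        vregs[aa] = b + c;                        /* addu a0, a0, a1; SET_VREG a0, a4 */
        *pc += 2;                                 /* FETCH_ADVANCE_INST 2 */
    }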
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: mips64/op_sub_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ subu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: mips64/op_mul_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: mips64/op_div_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: mips64/op_rem_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: mips64/op_and_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: mips64/op_or_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: mips64/op_xor_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: mips64/op_shl_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: mips64/op_shr_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: mips64/op_ushr_int.S */
+/* File: mips64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: mips64/op_add_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ daddu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: mips64/op_sub_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ dsubu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: mips64/op_mul_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ dmul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: mips64/op_div_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ ddiv a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: mips64/op_rem_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ dmod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: mips64/op_and_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: mips64/op_or_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: mips64/op_xor_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: mips64/op_shl_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ dsll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: mips64/op_shr_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ dsra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: mips64/op_ushr_long.S */
+/* File: mips64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ dsrl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: mips64/op_add_float.S */
+/* File: mips64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ add.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
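fbinop.S and fbinopWide.S are the floating-point analogue of the binop templates: the same AA/BB/CC decode, the arithmetic done in FPU registers, and no zero check (division by zero is well defined for floats). In C terms the add-float body is simply (illustrative):

    /* add-float vAA, vBB, vCC */
    static float add_float_sketch(float vbb, float vcc) {
        return vbb + vcc;                  /* add.s f0, f0, f1 */
    }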
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: mips64/op_sub_float.S */
+/* File: mips64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ sub.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: mips64/op_mul_float.S */
+/* File: mips64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ mul.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: mips64/op_div_float.S */
+/* File: mips64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ div.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: mips64/op_rem_float.S */
+ /* rem-float vAA, vBB, vCC */
+ .extern fmodf
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f12, a2 # f12 <- vBB
+ GET_VREG_FLOAT f13, a3 # f13 <- vCC
+ jal fmodf # f0 <- f12 op f13
+ srl a4, rINST, 8 # a4 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
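rem-float has no single MIPS instruction, so the handler above calls out to libm; the srl that extracts AA is placed right after the jal, in its delay slot. Java's float remainder matches C's fmodf, so the computation is simply (sketch):

    #include <math.h>

    /* rem-float vAA, vBB, vCC */
    static float rem_float_sketch(float vbb, float vcc) {
        return fmodf(vbb, vcc);            /* jal fmodf */
    }

rem-double further down does the same through fmod.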
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double: /* 0xab */
+/* File: mips64/op_add_double.S */
+/* File: mips64/fbinopWide.S */
+ /*
+ * Generic 64-bit floating-point operation.
+ *
+ * For: add-double, sub-double, mul-double, div-double.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ add.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: mips64/op_sub_double.S */
+/* File: mips64/fbinopWide.S */
+ /*
+ * Generic 64-bit floating-point operation.
+ *
+ * For: add-double, sub-double, mul-double, div-double.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ sub.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: mips64/op_mul_double.S */
+/* File: mips64/fbinopWide.S */
+ /*
+ * Generic 64-bit floating-point operation.
+ *
+ * For: add-double, sub-double, mul-double, div-double.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ mul.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double: /* 0xae */
+/* File: mips64/op_div_double.S */
+/* File: mips64/fbinopWide.S */
+ /*
+ * Generic 64-bit floating-point operation.
+ *
+ * For: add-double, sub-double, mul-double, div-double.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ div.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: mips64/op_rem_double.S */
+ /* rem-double vAA, vBB, vCC */
+ .extern fmod
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f12, a2 # f12 <- vBB
+ GET_VREG_DOUBLE f13, a3 # f13 <- vCC
+ jal fmod # f0 <- f12 op f13
+ srl a4, rINST, 8 # a4 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: mips64/op_add_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
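The /2addr form differs from plain binop only in decoding: A and B are 4-bit fields of the same 16-bit code unit and vA is both an operand and the destination. A hypothetical C equivalent of add-int/2addr:

    #include <stddef.h>
    #include <stdint.h>

    /* add-int/2addr vA, vB: vA = vA + vB in one 16-bit code unit. */
    static void add_int_2addr_sketch(uint32_t *vregs, const uint16_t *code, size_t *pc) {
        uint16_t inst = code[*pc];
        unsigned a = (inst >> 8) & 0xf;    /* ext a2, rINST, 8, 4 */
        unsigned b = (inst >> 12) & 0xf;   /* ext a3, rINST, 12, 4 */
        vregs[a] = vregs[a] + vregs[b];    /* addu a0, a0, a1; SET_VREG a0, a2 */
        *pc += 1;                          /* FETCH_ADVANCE_INST 1 */
    }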
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: mips64/op_sub_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ subu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: mips64/op_mul_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: mips64/op_div_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
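Here the same template is expanded with "chkzero" = 1, so the .if 1 block keeps the branch to common_errDivideByZero, and the (INT_MIN / -1) note applies: Java defines that quotient as INT_MIN, which per the comment the MIPS64 div instruction already produces, so no extra guard is emitted. A rough C sketch (portable C has to special-case the overflow explicitly, unlike the handler; vregs[] is an illustrative stand-in):

    #include <stdint.h>

    /* div-int/2addr vA, vB; returns nonzero where the handler would branch
     * to common_errDivideByZero. */
    static int div_int_2addr(int32_t* vregs, unsigned A, unsigned B) {
        int32_t divisor = vregs[B];
        if (divisor == 0) {
            return 1;                            /* beqz a1, common_errDivideByZero */
        }
        int32_t dividend = vregs[A];
        vregs[A] = (dividend == INT32_MIN && divisor == -1)
                       ? INT32_MIN               /* handled by the CPU on MIPS64 */
                       : dividend / divisor;
        return 0;
    }
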
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: mips64/op_rem_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: mips64/op_and_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: mips64/op_or_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: mips64/op_xor_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: mips64/op_shl_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: mips64/op_shr_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: mips64/op_ushr_int_2addr.S */
+/* File: mips64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: mips64/op_add_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ daddu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
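The 64-bit family is the same pattern with wide register accessors and doubleword ALU forms (GET_VREG_WIDE/SET_VREG_WIDE and daddu here). Roughly, in C, with wide_vregs[] as an illustrative stand-in for a 64-bit view of the register pairs:

    #include <stdint.h>

    /* add-long/2addr vA, vB  =>  vA <- vA + vB on 64-bit values. */
    static void add_long_2addr(int64_t* wide_vregs, unsigned A, unsigned B) {
        uint64_t lhs = (uint64_t)wide_vregs[A];  /* GET_VREG_WIDE a0, a2 */
        uint64_t rhs = (uint64_t)wide_vregs[B];  /* GET_VREG_WIDE a1, a3 */
        wide_vregs[A] = (int64_t)(lhs + rhs);    /* daddu; SET_VREG_WIDE a0, a2 */
    }
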
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: mips64/op_sub_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ dsubu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: mips64/op_mul_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ dmul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: mips64/op_div_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ ddiv a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: mips64/op_rem_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ dmod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: mips64/op_and_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: mips64/op_or_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: mips64/op_xor_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: mips64/op_shl_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ dsll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: mips64/op_shr_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ dsra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips64/op_ushr_long_2addr.S */
+/* File: mips64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ # optional op
+ dsrl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: mips64/op_add_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" floating-point operation.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f0, a2 # f0 <- vA
+ GET_VREG_FLOAT f1, a3 # f1 <- vB
+ add.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: mips64/op_sub_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" floating-point operation.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f0, a2 # f0 <- vA
+ GET_VREG_FLOAT f1, a3 # f1 <- vB
+ sub.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: mips64/op_mul_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" floating-point operation.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f0, a2 # f0 <- vA
+ GET_VREG_FLOAT f1, a3 # f1 <- vB
+ mul.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: mips64/op_div_float_2addr.S */
+/* File: mips64/fbinop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" floating-point operation.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f0, a2 # f0 <- vA
+ GET_VREG_FLOAT f1, a3 # f1 <- vB
+ div.s f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: mips64/op_rem_float_2addr.S */
+ /* rem-float/2addr vA, vB */
+ .extern fmodf
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f12, a2 # f12 <- vA
+ GET_VREG_FLOAT f13, a3 # f13 <- vB
+ jal fmodf # f0 <- f12 op f13
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
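rem-float/2addr has no single-instruction form, so the handler declares fmodf via .extern, calls it with the two operands in f12/f13, and re-extracts register index A afterwards because the call clobbers a2. In C terms, with fregs[] as an illustrative float view of the registers:

    #include <math.h>

    /* rem-float/2addr vA, vB  =>  vA <- fmodf(vA, vB) */
    static void rem_float_2addr(float* fregs, unsigned A, unsigned B) {
        fregs[A] = fmodf(fregs[A], fregs[B]);
    }
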
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: mips64/op_add_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" floating-point operation.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f0, a2 # f0 <- vA
+ GET_VREG_DOUBLE f1, a3 # f1 <- vB
+ add.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: mips64/op_sub_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" floating-point operation.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f0, a2 # f0 <- vA
+ GET_VREG_DOUBLE f1, a3 # f1 <- vB
+ sub.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: mips64/op_mul_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" floating-point operation.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f0, a2 # f0 <- vA
+ GET_VREG_DOUBLE f1, a3 # f1 <- vB
+ mul.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: mips64/op_div_double_2addr.S */
+/* File: mips64/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" floating-point operation.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f0, a2 # f0 <- vA
+ GET_VREG_DOUBLE f1, a3 # f1 <- vB
+ div.d f0, f0, f1 # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: mips64/op_rem_double_2addr.S */
+ /* rem-double/2addr vA, vB */
+ .extern fmod
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f12, a2 # f12 <- vA
+ GET_VREG_DOUBLE f13, a3 # f13 <- vB
+ jal fmod # f0 <- f12 op f13
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: mips64/op_add_int_lit16.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
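The lit16 family takes its second operand from the instruction stream instead of a second register: lh a1, 2(rPC) loads the sign-extended 16-bit literal CCCC, vB supplies the other operand, and the result goes to vA. A rough C sketch of add-int/lit16 (vregs[] is an illustrative stand-in):

    #include <stdint.h>

    /* add-int/lit16 vA, vB, #+CCCC  =>  vA <- vB + sign_extend(CCCC) */
    static void add_int_lit16(int32_t* vregs, unsigned A, unsigned B, int16_t CCCC) {
        vregs[A] = (int32_t)((uint32_t)vregs[B] + (uint32_t)(int32_t)CCCC);
    }
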
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: mips64/op_rsub_int.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ subu a0, a1, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: mips64/op_mul_int_lit16.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: mips64/op_div_int_lit16.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: mips64/op_rem_int_lit16.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: mips64/op_and_int_lit16.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: mips64/op_or_int_lit16.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: mips64/op_xor_int_lit16.S */
+/* File: mips64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: mips64/op_add_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
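The lit8 variants differ only in how operands are decoded: AA is the full destination register index from the high byte of the instruction word (srl a2, rINST, 8), BB comes from the byte at rPC+2, and CC is a sign-extended 8-bit literal from rPC+3. Roughly, in C (vregs[] is an illustrative stand-in):

    #include <stdint.h>

    /* add-int/lit8 vAA, vBB, #+CC  =>  vAA <- vBB + sign_extend(CC) */
    static void add_int_lit8(int32_t* vregs, unsigned AA, unsigned BB, int8_t CC) {
        vregs[AA] = (int32_t)((uint32_t)vregs[BB] + (uint32_t)(int32_t)CC);
    }
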
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips64/op_rsub_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ subu a0, a1, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: mips64/op_mul_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: mips64/op_div_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ div a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: mips64/op_rem_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 1
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ mod a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: mips64/op_and_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: mips64/op_or_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: mips64/op_xor_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: mips64/op_shl_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: mips64/op_shr_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips64/op_ushr_int_lit8.S */
+/* File: mips64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if 0
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: mips64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a4, rINST, 8, 4 # a4 <- A
+ daddu a1, a1, a3
+ beqz a3, common_errNullObject # object was null
+ lw a0, 0(a1) # a0 <- obj.field
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG a0, a4 # fp[A] <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
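The quickened field getters skip resolution entirely: vB already holds the object reference (read as an unsigned 32-bit value by GET_VREG_U), the instruction carries the field's byte offset directly, and the only runtime check left is the null test that branches to common_errNullObject. A rough C sketch (types and helpers are illustrative, not the runtime's):

    #include <stdbool.h>
    #include <stdint.h>

    /* iget-quick vA, vB, offset; returns false where the handler would branch
     * to common_errNullObject. */
    static bool iget_quick(int32_t* vregs, unsigned A, unsigned B, uint32_t offset) {
        uintptr_t obj = (uint32_t)vregs[B];          /* GET_VREG_U a3, a2 */
        if (obj == 0) {
            return false;                            /* beqz a3, common_errNullObject */
        }
        vregs[A] = *(const int32_t*)(obj + offset);  /* daddu a1, a1, a3; lw a0, 0(a1) */
        return true;
    }
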
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: mips64/op_iget_wide_quick.S */
+ /* iget-wide-quick vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a4, 2(rPC) # a4 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ daddu a4, a3, a4 # create direct pointer
+ lw a0, 0(a4)
+ lw a1, 4(a4)
+ dinsu a0, a1, 32, 32
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG_WIDE a0, a2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: mips64/op_iget_object_quick.S */
+ /* For: iget-object-quick */
+ /* op vA, vB, offset//CCCC */
+ .extern artIGetObjectFromMterp
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ EXPORT_PC
+ GET_VREG_U a0, a2 # a0 <- object we're operating on
+ jal artIGetObjectFromMterp # (obj, offset)
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: mips64/op_iput_quick.S */
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ GET_VREG a0, a2 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a1, a3
+ sw a0, 0(a1) # obj.field <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: mips64/op_iput_wide_quick.S */
+ /* iput-wide-quick vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a3, 2(rPC) # a3 <- field byte offset
+ GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer
+ ext a0, rINST, 8, 4 # a0 <- A
+ beqz a2, common_errNullObject # object was null
+ GET_VREG_WIDE a0, a0 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a2, a3 # create a direct pointer
+ sw a0, 0(a1)
+ dsrl32 a0, a0, 0
+ sw a0, 4(a1)
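+    # The wide value is stored as two 32-bit halves (low word first, then the
+    # high word extracted with dsrl32), mirroring the split load in
+    # iget-wide-quick.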
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: mips64/op_iput_object_quick.S */
+ .extern MterpIputObjectQuick
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ jal MterpIputObjectQuick
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips64/op_invoke_virtual_quick.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
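+ * Argument marshalling and the actual dispatch happen in the out-of-line
+ * helper named below; a zero result in v0 routes control to MterpException.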
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuick
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeVirtualQuick
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips64/op_invoke_virtual_range_quick.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuickRange
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeVirtualQuickRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: mips64/op_iput_boolean_quick.S */
+/* File: mips64/op_iput_quick.S */
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ GET_VREG a0, a2 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a1, a3
+ sb a0, 0(a1) # obj.field <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: mips64/op_iput_byte_quick.S */
+/* File: mips64/op_iput_quick.S */
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ GET_VREG a0, a2 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a1, a3
+ sb a0, 0(a1) # obj.field <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: mips64/op_iput_char_quick.S */
+/* File: mips64/op_iput_quick.S */
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ GET_VREG a0, a2 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a1, a3
+ sh a0, 0(a1) # obj.field <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: mips64/op_iput_short_quick.S */
+/* File: mips64/op_iput_quick.S */
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ GET_VREG a0, a2 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a1, a3
+ sh a0, 0(a1) # obj.field <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: mips64/op_iget_boolean_quick.S */
+/* File: mips64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a4, rINST, 8, 4 # a4 <- A
+ daddu a1, a1, a3
+ beqz a3, common_errNullObject # object was null
+ lbu a0, 0(a1) # a0 <- obj.field
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG a0, a4 # fp[A] <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: mips64/op_iget_byte_quick.S */
+/* File: mips64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a4, rINST, 8, 4 # a4 <- A
+ daddu a1, a1, a3
+ beqz a3, common_errNullObject # object was null
+ lb a0, 0(a1) # a0 <- obj.field
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG a0, a4 # fp[A] <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: mips64/op_iget_char_quick.S */
+/* File: mips64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a4, rINST, 8, 4 # a4 <- A
+ daddu a1, a1, a3
+ beqz a3, common_errNullObject # object was null
+ lhu a0, 0(a1) # a0 <- obj.field
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG a0, a4 # fp[A] <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: mips64/op_iget_short_quick.S */
+/* File: mips64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a4, rINST, 8, 4 # a4 <- A
+ daddu a1, a1, a3
+ beqz a3, common_errNullObject # object was null
+ lh a0, 0(a1) # a0 <- obj.field
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG a0, a4 # fp[A] <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: mips64/op_unused_f4.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: mips64/op_unused_fa.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: mips64/op_unused_fb.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: mips64/op_unused_fc.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: mips64/op_unused_fd.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: mips64/op_unused_fe.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: mips64/op_unused_ff.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+ .balign 128
+ .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
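+ * Out-of-line continuations reached from the primary handlers above; they
+ * are placed outside the fixed 128-byte handler slots.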
+ */
+ .global artMterpAsmSisterStart
+ .type artMterpAsmSisterStart, %function
+ .text
+ .balign 4
+artMterpAsmSisterStart:
+
+/* continuation for op_float_to_int */
+.Lop_float_to_int_trunc:
+ trunc.w.s f0, f0
+ mfc1 t0, f0
+.Lop_float_to_int_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* continuation for op_float_to_long */
+.Lop_float_to_long_trunc:
+ trunc.l.s f0, f0
+ dmfc1 t0, f0
+.Lop_float_to_long_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* continuation for op_double_to_int */
+.Lop_double_to_int_trunc:
+ trunc.w.d f0, f0
+ mfc1 t0, f0
+.Lop_double_to_int_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+/* continuation for op_double_to_long */
+.Lop_double_to_long_trunc:
+ trunc.l.d f0, f0
+ dmfc1 t0, f0
+.Lop_double_to_long_done:
+ /* Can't include fcvtFooter.S after break */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE t0, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+ .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
+ .global artMterpAsmSisterEnd
+artMterpAsmSisterEnd:
+
+
+ .global artMterpAsmAltInstructionStart
+ .type artMterpAsmAltInstructionStart, %function
+ .text
+
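+/*
+ * ===========================================================================
+ *  Alternate instruction handlers
+ * ===========================================================================
+ * One 128-byte alt_stub per opcode. Each stub pre-loads ra with the address
+ * of the matching primary handler (artMterpAsmInstructionStart + opcode * 128,
+ * relying on the .balign 128 spacing) before tail-calling MterpCheckBefore,
+ * so the check routine's return resumes execution in that handler.
+ */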
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (0 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (1 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (2 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (3 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (4 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (5 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (6 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (7 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (8 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (9 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (10 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (11 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (12 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (13 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (14 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (15 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (16 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (17 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (18 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (19 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (20 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (21 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (22 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (23 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (24 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (25 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (26 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (27 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (28 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (29 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (30 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (31 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (32 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (33 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (34 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (35 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (36 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (37 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (38 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (39 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (40 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (41 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (42 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (43 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (44 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (45 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (46 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (47 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (48 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (49 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (50 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (51 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (52 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (53 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (54 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (55 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (56 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (57 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (58 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (59 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (60 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (61 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (62 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (63 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (64 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (65 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (66 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (67 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (68 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (69 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (70 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (71 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (72 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (73 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (74 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (75 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (76 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (77 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (78 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (79 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (80 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (81 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (82 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (83 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (84 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (85 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (86 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (87 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (88 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (89 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (90 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (91 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (92 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (93 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (94 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (95 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (96 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (97 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (98 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (99 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (100 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (101 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (102 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (103 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (104 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (105 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (106 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (107 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (108 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (109 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (110 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (111 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (112 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (113 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (114 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (115 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (116 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (117 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (118 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (119 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (120 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (121 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (122 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (123 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (124 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (125 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (126 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (127 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (128 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (129 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (130 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (131 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (132 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (133 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (134 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (135 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (136 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (137 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (138 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (139 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (140 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (141 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (142 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (143 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (144 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (145 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (146 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (147 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (148 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (149 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (150 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (151 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (152 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (153 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (154 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (155 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (156 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (157 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (158 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (159 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (160 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (161 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (162 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (163 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (164 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (165 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (166 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (167 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (168 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (169 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (170 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (171 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (172 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (173 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (174 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (175 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (176 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (177 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (178 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (179 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (180 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (181 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (182 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (183 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (184 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (185 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (186 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (187 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (188 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (189 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (190 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (191 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (192 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (193 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (194 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (195 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (196 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (197 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (198 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (199 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (200 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (201 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (202 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (203 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (204 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (205 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (206 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (207 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (208 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (209 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (210 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (211 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (212 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (213 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (214 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (215 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (216 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (217 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (218 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (219 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (220 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (221 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (222 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (223 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (224 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (225 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (226 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (227 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (228 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (229 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (230 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (231 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (232 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (233 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (234 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (235 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (236 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (237 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (238 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (239 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (240 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (241 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (242 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (243 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (244 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (245 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (246 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (247 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (248 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (249 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (250 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (251 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (252 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (253 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (254 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: mips64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ daddu ra, ra, (255 * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame) Note: tail call.
+
+ .balign 128
+ .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
+ .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+/* File: mips64/footer.S */
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+
+ .extern MterpLogDivideByZeroException
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+ .extern MterpLogArrayIndexException
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+ .extern MterpLogNullObjectException
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ld a0, THREAD_EXCEPTION_OFFSET(rSELF)
+ beqzc a0, MterpFallback # If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ *
+ */
+ .extern MterpHandleException
+MterpException:
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpHandleException # (self, shadow_frame)
+ beqzc v0, MterpExceptionReturn # no local catch, back to caller.
+ ld a0, OFF_FP_CODE_ITEM(rFP)
+ lwu a1, OFF_FP_DEX_PC(rFP)
+ REFRESH_IBASE
+ daddu rPC, a0, CODEITEM_INSNS_OFFSET
+ dlsa rPC, a1, rPC, 1 # generate new dex_pc_ptr
+ sd rPC, OFF_FP_DEX_PC_PTR(rFP)
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a pending suspend request.  Assumes rINST is already loaded and rPC has been
+ * advanced; we still need to extract the opcode and branch to its handler.  Thread flags are in ra.
+ */
+ .extern MterpSuspendCheck
+MterpCheckSuspendAndContinue:
+ REFRESH_IBASE
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ bnez ra, check1
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+check1:
+ EXPORT_PC
+ move a0, rSELF
+ jal MterpSuspendCheck # (self)
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/*
+ * Bail out to reference interpreter.
+ */
+ .extern MterpLogFallback
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogFallback
+#endif
+MterpCommonFallback:
+ li v0, 0 # signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA. Here we restore SP, restore the registers, and then restore
+ * RA to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ li v0, 1 # signal return to caller.
+ b MterpDone
+/*
+ * Returned value is expected in a0 and if it's not 64-bit, the 32 most
+ * significant bits of a0 must be 0.
+ */
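+/*
+ * The code below stores a0 through the result-register pointer saved in the shadow
+ * frame (a 64-bit store), runs a suspend check if one is requested, and then falls
+ * through to MterpDone to restore the callee-saved registers and return to the caller.
+ */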
+MterpReturn:
+ ld a2, OFF_FP_RESULT_REGISTER(rFP)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ sd a0, 0(a2)
+ move a0, rSELF
+ and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ beqzc ra, check2
+ jal MterpSuspendCheck # (self)
+check2:
+ li v0, 1 # signal return to caller.
+MterpDone:
+ ld s5, STACK_OFFSET_S5(sp)
+ .cfi_restore 21
+ ld s4, STACK_OFFSET_S4(sp)
+ .cfi_restore 20
+ ld s3, STACK_OFFSET_S3(sp)
+ .cfi_restore 19
+ ld s2, STACK_OFFSET_S2(sp)
+ .cfi_restore 18
+ ld s1, STACK_OFFSET_S1(sp)
+ .cfi_restore 17
+ ld s0, STACK_OFFSET_S0(sp)
+ .cfi_restore 16
+
+ ld ra, STACK_OFFSET_RA(sp)
+ .cfi_restore 31
+
+ ld t8, STACK_OFFSET_GP(sp)
+ .cpreturn
+ .cfi_restore 28
+
+ .set noreorder
+ jr ra
+ daddu sp, sp, STACK_SIZE
+ .cfi_adjust_cfa_offset -STACK_SIZE
+
+ .cfi_endproc
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 567550f..ebac5fc 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -112,25 +112,32 @@
#define SYMBOL(name) name
#endif
+.macro PUSH _reg
+ pushl \_reg
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+ popl \_reg
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore \_reg
+.endm
+
/* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address
+ * Account for 4 bytes for the return address + 4 * 4 for spills
*/
-#define FRAME_SIZE 44
+#define FRAME_SIZE 28
/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 (FRAME_SIZE + 16)
-#define IN_ARG2 (FRAME_SIZE + 12)
-#define IN_ARG1 (FRAME_SIZE + 8)
-#define IN_ARG0 (FRAME_SIZE + 4)
-#define CALLER_RP (FRAME_SIZE + 0)
+#define IN_ARG3 (FRAME_SIZE + 16 + 16)
+#define IN_ARG2 (FRAME_SIZE + 16 + 12)
+#define IN_ARG1 (FRAME_SIZE + 16 + 8)
+#define IN_ARG0 (FRAME_SIZE + 16 + 4)
/* Spill offsets relative to %esp */
-#define EBP_SPILL (FRAME_SIZE - 4)
-#define EDI_SPILL (FRAME_SIZE - 8)
-#define ESI_SPILL (FRAME_SIZE - 12)
-#define EBX_SPILL (FRAME_SIZE - 16)
-#define LOCAL0 (FRAME_SIZE - 20)
-#define LOCAL1 (FRAME_SIZE - 24)
-#define LOCAL2 (FRAME_SIZE - 28)
+#define LOCAL0 (FRAME_SIZE - 4)
+#define LOCAL1 (FRAME_SIZE - 8)
+#define LOCAL2 (FRAME_SIZE - 12)
/* Out Arg offsets, relative to %esp */
#define OUT_ARG3 ( 12)
#define OUT_ARG2 ( 8)
@@ -360,16 +367,18 @@
SYMBOL(ExecuteMterpImpl):
.cfi_startproc
+ .cfi_def_cfa esp, 4
+
+ /* Spill callee save regs */
+ PUSH %ebp
+ PUSH %edi
+ PUSH %esi
+ PUSH %ebx
+
/* Allocate frame */
subl $FRAME_SIZE, %esp
.cfi_adjust_cfa_offset FRAME_SIZE
- /* Spill callee save regs */
- movl %ebp, EBP_SPILL(%esp)
- movl %edi, EDI_SPILL(%esp)
- movl %esi, ESI_SPILL(%esp)
- movl %ebx, EBX_SPILL(%esp)
-
/* Load ShadowFrame pointer */
movl IN_ARG2(%esp), %edx
@@ -12985,17 +12994,16 @@
movl %ecx, 4(%edx)
mov $1, %eax
MterpDone:
- /* Restore callee save register */
- movl EBP_SPILL(%esp), %ebp
- movl EDI_SPILL(%esp), %edi
- movl ESI_SPILL(%esp), %esi
- movl EBX_SPILL(%esp), %ebx
-
/* pop up frame */
addl $FRAME_SIZE, %esp
.cfi_adjust_cfa_offset -FRAME_SIZE
- ret
+ /* Restore callee save register */
+ POP %ebx
+ POP %esi
+ POP %edi
+ POP %ebp
+ ret
.cfi_endproc
SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
new file mode 100644
index 0000000..a1360e0
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -0,0 +1,11960 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'x86_64'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: x86_64/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create the frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate the shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+/*
+x86_64 ABI general notes:
+
+Caller save set:
+ rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
+Callee save set:
+ rbx, rbp, r12-r15
+Return regs:
+ 32-bit in eax
+ 64-bit in rax
+ fp on xmm0
+
+The first 8 fp parameters are passed in xmm0-xmm7.
+The first 6 non-fp parameters are passed in rdi, rsi, rdx, rcx, r8, r9.
+Other parameters are passed on the stack, pushed right-to-left.  On entry to the target,
+the first stack param is at 8(%rsp).
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be rsp relative.
+*/
+
+/*
+Mterp and x86_64 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+ nick reg purpose
+ rSELF rbp pointer to ThreadSelf.
+ rPC r12 interpreted program counter, used for fetching instructions
+ rFP r13 interpreted frame pointer, used for accessing locals and args
+ rINSTw bx first 16-bit code of current instruction
+ rINSTbl bl opcode portion of instruction word
+ rINSTbh bh high byte of inst word, usually contains src/tgt reg names
+ rIBASE r14 base of instruction handler table
+ rREFS r15 base of object references in shadow frame.
+
+Notes:
+ o High order 16 bits of ebx must be zero on entry to handler
+ o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+ o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Handle Mac compiler specifics.
+ */
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $(value)
+ #define FUNCTION_TYPE(name)
+ #define SIZE(start,end)
+ // Mac OS' symbols have an _ prefix.
+ #define SYMBOL(name) _ ## name
+#else
+ #define MACRO_LITERAL(value) $value
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define SIZE(start,end) .size start, .-end
+ #define SYMBOL(name) name
+#endif
+
+.macro PUSH _reg
+ pushq \_reg
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+ popq \_reg
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore \_reg
+.endm
+
+/* Frame size must be 16-byte aligned.
+ * Account for 8 bytes for the return address + 6 * 8 for spills.
+ */
+#define FRAME_SIZE 8
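+/* The 8-byte return address, the six 8-byte callee-save pushes in the entry code below,
+ * and FRAME_SIZE itself add up to 64 bytes, so the stack stays 16-byte aligned at call
+ * sites as required by the ABI notes above.
+ */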
+
+/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
+#define IN_ARG3 %rcx
+#define IN_ARG2 %rdx
+#define IN_ARG1 %rsi
+#define IN_ARG0 %rdi
+/* Out Args */
+#define OUT_ARG3 %rcx
+#define OUT_ARG2 %rdx
+#define OUT_ARG1 %rsi
+#define OUT_ARG0 %rdi
+#define OUT_32_ARG3 %ecx
+#define OUT_32_ARG2 %edx
+#define OUT_32_ARG1 %esi
+#define OUT_32_ARG0 %edi
+#define OUT_FP_ARG1 %xmm1
+#define OUT_FP_ARG0 %xmm0
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF %rbp
+#define rPC %r12
+#define rFP %r13
+#define rINST %ebx
+#define rINSTq %rbx
+#define rINSTw %bx
+#define rINSTbh %bh
+#define rINSTbl %bl
+#define rIBASE %r14
+#define rREFS %r15
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
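+/* For example, because rFP points at ShadowFrame::vregs_, a load such as
+ *   movl OFF_FP_DEX_PC(rFP), %eax
+ * reaches the dex_pc_ field through a negative offset, as described above.
+ */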
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * Profile branch. rINST should contain the offset. %eax is scratch.
+ */
+.macro MTERP_PROFILE_BRANCH
+#ifdef MTERP_PROFILE_BRANCHES
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpProfileBranch)
+ testb %al, %al
+ jnz MterpOnStackReplacement
+#endif
+.endm
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ movq rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ * The handler-table base can be clobbered across calls, so we must refresh it after each call.
+ * It is also clobbered as a side effect of some 64-bit operations (like imul), so we
+ * should refresh it in such cases as well.
+ *
+ */
+.macro REFRESH_IBASE
+ movq THREAD_CURRENT_IBASE_OFFSET(rSELF), rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * On entry to a handler, rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+ movb rINSTbl, rINSTbh
+ movb $\_opnum, rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
+ */
+.macro FETCH_INST
+ movzwq (rPC), rINSTq
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ */
+.macro GOTO_NEXT
+ movzx rINSTbl,%eax
+ movzbl rINSTbh,rINST
+ shll MACRO_LITERAL(7), %eax
+ addq rIBASE, %rax
+ jmp *%rax
+.endm
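+/* Each opcode handler below is padded to a 128-byte slot (note the .balign 128 before
+ * every handler), so the shll by 7 above scales the opcode by 128 and adding rIBASE
+ * yields the handler's entry point.
+ */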
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+ leaq 2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+ ADVANCE_PC \_count
+ FETCH_INST
+ GOTO_NEXT
+.endm
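+/* For example, ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 skips a two-code-unit instruction:
+ * rPC advances by 4 bytes (Dalvik code units are 16 bits wide), the next instruction
+ * word is fetched, and control transfers to its handler.
+ */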
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
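+/* Each vreg occupies a 32-bit slot, so vreg N lives at rFP + 4*N with a parallel
+ * reference slot at rREFS + 4*N; wide values span two consecutive slots.
+ */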
+
+.macro GET_VREG _reg _vreg
+ movl (rFP,\_vreg,4), \_reg
+.endm
+
+/* Read wide value. */
+.macro GET_WIDE_VREG _reg _vreg
+ movq (rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+ movl \_reg, (rFP,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+.endm
+
+/* Write wide value. reg is clobbered. */
+.macro SET_WIDE_VREG _reg _vreg
+ movq \_reg, (rFP,\_vreg,4)
+ xorq \_reg, \_reg
+ movq \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+ movl \_reg, (rFP,\_vreg,4)
+ movl \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+ movl 4(rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+ movl \_reg, 4(rFP,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_REF _vreg
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
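+
+/* Note on the double-store scheme mentioned in the header: SET_VREG clears the matching
+ * rREFS slot while SET_VREG_OBJECT writes the value to both rFP and rREFS, so the
+ * reference array always mirrors exactly the vregs that hold object references.
+ */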
+
+/* File: x86_64/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .global SYMBOL(ExecuteMterpImpl)
+ FUNCTION_TYPE(ExecuteMterpImpl)
+
+/*
+ * On entry:
+ * 0 Thread* self
+ * 1 code_item
+ * 2 ShadowFrame
+ * 3 JValue* result_register
+ *
+ */
+
+SYMBOL(ExecuteMterpImpl):
+ .cfi_startproc
+ .cfi_def_cfa rsp, 8
+
+ /* Spill callee save regs */
+ PUSH %rbx
+ PUSH %rbp
+ PUSH %r12
+ PUSH %r13
+ PUSH %r14
+ PUSH %r15
+
+ /* Allocate frame */
+ subq $FRAME_SIZE, %rsp
+ .cfi_adjust_cfa_offset FRAME_SIZE
+
+ /* Remember the return register */
+ movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
+
+ /* Remember the code_item */
+ movq IN_ARG1, SHADOWFRAME_CODE_ITEM_OFFSET(IN_ARG2)
+
+ /* set up "named" registers */
+ movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
+ leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
+ leaq (rFP, %rax, 4), rREFS
+ movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
+ leaq CODEITEM_INSNS_OFFSET(IN_ARG1), rPC
+ leaq (rPC, %rax, 2), rPC
+ EXPORT_PC
+
+ /* Starting ibase */
+ movq IN_ARG0, rSELF
+ REFRESH_IBASE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
+
+
+ .global SYMBOL(artMterpAsmInstructionStart)
+ FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
+ .text
+
+/* ------------------------------ */
+ .balign 128
+.L_op_nop: /* 0x00 */
+/* File: x86_64/op_nop.S */
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move: /* 0x01 */
+/* File: x86_64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movl rINST, %eax # eax <- BA
+ andb $0xf, %al # eax <- A
+ shrl $4, rINST # rINST <- B
+ GET_VREG %edx, rINSTq
+ .if 0
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: x86_64/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ movzwq 2(rPC), %rax # eax <- BBBB
+ GET_VREG %edx, %rax # edx <- fp[BBBB]
+ .if 0
+ SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, rINSTq # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: x86_64/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ movzwq 4(rPC), %rcx # ecx <- BBBB
+ movzwq 2(rPC), %rax # eax <- AAAA
+ GET_VREG %edx, %rcx
+ .if 0
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: x86_64/op_move_wide.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movl rINST, %ecx # ecx <- BA
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rdx, rINSTq # rdx <- v[B]
+ SET_WIDE_VREG %rdx, %rcx # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: x86_64/op_move_wide_from16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwl 2(rPC), %ecx # ecx <- BBBB
+ GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
+ SET_WIDE_VREG %rdx, rINSTq # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: x86_64/op_move_wide_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwq 4(rPC), %rcx # ecx<- BBBB
+ movzwq 2(rPC), %rax # eax<- AAAA
+ GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
+ SET_WIDE_VREG %rdx, %rax # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: x86_64/op_move_object.S */
+/* File: x86_64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movl rINST, %eax # eax <- BA
+ andb $0xf, %al # eax <- A
+ shrl $4, rINST # rINST <- B
+ GET_VREG %edx, rINSTq
+ .if 1
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: x86_64/op_move_object_from16.S */
+/* File: x86_64/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ movzwq 2(rPC), %rax # eax <- BBBB
+ GET_VREG %edx, %rax # edx <- fp[BBBB]
+ .if 1
+ SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, rINSTq # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: x86_64/op_move_object_16.S */
+/* File: x86_64/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ movzwq 4(rPC), %rcx # ecx <- BBBB
+ movzwq 2(rPC), %rax # eax <- AAAA
+ GET_VREG %edx, %rcx
+ .if 1
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: x86_64/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
+ movl (%rax), %eax # r0 <- result.i.
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: x86_64/op_move_result_wide.S */
+ /* move-result-wide vAA */
+ movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
+ movq (%rax), %rdx # Get wide
+ SET_WIDE_VREG %rdx, rINSTq # v[AA] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: x86_64/op_move_result_object.S */
+/* File: x86_64/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
+ movl (%rax), %eax # r0 <- result.i.
+ .if 1
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: x86_64/op_move_exception.S */
+ /* move-exception vAA */
+ movl THREAD_EXCEPTION_OFFSET(rSELF), %eax
+ SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- exception object
+ movl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: x86_64/op_return_void.S */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorq %rax, %rax
+ jmp MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return: /* 0x0f */
+/* File: x86_64/op_return.S */
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_VREG %eax, rINSTq # eax <- vAA
+ jmp MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: x86_64/op_return_wide.S */
+/*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_WIDE_VREG %rax, rINSTq # eax <- v[AA]
+ jmp MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: x86_64/op_return_object.S */
+/* File: x86_64/op_return.S */
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_VREG %eax, rINSTq # eax <- vAA
+ jmp MterpReturn
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: x86_64/op_const_4.S */
+ /* const/4 vA, #+B */
+ movsbl rINSTbl, %eax # eax <-ssssssBx
+ movl $0xf, rINST
+ andl %eax, rINST # rINST <- A
+ sarl $4, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: x86_64/op_const_16.S */
+ /* const/16 vAA, #+BBBB */
+ movswl 2(rPC), %ecx # ecx <- ssssBBBB
+ SET_VREG %ecx, rINSTq # vAA <- ssssBBBB
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const: /* 0x14 */
+/* File: x86_64/op_const.S */
+ /* const vAA, #+BBBBbbbb */
+ movl 2(rPC), %eax # grab all 32 bits at once
+ SET_VREG %eax, rINSTq # vAA<- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: x86_64/op_const_high16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+ movzwl 2(rPC), %eax # eax <- 0000BBBB
+ sall $16, %eax # eax <- BBBB0000
+ SET_VREG %eax, rINSTq # vAA <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: x86_64/op_const_wide_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+ movswq 2(rPC), %rax # rax <- ssssBBBB
+ SET_WIDE_VREG %rax, rINSTq # store
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: x86_64/op_const_wide_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ movslq 2(rPC), %rax # eax <- ssssssssBBBBbbbb
+ SET_WIDE_VREG %rax, rINSTq # store
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: x86_64/op_const_wide.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ movq 2(rPC), %rax # rax <- HHHHhhhhBBBBbbbb
+ SET_WIDE_VREG %rax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: x86_64/op_const_wide_high16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ movzwq 2(rPC), %rax # rax <- 0000BBBB
+ salq $48, %rax # rax <- BBBB000000000000
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: x86_64/op_const_string.S */
+ /* const/string vAA, String@BBBB */
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: x86_64/op_const_string_jumbo.S */
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+ movl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: x86_64/op_const_class.S */
+ /* const/class vAA, Class@BBBB */
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: x86_64/op_monitor_enter.S */
+/*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ GET_VREG OUT_32_ARG0, rINSTq
+ movq rSELF, OUT_ARG1
+ call SYMBOL(artLockObjectFromCode) # (object, self)
+ testq %rax, %rax
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: x86_64/op_monitor_exit.S */
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ GET_VREG OUT_32_ARG0, rINSTq
+ movq rSELF, OUT_ARG1
+ call SYMBOL(artUnlockObjectFromCode) # (object, self)
+ testq %rax, %rax
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: x86_64/op_check_cast.S */
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG1
+ movq OFF_FP_METHOD(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: x86_64/op_instance_of.S */
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- CCCC
+ movl rINST, %eax # eax <- BA
+ sarl $4, %eax # eax <- B
+ leaq VREG_ADDRESS(%rax), OUT_ARG1 # Get object address
+ movq OFF_FP_METHOD(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
+ movsbl %al, %eax
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ andb $0xf, rINSTbl # rINSTbl <- A
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: x86_64/op_array_length.S */
+/*
+ * Return the length of an array.
+ */
+ movl rINST, %eax # eax <- BA
+ sarl $4, rINST # rINST <- B
+ GET_VREG %ecx, rINSTq # ecx <- vB (object ref)
+ testl %ecx, %ecx # is null?
+ je common_errNullObject
+ andb $0xf, %al # eax <- A
+ movl MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
+ SET_VREG rINST, %rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: x86_64/op_new_instance.S */
+/*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rSELF, OUT_ARG1
+ REFRESH_INST 34
+ movq rINSTq, OUT_ARG2
+ call SYMBOL(MterpNewInstance)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: x86_64/op_new_array.S */
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST 35
+ movq rINSTq, OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpNewArray)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: x86_64/op_filled_new_array.S */
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern MterpFilledNewArray
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ movq rSELF, OUT_ARG2
+ call SYMBOL(MterpFilledNewArray)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: x86_64/op_filled_new_array_range.S */
+/* File: x86_64/op_filled_new_array.S */
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern MterpFilledNewArrayRange
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ movq rSELF, OUT_ARG2
+ call SYMBOL(MterpFilledNewArrayRange)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: x86_64/op_fill_array_data.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ movl 2(rPC), %ecx # ecx <- BBBBbbbb
+ leaq (rPC,%rcx,2), OUT_ARG1 # OUT_ARG1 <- PC + BBBBbbbb*2
+ GET_VREG OUT_32_ARG0, rINSTq # OUT_ARG0 <- vAA (array object)
+ call SYMBOL(MterpFillArrayData) # (obj, payload)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+/* ------------------------------ */
+ .balign 128
+.L_op_throw: /* 0x27 */
+/* File: x86_64/op_throw.S */
+/*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ GET_VREG %eax, rINSTq # eax<- vAA (exception object)
+ testb %al, %al
+ jz common_errNullObject
+ movq %rax, THREAD_EXCEPTION_OFFSET(rSELF)
+ jmp MterpException
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto: /* 0x28 */
+/* File: x86_64/op_goto.S */
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: x86_64/op_goto_16.S */
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: x86_64/op_goto_32.S */
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Because we need the sign flag (SF) set for the suspend-check branch below, we use an
+ * add that updates the flags to convert from a Dalvik code-unit offset to a byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+ movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: x86_64/op_packed_switch.S */
+/*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ movslq 2(rPC), OUT_ARG0 # rcx <- BBBBbbbb
+ leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + BBBBbbbb*2
+ GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
+ call SYMBOL(MterpDoPackedSwitch)
+ movslq %eax, rINSTq
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: x86_64/op_sparse_switch.S */
+/* File: x86_64/op_packed_switch.S */
+/*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ movslq 2(rPC), OUT_ARG0 # rcx <- BBBBbbbb
+ leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + BBBBbbbb*2
+ GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
+ call SYMBOL(MterpDoSparseSwitch)
+ movslq %eax, rINSTq
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: x86_64/op_cmpl_float.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return nanval ? 1 : -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx<- CC
+ movzbq 2(rPC), %rax # eax<- BB
+ movss VREG_ADDRESS(%rax), %xmm0
+ xor %eax, %eax
+ ucomiss VREG_ADDRESS(%rcx), %xmm0
+ jp .Lop_cmpl_float_nan_is_neg
+ je .Lop_cmpl_float_finish
+ jb .Lop_cmpl_float_less
+.Lop_cmpl_float_nan_is_pos:
+ addb $1, %al
+ jmp .Lop_cmpl_float_finish
+.Lop_cmpl_float_nan_is_neg:
+.Lop_cmpl_float_less:
+ movl $-1, %eax
+.Lop_cmpl_float_finish:
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: x86_64/op_cmpg_float.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return nanval ? 1 : -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx<- CC
+ movzbq 2(rPC), %rax # eax<- BB
+ movss VREG_ADDRESS(%rax), %xmm0
+ xor %eax, %eax
+ ucomiss VREG_ADDRESS(%rcx), %xmm0
+ jp .Lop_cmpg_float_nan_is_pos
+ je .Lop_cmpg_float_finish
+ jb .Lop_cmpg_float_less
+.Lop_cmpg_float_nan_is_pos:
+ addb $1, %al
+ jmp .Lop_cmpg_float_finish
+.Lop_cmpg_float_nan_is_neg:
+.Lop_cmpg_float_less:
+ movl $-1, %eax
+.Lop_cmpg_float_finish:
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: x86_64/op_cmpl_double.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return nanval ? 1 : -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx<- CC
+ movzbq 2(rPC), %rax # eax<- BB
+ movsd VREG_ADDRESS(%rax), %xmm0
+ xor %eax, %eax
+ ucomisd VREG_ADDRESS(%rcx), %xmm0
+ jp .Lop_cmpl_double_nan_is_neg
+ je .Lop_cmpl_double_finish
+ jb .Lop_cmpl_double_less
+.Lop_cmpl_double_nan_is_pos:
+ addb $1, %al
+ jmp .Lop_cmpl_double_finish
+.Lop_cmpl_double_nan_is_neg:
+.Lop_cmpl_double_less:
+ movl $-1, %eax
+.Lop_cmpl_double_finish:
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: x86_64/op_cmpg_double.S */
+/* File: x86_64/fpcmp.S */
+/*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return nanval ? 1 : -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx<- CC
+ movzbq 2(rPC), %rax # eax<- BB
+ movsd VREG_ADDRESS(%rax), %xmm0
+ xor %eax, %eax
+ ucomisd VREG_ADDRESS(%rcx), %xmm0
+ jp .Lop_cmpg_double_nan_is_pos
+ je .Lop_cmpg_double_finish
+ jb .Lop_cmpg_double_less
+.Lop_cmpg_double_nan_is_pos:
+ addb $1, %al
+ jmp .Lop_cmpg_double_finish
+.Lop_cmpg_double_nan_is_neg:
+.Lop_cmpg_double_less:
+ movl $-1, %eax
+.Lop_cmpg_double_finish:
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: x86_64/op_cmp_long.S */
+/*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+ /* cmp-long vAA, vBB, vCC */
+ movzbq 2(rPC), %rdx # edx <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rdx, %rdx # rdx <- v[BB]
+ xorl %eax, %eax
+ xorl %edi, %edi
+ addb $1, %al
+ movl $-1, %esi
+ cmpq VREG_ADDRESS(%rcx), %rdx
+ cmovl %esi, %edi
+ cmovg %eax, %edi
+ SET_VREG %edi, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: x86_64/op_if_eq.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ movl $2, rINST # assume not taken
+ jne 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rax <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: x86_64/op_if_ne.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ movl $2, rINST # assume not taken
+ je 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rax <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: x86_64/op_if_lt.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ movl $2, rINST # assume not taken
+ jge 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rax <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: x86_64/op_if_ge.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ movl $2, rINST # assume not taken
+ jl 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rax <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: x86_64/op_if_gt.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ movl $2, rINST # assume not taken
+ jle 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rax <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: x86_64/op_if_le.S */
+/* File: x86_64/bincmp.S */
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ movl $2, rINST # assume not taken
+ jg 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rax <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: x86_64/op_if_eqz.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ movl $2, rINST # assume branch not taken
+ jne 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: x86_64/op_if_nez.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ movl $2, rINST # assume branch not taken
+ je 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: x86_64/op_if_ltz.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ movl $2, rINST # assume branch not taken
+ jge 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: x86_64/op_if_gez.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ movl $2, rINST # assume branch not taken
+ jl 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: x86_64/op_if_gtz.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ movl $2, rINST # assume branch not taken
+ jle 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: x86_64/op_if_lez.S */
+/* File: x86_64/zcmp.S */
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ movl $2, rINST # assume branch not taken
+ jg 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: x86_64/op_unused_3e.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: x86_64/op_unused_3f.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: x86_64/op_unused_40.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: x86_64/op_unused_41.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: x86_64/op_unused_42.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: x86_64/op_unused_43.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget: /* 0x44 */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ movq MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ movl MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
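The primitive array-get template above inlines both checks: a null test on the array register and a single unsigned compare (`jae`) that rejects negative indices and indices beyond the length in one branch. Roughly, in C++ (illustrative names only; the struct stands in for the array layout that the MIRROR_* offsets address):

    #include <cstdint>

    // Hypothetical stand-in for the layout addressed via
    // MIRROR_ARRAY_LENGTH_OFFSET / MIRROR_INT_ARRAY_DATA_OFFSET.
    struct Int32Array { uint32_t length; int32_t data[1]; };

    // Approximate aget fast-path semantics: vAA <- vBB[vCC].
    bool AGetInt(const Int32Array* array, uint32_t index, int32_t* out) {
      if (array == nullptr) return false;        // common_errNullObject
      if (index >= array->length) return false;  // common_errArrayIndex (also covers index < 0)
      *out = array->data[index];
      return true;
    }

The wide, boolean, byte, char and short variants that follow differ only in the element load and scale factor; aget-object additionally goes through a runtime helper.
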
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: x86_64/op_aget_wide.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 1
+ movq MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ movq MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: x86_64/op_aget_object.S */
+/*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG OUT_32_ARG0, %rax # eax <- vBB (array object)
+ GET_VREG OUT_32_ARG1, %rcx # ecx <- vCC (requested index)
+ EXPORT_PC
+ call SYMBOL(artAGetObjectFromMterp) # (array, index)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ SET_VREG_OBJECT %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: x86_64/op_aget_boolean.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ movq MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ movzbl MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: x86_64/op_aget_byte.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ movq MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ movsbl MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: x86_64/op_aget_char.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ movq MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ movzwl MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: x86_64/op_aget_short.S */
+/* File: x86_64/op_aget.S */
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ movq MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ movswl MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput: /* 0x4b */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ movl rINST, MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: x86_64/op_aput_wide.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 1
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ movq rINSTq, MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: x86_64/op_aput_object.S */
+/*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST 77
+ movq rINSTq, OUT_ARG2
+ call SYMBOL(MterpAputObject) # (shadow_frame, dex_pc_ptr, inst_data)
+ testb %al, %al
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
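Unlike the primitive aput handlers, aput-object drops into a runtime helper because a reference store also needs a component-type (assignability) check and a GC write barrier, neither of which the inlined fast path performs. A rough C++ sketch of what that helper has to guarantee; all names here are illustrative stand-ins, not ART's actual types:

    #include <cstdint>

    struct Class;
    struct Object { Class* klass; };
    struct ObjectArray { uint32_t length; Class* component_type; Object* elements[1]; };

    bool IsAssignable(const Class* component_type, const Class* klass);  // stand-in check
    void MarkGCCard(ObjectArray* array);                                 // stand-in write barrier

    // Approximate aput-object semantics: vBB[vCC] <- vAA, with the extra
    // checks that force this opcode out of line.
    bool APutObjectSketch(ObjectArray* array, uint32_t index, Object* value) {
      if (array == nullptr) return false;                 // NullPointerException
      if (index >= array->length) return false;           // ArrayIndexOutOfBoundsException
      if (value != nullptr &&
          !IsAssignable(array->component_type, value->klass)) {
        return false;                                     // ArrayStoreException
      }
      array->elements[index] = value;
      MarkGCCard(array);
      return true;
    }
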
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: x86_64/op_aput_boolean.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ movb rINSTbl, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: x86_64/op_aput_byte.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ movb rINSTbl, MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: x86_64/op_aput_char.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ movw rINSTw, MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: x86_64/op_aput_short.S */
+/* File: x86_64/op_aput.S */
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if 0
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ movw rINSTw, MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget: /* 0x52 */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL(artGet32InstanceFromCode)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
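The iget family funnels through size-specific runtime helpers (artGet32InstanceFromCode and friends) and detects failure by looking at the thread's pending-exception slot rather than the helper's return value, since any bit pattern, including 0, is a legal field value. A hedged C++ sketch of that common tail, with hypothetical names:

    #include <cstdint>

    struct Thread { void* exception; };  // stand-in: only the pending-exception slot matters here

    // Illustrative only: the shape of the post-call handling shared by the iget
    // handlers above. `get32` stands in for an artGet32InstanceFromCode-style helper.
    bool IGet32Tail(uint32_t (*get32)(uint32_t, void*, void*, Thread*),
                    uint32_t field_ref, void* obj, void* referrer,
                    Thread* self, uint32_t* fp, uint32_t vreg_a) {
      uint32_t value = get32(field_ref, obj, referrer, self);
      if (self->exception != nullptr) return false;  // -> MterpException
      fp[vreg_a] = value;                            // fp[A] <- value
      return true;
    }

The iput/sput handlers below, by contrast, can use the helper's return value directly (testb %al / jnz MterpPossibleException), because a put has no result that needs to be distinguished from an error indication.
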
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: x86_64/op_iget_wide.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL(artGet64InstanceFromCode)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if 1
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: x86_64/op_iget_object.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL(artGetObjInstanceFromCode)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ .if 1
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: x86_64/op_iget_boolean.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL(artGetBooleanInstanceFromCode)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: x86_64/op_iget_byte.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL(artGetByteInstanceFromCode)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: x86_64/op_iget_char.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL(artGetCharInstanceFromCode)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: x86_64/op_iget_short.S */
+/* File: x86_64/op_iget.S */
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL(artGetShortInstanceFromCode)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput: /* 0x59 */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet32InstanceFromMterp
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $0xf, rINSTbl # rINST<- A
+ GET_VREG OUT_32_ARG2, rINSTq # fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL(artSet32InstanceFromMterp)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: x86_64/op_iput_wide.S */
+ /* iput-wide vA, vB, field@CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $0xf, rINSTbl # rINST <- A
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL(artSet64InstanceFromMterp)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: x86_64/op_iput_object.S */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST 91
+ movl rINST, OUT_32_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpIputObject)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: x86_64/op_iput_boolean.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $0xf, rINSTbl # rINST<- A
+ GET_VREG OUT_32_ARG2, rINSTq # fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL(artSet8InstanceFromMterp)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: x86_64/op_iput_byte.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $0xf, rINSTbl # rINST<- A
+ GET_VREG OUT_32_ARG2, rINSTq # fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL(artSet8InstanceFromMterp)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: x86_64/op_iput_char.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $0xf, rINSTbl # rINST<- A
+ GET_VREG OUT_32_ARG2, rINSTq # fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL(artSet16InstanceFromMterp)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: x86_64/op_iput_short.S */
+/* File: x86_64/op_iput.S */
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $0xf, rINSTbl # rINST<- A
+ GET_VREG OUT_32_ARG2, rINSTq # fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL(artSet16InstanceFromMterp)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget: /* 0x60 */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern artGet32StaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL(artGet32StaticFromCode)
+ cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: x86_64/op_sget_wide.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL(artGet64StaticFromCode)
+ cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if 1
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: x86_64/op_sget_object.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern artGetObjStaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL(artGetObjStaticFromCode)
+ cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if 1
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: x86_64/op_sget_boolean.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern artGetBooleanStaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL(artGetBooleanStaticFromCode)
+ cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: x86_64/op_sget_byte.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern artGetByteStaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL(artGetByteStaticFromCode)
+ cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: x86_64/op_sget_char.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern artGetCharStaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL(artGetCharStaticFromCode)
+ cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: x86_64/op_sget_short.S */
+/* File: x86_64/op_sget.S */
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern artGetShortStaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL(artGetShortStaticFromCode)
+ cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if 0
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if 0
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput: /* 0x67 */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ .extern artSet32StaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL(artSet32StaticFromCode)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: x86_64/op_sput_wide.S */
+/*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field@BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA]
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL(artSet64IndirectStaticFromMterp)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: x86_64/op_sput_object.S */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST 105
+ movq rINSTq, OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpSputObject)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: x86_64/op_sput_boolean.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ .extern artSet8StaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL(artSet8StaticFromCode)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: x86_64/op_sput_byte.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ .extern artSet8StaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL(artSet8StaticFromCode)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: x86_64/op_sput_char.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ .extern artSet16StaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL(artSet16StaticFromCode)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: x86_64/op_sput_short.S */
+/* File: x86_64/op_sput.S */
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ .extern artSet16StaticFromCode
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL(artSet16StaticFromCode)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: x86_64/op_invoke_virtual.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtual
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 110
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeVirtual)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
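Every invoke variant wraps the same sequence: export the PC, hand (self, shadow frame, PC, instruction word) to a MterpInvoke* helper, advance past the 3-code-unit instruction, and then ask MterpShouldSwitchInterpreters whether to stay in mterp or fall back. In rough C++ form (names are illustrative; the two callbacks stand in for the helpers named in the assembly):

    #include <cstdint>

    enum class NextStep { kContinue, kException, kFallback };

    // Illustrative control-flow sketch of the invoke.S wrapper above.
    NextStep InvokeWrapper(bool (*do_invoke)(void* self, void* shadow_frame,
                                             uint16_t* dex_pc, uint32_t inst),
                           bool (*should_switch)(),
                           void* self, void* shadow_frame,
                           uint16_t*& dex_pc, uint32_t inst) {
      if (!do_invoke(self, shadow_frame, dex_pc, inst)) {
        return NextStep::kException;          // jz MterpException
      }
      dex_pc += 3;                            // ADVANCE_PC 3 (invoke is 3 code units)
      if (should_switch()) {
        return NextStep::kFallback;           // jnz MterpFallback
      }
      return NextStep::kContinue;             // FETCH_INST / GOTO_NEXT
    }
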
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: x86_64/op_invoke_super.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuper
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 111
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeSuper)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: x86_64/op_invoke_direct.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirect
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 112
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeDirect)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: x86_64/op_invoke_static.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStatic
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 113
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeStatic)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: x86_64/op_invoke_interface.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterface
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 114
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeInterface)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: x86_64/op_return_void_no_barrier.S */
+ testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorq %rax, %rax
+ jmp MterpReturn
+
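return-void-no-barrier only has to honor any pending suspend or checkpoint request before leaving the method; the result register is simply zeroed. A small C++ sketch of that flow, with stand-in names and flag values chosen only for illustration:

    #include <cstdint>

    constexpr uint32_t kSuspendRequest    = 1u << 0;  // stand-in for THREAD_SUSPEND_REQUEST
    constexpr uint32_t kCheckpointRequest = 1u << 1;  // stand-in for THREAD_CHECKPOINT_REQUEST

    struct Thread { uint32_t flags; };
    void SuspendCheck(Thread* self);  // stand-in for MterpSuspendCheck

    // Illustrative: test the flags word, run the suspend check if needed,
    // then hand a zero result to MterpReturn.
    uint64_t ReturnVoidNoBarrier(Thread* self) {
      if ((self->flags & (kSuspendRequest | kCheckpointRequest)) != 0) {
        SuspendCheck(self);
      }
      return 0;  // xorq %rax, %rax
    }
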
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: x86_64/op_invoke_virtual_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 116
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeVirtualRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: x86_64/op_invoke_super_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuperRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 117
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeSuperRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: x86_64/op_invoke_direct_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirectRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 118
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeDirectRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: x86_64/op_invoke_static_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStaticRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 119
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeStaticRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: x86_64/op_invoke_interface_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterfaceRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 120
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeInterfaceRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: x86_64/op_unused_79.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: x86_64/op_unused_7a.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: x86_64/op_neg_int.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ negl %eax
+ .if 0
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
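The unop template also shows how the 4-bit register pair of a vA, vB instruction is decoded: rINST arrives holding the byte BA, the low nibble selects A (destination) and the high nibble selects B (source). Equivalent C++, purely as an illustration:

    #include <cstdint>

    // Illustrative decode of the "BA" byte used by the unop handlers above.
    void DecodeVregPair(uint8_t ba, uint32_t* vreg_a, uint32_t* vreg_b) {
      *vreg_a = ba & 0x0f;  // andb $0xf, %cl
      *vreg_b = ba >> 4;    // sarl $4, rINST
    }

    // neg-int then stores the two's-complement negation of fp[vreg_b] into fp[vreg_a].
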
+/* ------------------------------ */
+ .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: x86_64/op_not_int.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ notl %eax
+ .if 0
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: x86_64/op_neg_long.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ negq %rax
+ .if 1
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: x86_64/op_not_long.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ notq %rax
+ .if 1
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: x86_64/op_neg_float.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ xorl $0x80000000, %eax
+ .if 0
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: x86_64/op_neg_double.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+ movq $0x8000000000000000, %rsi
+ xorq %rsi, %rax
+ .if 1
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: x86_64/op_int_to_long.S */
+ /* int to long vA, vB */
+ movzbq rINSTbl, %rax # rax <- +A
+ sarl $4, %eax # eax <- B
+ andb $0xf, rINSTbl # rINST <- A
+ movslq VREG_ADDRESS(%rax), %rax
+ SET_WIDE_VREG %rax, rINSTq # v[A] <- %rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: x86_64/op_int_to_float.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ cvtsi2ssl VREG_ADDRESS(rINSTq), %xmm0
+ .if 0
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: x86_64/op_int_to_double.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ cvtsi2sdl VREG_ADDRESS(rINSTq), %xmm0
+ .if 1
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: x86_64/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: x86_64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movl rINST, %eax # eax <- BA
+ andb $0xf, %al # eax <- A
+ shrl $4, rINST # rINST <- B
+ GET_VREG %edx, rINSTq
+ .if 0
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
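As the comment notes, long-to-int simply keeps the low 32 bits, so the handler reuses the plain register-move template. The equivalent, shown as a one-line C++ illustration:

    #include <cstdint>

    // Illustrative: on two's-complement targets this is just the low 32 bits.
    int32_t LongToInt(int64_t value) {
      return static_cast<int32_t>(value);
    }
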
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: x86_64/op_long_to_float.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ cvtsi2ssq VREG_ADDRESS(rINSTq), %xmm0
+ .if 0
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: x86_64/op_long_to_double.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ cvtsi2sdq VREG_ADDRESS(rINSTq), %xmm0
+ .if 1
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: x86_64/op_float_to_int.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate.
+ */
+ /* float/double to int/long vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ movss VREG_ADDRESS(rINSTq), %xmm0
+ movl $0x7fffffff, %eax
+ cvtsi2ssl %eax, %xmm1
+ comiss %xmm1, %xmm0
+ jae 1f
+ jp 2f
+ cvttss2sil %xmm0, %eax
+ jmp 1f
+2:
+ xorl %eax, %eax
+1:
+ .if 0
+ SET_WIDE_VREG %eax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
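The cvtfp_int template encodes the Java narrowing rules spelled out in its comment: NaN becomes 0, values at or above the integer maximum clamp to maxint, and everything else truncates toward zero; the minint clamp falls out of cvttss2si itself, which produces 0x80000000 for out-of-range negatives. A C++ sketch of those semantics (illustrative, not the handler's actual code path):

    #include <cstdint>
    #include <cmath>
    #include <limits>

    // Java semantics for float -> int, as implemented by the handler above.
    int32_t FloatToInt(float value) {
      if (std::isnan(value)) {
        return 0;                                              // NaN -> 0 (jp 2f path)
      }
      if (value >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
        return std::numeric_limits<int32_t>::max();            // clamp to maxint (jae path)
      }
      if (value <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
        return std::numeric_limits<int32_t>::min();            // clamp to minint
      }
      return static_cast<int32_t>(value);                      // truncate toward zero
    }
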
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: x86_64/op_float_to_long.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate.
+ */
+ /* float/double to int/long vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ movss VREG_ADDRESS(rINSTq), %xmm0
+ movq $0x7fffffffffffffff, %rax
+ cvtsi2ssq %rax, %xmm1
+ comiss %xmm1, %xmm0
+ jae 1f
+ jp 2f
+ cvttss2siq %xmm0, %rax
+ jmp 1f
+2:
+ xorq %rax, %rax
+1:
+ .if 1
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %rax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: x86_64/op_float_to_double.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ cvtss2sd VREG_ADDRESS(rINSTq), %xmm0
+ .if 1
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: x86_64/op_double_to_int.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate.
+ */
+ /* float/double to int/long vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ movsd VREG_ADDRESS(rINSTq), %xmm0
+ movl $0x7fffffff, %eax
+ cvtsi2sdl %eax, %xmm1
+ comisd %xmm1, %xmm0
+ jae 1f
+ jp 2f
+ cvttsd2sil %xmm0, %eax
+ jmp 1f
+2:
+ xorl %eax, %eax
+1:
+ .if 0
+ SET_WIDE_VREG %eax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: x86_64/op_double_to_long.S */
+/* File: x86_64/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate.
+ */
+ /* float/double to int/long vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ movsd VREG_ADDRESS(rINSTq), %xmm0
+ movq $0x7fffffffffffffff, %rax
+ cvtsi2sdq %rax, %xmm1
+ comisd %xmm1, %xmm0
+ jae 1f
+ jp 2f
+ cvttsd2siq %xmm0, %rax
+ jmp 1f
+2:
+ xorq %rax, %rax
+1:
+ .if 1
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %rax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: x86_64/op_double_to_float.S */
+/* File: x86_64/fpcvt.S */
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ cvtsd2ss VREG_ADDRESS(rINSTq), %xmm0
+ .if 0
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: x86_64/op_int_to_byte.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ movsbl %al, %eax
+ .if 0
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: x86_64/op_int_to_char.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ movzwl %ax,%eax
+ .if 0
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: x86_64/op_int_to_short.S */
+/* File: x86_64/unop.S */
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4,rINST # rINST <- B
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $0xf,%cl # ecx <- A
+
+ movswl %ax, %eax
+ .if 0
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: x86_64/op_add_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ addl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: x86_64/op_sub_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ subl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: x86_64/op_mul_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ imull (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: x86_64/op_div_int.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ .if 0
+ GET_WIDE_VREG %rax, %rax # eax <- vBB
+ GET_WIDE_VREG %ecx, %rcx # ecx <- vCC
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ GET_VREG %ecx, %rcx # ecx <- vCC
+ .endif
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rdx:rax <- sign-extended of rax
+ idivl %ecx
+1:
+ .if 0
+ SET_WIDE_VREG %eax, rINSTq # eax <- vBB
+ .else
+ SET_VREG %eax, rINSTq # eax <- vBB
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 0
+ xorl %eax, %eax
+ .else
+ negl %eax
+ .endif
+ jmp 1b
+
+
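The bindiv template's special case exists because idivl faults on INT_MIN / -1 (the quotient overflows), while Java defines that quotient as INT_MIN and the corresponding remainder as 0; the generated code therefore substitutes negl for div and xorl for rem when the divisor is -1. In C++ terms (illustrative sketch, not the interpreter's code):

    #include <cstdint>
    #include <limits>

    // Java semantics for div-int; divide-by-zero is raised as ArithmeticException
    // by the handler (common_errDivideByZero) before this point is reached.
    int32_t DivInt(int32_t dividend, int32_t divisor) {
      if (divisor == -1) {
        // idivl would fault on INT32_MIN / -1; Java defines the result as INT32_MIN,
        // which is exactly what two's-complement negation yields.
        return dividend == std::numeric_limits<int32_t>::min()
                   ? std::numeric_limits<int32_t>::min()
                   : -dividend;
      }
      return dividend / divisor;
    }

    // rem-int differs only in the special case: any value % -1 is 0.
    int32_t RemInt(int32_t dividend, int32_t divisor) {
      if (divisor == -1) {
        return 0;
      }
      return dividend % divisor;
    }
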
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: x86_64/op_rem_int.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ .if 0
+ GET_WIDE_VREG %rax, %rax # eax <- vBB
+ GET_WIDE_VREG %ecx, %rcx # ecx <- vCC
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ GET_VREG %ecx, %rcx # ecx <- vCC
+ .endif
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rdx:rax <- sign-extended of rax
+ idivl %ecx
+1:
+ .if 0
+ SET_WIDE_VREG %edx, rINSTq # eax <- vBB
+ .else
+ SET_VREG %edx, rINSTq # eax <- vBB
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 1
+ xorl %edx, %edx
+ .else
+ negl %edx
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: x86_64/op_and_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ andl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: x86_64/op_or_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ orl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: x86_64/op_xor_int.S */
+/* File: x86_64/binop.S */
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ xorl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: x86_64/op_shl_int.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %ecx, %rcx # eax <- vCC
+ .if 0
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ sall %cl, %eax # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ sall %cl, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: x86_64/op_shr_int.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %ecx, %rcx # eax <- vCC
+ .if 0
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ sarl %cl, %eax # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ sarl %cl, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: x86_64/op_ushr_int.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %ecx, %rcx # eax <- vCC
+ .if 0
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ shrl %cl, %eax # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ shrl %cl, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: x86_64/op_add_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ addq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: x86_64/op_sub_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ subq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: x86_64/op_mul_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ imulq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: x86_64/op_div_long.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 64-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ .if 1
+ GET_WIDE_VREG %rax, %rax # eax <- vBB
+ GET_WIDE_VREG %rcx, %rcx # ecx <- vCC
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ GET_VREG %rcx, %rcx # ecx <- vCC
+ .endif
+ testq %rcx, %rcx
+ jz common_errDivideByZero
+ cmpq $-1, %rcx
+ je 2f
+ cqo # rdx:rax <- sign-extended of rax
+ idivq %rcx
+1:
+ .if 1
+ SET_WIDE_VREG %rax, rINSTq # eax <- vBB
+ .else
+ SET_VREG %rax, rINSTq # eax <- vBB
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 0
+ xorq %rax, %rax
+ .else
+ negq %rax
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: x86_64/op_rem_long.S */
+/* File: x86_64/bindiv.S */
+/*
+ * 64-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ .if 1
+ GET_WIDE_VREG %rax, %rax # eax <- vBB
+ GET_WIDE_VREG %rcx, %rcx # ecx <- vCC
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ GET_VREG %rcx, %rcx # ecx <- vCC
+ .endif
+ testq %rcx, %rcx
+ jz common_errDivideByZero
+ cmpq $-1, %rcx
+ je 2f
+ cqo # rdx:rax <- sign-extended of rax
+ idivq %rcx
+1:
+ .if 1
+ SET_WIDE_VREG %rdx, rINSTq # eax <- vBB
+ .else
+ SET_VREG %rdx, rINSTq # eax <- vBB
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 1
+ xorq %rdx, %rdx
+ .else
+ negq %rdx
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: x86_64/op_and_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ andq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: x86_64/op_or_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ orq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: x86_64/op_xor_long.S */
+/* File: x86_64/binopWide.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ xorq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: x86_64/op_shl_long.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 64-bit binary operation in which both operands are loaded to
+ * registers (value in rax, shift count in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %ecx, %rcx # eax <- vCC
+ .if 1
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ salq %cl, %rax # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ salq %cl, %rax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: x86_64/op_shr_long.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 64-bit binary operation in which both operands are loaded to
+ * registers (value in rax, shift count in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %ecx, %rcx # eax <- vCC
+ .if 1
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ sarq %cl, %rax # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ sarq %cl, %rax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: x86_64/op_ushr_long.S */
+/* File: x86_64/binop1.S */
+/*
+ * Generic 64-bit binary operation in which both operands are loaded to
+ * registers (value in rax, shift count in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %ecx, %rcx # eax <- vCC
+ .if 1
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ shrq %cl, %rax # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ shrq %cl, %rax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: x86_64/op_add_float.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ addss VREG_ADDRESS(%rax), %xmm0
+ movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
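+/*
+ * The float/double binops go straight through scalar SSE on the vreg slots in
+ * memory.  The trailing pxor/movss pair is not part of the arithmetic: it
+ * clears the matching slot in the shadow frame's reference array so vAA is no
+ * longer treated as possibly holding an object reference.
+ */
+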
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: x86_64/op_sub_float.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ subss VREG_ADDRESS(%rax), %xmm0
+ movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: x86_64/op_mul_float.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ mulss VREG_ADDRESS(%rax), %xmm0
+ movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: x86_64/op_div_float.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ divss VREG_ADDRESS(%rax), %xmm0
+ movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: x86_64/op_rem_float.S */
+ /* rem_float vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx <- CC
+ movzbq 2(rPC), %rax # eax <- BB
+ flds VREG_ADDRESS(%rcx) # vCC (divisor) to fp stack
+ flds VREG_ADDRESS(%rax) # vBB (dividend) to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(rINSTq) # %st to vAA
+ CLEAR_REF rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
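+/*
+ * rem-float cannot use SSE: Java's floating-point % is the fmod()-style
+ * remainder, which x86 only provides via x87 fprem.  fprem may return a
+ * partial remainder, so the loop re-issues it until the status word's C2 bit
+ * (moved into PF by the fstsw/sahf pair) reads clear, then pops the divisor
+ * and stores the result back to vAA.
+ */
+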
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double: /* 0xab */
+/* File: x86_64/op_add_double.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ addsd VREG_ADDRESS(%rax), %xmm0
+ movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: x86_64/op_sub_double.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ subsd VREG_ADDRESS(%rax), %xmm0
+ movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: x86_64/op_mul_double.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ mulsd VREG_ADDRESS(%rax), %xmm0
+ movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double: /* 0xae */
+/* File: x86_64/op_div_double.S */
+/* File: x86_64/sseBinop.S */
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ divsd VREG_ADDRESS(%rax), %xmm0
+ movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: x86_64/op_rem_double.S */
+ /* rem_double vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx <- CC
+ movzbq 2(rPC), %rax # eax <- BB
+ fldl VREG_ADDRESS(%rcx) # %st1 <- fp[vCC] (divisor)
+ fldl VREG_ADDRESS(%rax) # %st0 <- fp[vBB] (dividend)
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(rINSTq) # fp[vAA] <- %st
+ CLEAR_WIDE_REF rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: x86_64/op_add_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_VREG %eax, rINSTq # eax <- vB
+ addl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
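+/*
+ * The /2addr forms pack both registers into the single byte in rINST: low
+ * nibble = vA (destination and first operand), high nibble = vB.  Because vA
+ * is read-modify-write, the template applies the op directly to the frame
+ * slot (rFP,%rcx,4), i.e. vA = vA <op> vB, and only advances the PC by one
+ * code unit.
+ */
+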
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: x86_64/op_sub_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_VREG %eax, rINSTq # eax <- vB
+ subl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: x86_64/op_mul_int_2addr.S */
+ /* mul vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ imull (rFP,rINSTq,4), %eax
+ SET_VREG %eax, %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: x86_64/op_div_int_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # rcx <- B
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # eax <- vA
+ GET_WIDE_VREG %ecx, %rcx # ecx <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vA
+ GET_VREG %ecx, %rcx # ecx <- vB
+ .endif
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rdx:rax <- sign-extended of rax
+ idivl %ecx
+1:
+ .if 0
+ SET_WIDE_VREG %eax, rINSTq # vA <- result
+ .else
+ SET_VREG %eax, rINSTq # vA <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+ .if 0
+ xorl %eax, %eax
+ .else
+ negl %eax
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: x86_64/op_rem_int_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # rcx <- B
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # eax <- vA
+ GET_WIDE_VREG %ecx, %rcx # ecx <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vA
+ GET_VREG %ecx, %rcx # ecx <- vB
+ .endif
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rdx:rax <- sign-extended of rax
+ idivl %ecx
+1:
+ .if 0
+ SET_WIDE_VREG %edx, rINSTq # vA <- result
+ .else
+ SET_VREG %edx, rINSTq # vA <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+ .if 1
+ xorl %edx, %edx
+ .else
+ negl %edx
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: x86_64/op_and_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_VREG %eax, rINSTq # eax <- vB
+ andl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: x86_64/op_or_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_VREG %eax, rINSTq # eax <- vB
+ orl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: x86_64/op_xor_int_2addr.S */
+/* File: x86_64/binop2addr.S */
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_VREG %eax, rINSTq # eax <- vB
+ xorl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: x86_64/op_shl_int_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ sall %cl, %eax # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ sall %cl, %eax # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: x86_64/op_shr_int_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ sarl %cl, %eax # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ sarl %cl, %eax # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: x86_64/op_ushr_int_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $0xf, rINSTbl # rINST <- A
+ .if 0
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ shrl %cl, %eax # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ shrl %cl, %eax # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: x86_64/op_add_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ addq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: x86_64/op_sub_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ subq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: x86_64/op_mul_long_2addr.S */
+ /* mul vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, %rcx # rax <- vA
+ imulq (rFP,rINSTq,4), %rax
+ SET_WIDE_VREG %rax, %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: x86_64/op_div_long_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 64-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # rcx <- B
+ andb $0xf, rINSTbl # rINST <- A
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # eax <- vA
+ GET_WIDE_VREG %rcx, %rcx # ecx <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vA
+ GET_VREG %rcx, %rcx # ecx <- vB
+ .endif
+ testq %rcx, %rcx
+ jz common_errDivideByZero
+ cmpq $-1, %rcx
+ je 2f
+ cqo # rdx:rax <- sign-extended of rax
+ idivq %rcx
+1:
+ .if 1
+ SET_WIDE_VREG %rax, rINSTq # vA <- result
+ .else
+ SET_VREG %rax, rINSTq # vA <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+ .if 0
+ xorq %rax, %rax
+ .else
+ negq %rax
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: x86_64/op_rem_long_2addr.S */
+/* File: x86_64/bindiv2addr.S */
+/*
+ * 64-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # rcx <- B
+ andb $0xf, rINSTbl # rINST <- A
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # eax <- vA
+ GET_WIDE_VREG %rcx, %rcx # ecx <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vA
+ GET_VREG %rcx, %rcx # ecx <- vB
+ .endif
+ testq %rcx, %rcx
+ jz common_errDivideByZero
+ cmpq $-1, %rcx
+ je 2f
+ cqo # rdx:rax <- sign-extended of rax
+ idivq %rcx
+1:
+ .if 1
+ SET_WIDE_VREG %rdx, rINSTq # vA <- result
+ .else
+ SET_VREG %rdx, rINSTq # vA <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+ .if 1
+ xorq %rdx, %rdx
+ .else
+ negq %rdx
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: x86_64/op_and_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ andq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: x86_64/op_or_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ orq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: x86_64/op_xor_long_2addr.S */
+/* File: x86_64/binopWide2addr.S */
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $4, rINST # rINST <- B
+ andb $0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ xorq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: x86_64/op_shl_long_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 64-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $0xf, rINSTbl # rINST <- A
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ salq %cl, %rax # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ salq %cl, %rax # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: x86_64/op_shr_long_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 64-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $0xf, rINSTbl # rINST <- A
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ sarq %cl, %rax # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ sarq %cl, %rax # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: x86_64/op_ushr_long_2addr.S */
+/* File: x86_64/shop2addr.S */
+/*
+ * Generic 64-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $0xf, rINSTbl # rINST <- A
+ .if 1
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ shrq %cl, %rax # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ shrq %cl, %rax # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: x86_64/op_add_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ addss VREG_ADDRESS(rINSTq), %xmm0
+ movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: x86_64/op_sub_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ subss VREG_ADDRESS(rINSTq), %xmm0
+ movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: x86_64/op_mul_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ mulss VREG_ADDRESS(rINSTq), %xmm0
+ movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: x86_64/op_div_float_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ divss VREG_ADDRESS(rINSTq), %xmm0
+ movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: x86_64/op_rem_float_2addr.S */
+ /* rem_float/2addr vA, vB */
+ movzbq rINSTbl, %rcx # ecx <- A+
+ sarl $4, rINST # rINST <- B
+ flds VREG_ADDRESS(rINSTq) # vB to fp stack
+ andb $0xf, %cl # ecx <- A
+ flds VREG_ADDRESS(%rcx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(%rcx) # %st to vA
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: x86_64/op_add_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ addsd VREG_ADDRESS(rINSTq), %xmm0
+ movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: x86_64/op_sub_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ subsd VREG_ADDRESS(rINSTq), %xmm0
+ movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: x86_64/op_mul_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ mulsd VREG_ADDRESS(rINSTq), %xmm0
+ movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: x86_64/op_div_double_2addr.S */
+/* File: x86_64/sseBinop2Addr.S */
+ movl rINST, %ecx # ecx <- A+
+ andl $0xf, %ecx # ecx <- A
+ movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $4, rINST # rINST<- B
+ divsd VREG_ADDRESS(rINSTq), %xmm0
+ movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: x86_64/op_rem_double_2addr.S */
+ /* rem_double/2addr vA, vB */
+ movzbq rINSTbl, %rcx # ecx <- A+
+ sarl $4, rINST # rINST <- B
+ fldl VREG_ADDRESS(rINSTq) # vB to fp stack
+ andb $0xf, %cl # ecx <- A
+ fldl VREG_ADDRESS(%rcx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(%rcx) # %st to vA
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: x86_64/op_add_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ addl %ecx, %eax # for example: addl %ecx, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
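+/*
+ * The lit16 forms read their second operand straight from the instruction
+ * stream: movswl fetches the sign-extended 16-bit literal CCCC from the second
+ * code unit, so the handler computes vA = vB <op> (int16_t)CCCC without a
+ * third vreg access.  rsub-int below is the one variant with reversed operand
+ * order (literal minus vB).
+ */
+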
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: x86_64/op_rsub_int.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ subl %eax, %ecx # for example: addl %ecx, %eax
+ SET_VREG %ecx, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: x86_64/op_mul_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ imull %ecx, %eax # for example: addl %ecx, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: x86_64/op_div_int_lit16.S */
+/* File: x86_64/bindivLit16.S */
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ andb $0xf, rINSTbl # rINST <- A
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG %eax, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 0
+ xorl %eax, %eax
+ .else
+ negl %eax
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: x86_64/op_rem_int_lit16.S */
+/* File: x86_64/bindivLit16.S */
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ andb $0xf, rINSTbl # rINST <- A
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG %edx, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 1
+ xorl %edx, %edx
+ .else
+ negl %edx
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: x86_64/op_and_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ andl %ecx, %eax # for example: addl %ecx, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: x86_64/op_or_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ orl %ecx, %eax # for example: addl %ecx, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: x86_64/op_xor_int_lit16.S */
+/* File: x86_64/binopLit16.S */
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ xorl %ecx, %eax # for example: addl %ecx, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: x86_64/op_add_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ addl %ecx, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
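+/*
+ * The lit8 forms use the AA|op BB|CC layout: BB indexes the source vreg and CC
+ * is a sign-extended 8-bit immediate (movsbl).  For the shift variants further
+ * down the literal is simply left in %ecx and used as %cl, letting the
+ * hardware mask the count to its low 5 bits.
+ */
+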
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: x86_64/op_rsub_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ subl %eax, %ecx # ex: addl %ecx,%eax
+ SET_VREG %ecx, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: x86_64/op_mul_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ imull %ecx, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: x86_64/op_div_int_lit8.S */
+/* File: x86_64/bindivLit8.S */
+/*
+ * 32-bit div/rem "lit8" binary operation. Handles special case of
+ * op0=minint & op1=-1
+ */
+ /* div/rem/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movsbl 3(rPC), %ecx # ecx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG %eax, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 0
+ xorl %eax, %eax
+ .else
+ negl %eax
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: x86_64/op_rem_int_lit8.S */
+/* File: x86_64/bindivLit8.S */
+/*
+ * 32-bit div/rem "lit8" binary operation. Handles special case of
+ * op0=minint & op1=-1
+ */
+ /* div/rem/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movsbl 3(rPC), %ecx # ecx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ cmpl $-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG %edx, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if 1
+ xorl %edx, %edx
+ .else
+ negl %edx
+ .endif
+ jmp 1b
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: x86_64/op_and_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ andl %ecx, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: x86_64/op_or_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ orl %ecx, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: x86_64/op_xor_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ xorl %ecx, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: x86_64/op_shl_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ sall %cl, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: x86_64/op_shr_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ sarl %cl, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: x86_64/op_ushr_int_lit8.S */
+/* File: x86_64/binopLit8.S */
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ shrl %cl, %eax # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: x86_64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf,rINSTbl # rINST <- A
+ .if 0
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ movl (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
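+/*
+ * The -quick opcodes are the quickened field accessors: the dex field index
+ * has already been rewritten to the field's byte offset (offset@CCCC), so the
+ * handler only null-checks the object in vB and loads directly from
+ * (object + offset) -- no field resolution and no runtime call on this path.
+ */
+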
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: x86_64/op_iget_wide_quick.S */
+/* File: x86_64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf,rINSTbl # rINST <- A
+ .if 1
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ movswl (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: x86_64/op_iget_object_quick.S */
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ .extern artIGetObjectFromMterp
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG0, %rcx # vB (object we're operating on)
+ movzwl 2(rPC), OUT_32_ARG1 # eax <- field byte offset
+ EXPORT_PC
+ callq SYMBOL(artIGetObjectFromMterp) # (obj, offset)
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $0xf, rINSTbl # rINST <- A
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
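+/*
+ * Unlike the primitive -quick loads above, the object variant calls out to
+ * artIGetObjectFromMterp so the read is done under the runtime's rules for
+ * reference fields, then checks the thread's pending-exception slot before
+ * storing the result with SET_VREG_OBJECT, which also records the value in
+ * the reference array.
+ */
+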
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: x86_64/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINSTq # rINST <- v[A]
+ movzwq 2(rPC), %rax # rax <- field byte offset
+ movl rINST, (%rcx,%rax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: x86_64/op_iput_wide_quick.S */
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ movzwq 2(rPC), %rax # rax<- field byte offset
+ leaq (%rcx,%rax,1), %rcx # ecx<- Address of 64-bit target
+ andb $0xf, rINSTbl # rINST<- A
+ GET_WIDE_VREG %rax, rINSTq # rax<- fp[A]/fp[A+1]
+ movq %rax, (%rcx) # obj.field<- r0/r1
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: x86_64/op_iput_object_quick.S */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST 232
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpIputObjectQuick)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: x86_64/op_invoke_virtual_quick.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuick
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 233
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeVirtualQuick)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
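+/*
+ * Invokes are not expanded in assembly.  The wrapper exports the PC, passes
+ * (self, shadow frame, PC, packed instruction word) to the
+ * MterpInvokeVirtualQuick C++ helper, and treats a zero return as a pending
+ * exception.  On success it advances past the 3-unit invoke and asks
+ * MterpShouldSwitchInterpreters whether to bail out to MterpFallback before
+ * fetching the next opcode.
+ */
+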
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: x86_64/op_invoke_virtual_range_quick.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuickRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 234
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeVirtualQuickRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: x86_64/op_iput_boolean_quick.S */
+/* File: x86_64/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINSTq # rINST <- v[A]
+ movzwq 2(rPC), %rax # rax <- field byte offset
+ movb rINSTbl, (%rcx,%rax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: x86_64/op_iput_byte_quick.S */
+/* File: x86_64/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINSTq # rINST <- v[A]
+ movzwq 2(rPC), %rax # rax <- field byte offset
+ movb rINSTbl, (%rcx,%rax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: x86_64/op_iput_char_quick.S */
+/* File: x86_64/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINSTq # rINST <- v[A]
+ movzwq 2(rPC), %rax # rax <- field byte offset
+ movw rINSTw, (%rcx,%rax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: x86_64/op_iput_short_quick.S */
+/* File: x86_64/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINSTq # rINST <- v[A]
+ movzwq 2(rPC), %rax # rax <- field byte offset
+ movw rINSTw, (%rcx,%rax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: x86_64/op_iget_boolean_quick.S */
+/* File: x86_64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf,rINSTbl # rINST <- A
+ .if 0
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ movsbl (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
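All of the iget-*-quick handlers are stamped out from the same x86_64/op_iget_quick.S template: the ".if 0" / ".else" pair is the template's wide/narrow switch (fixed to the narrow branch here), and only the load instruction (movsbl, movzwl, movswl) differs between the boolean, byte, char and short variants. In C++ terms the narrow loads reduce to sign- or zero-extending reads; the helper names below are illustrative, not ART APIs:

    #include <cstdint>
    #include <cstring>

    // Narrow-path sketch of what movsbl / movzwl do in the handlers above.
    int32_t LoadSignedByte(const uint8_t* obj, uint16_t offset) {
      int8_t v;
      std::memcpy(&v, obj + offset, sizeof(v));
      return v;                      // movsbl: sign-extend to 32 bits
    }

    int32_t LoadUnsignedChar(const uint8_t* obj, uint16_t offset) {
      uint16_t v;
      std::memcpy(&v, obj + offset, sizeof(v));
      return v;                      // movzwl: zero-extend to 32 bits
    }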
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: x86_64/op_iget_byte_quick.S */
+/* File: x86_64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf,rINSTbl # rINST <- A
+ .if 0
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ movsbl (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: x86_64/op_iget_char_quick.S */
+/* File: x86_64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf,rINSTbl # rINST <- A
+ .if 0
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ movzwl (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: x86_64/op_iget_short_quick.S */
+/* File: x86_64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $0xf,rINSTbl # rINST <- A
+ .if 0
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ movswl (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: x86_64/op_unused_f4.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: x86_64/op_unused_fa.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: x86_64/op_unused_fb.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: x86_64/op_unused_fc.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: x86_64/op_unused_fd.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: x86_64/op_unused_fe.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: x86_64/op_unused_ff.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+
+ .balign 128
+ SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
+ .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global SYMBOL(artMterpAsmSisterStart)
+ FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
+ .text
+ .balign 4
+SYMBOL(artMterpAsmSisterStart):
+
+ SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
+ .global SYMBOL(artMterpAsmSisterEnd)
+SYMBOL(artMterpAsmSisterEnd):
+
+
+ .global SYMBOL(artMterpAsmAltInstructionStart)
+ FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
+ .text
+
+SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(0*128)
+
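Each handler, real or alternate, occupies a fixed 128-byte slot (hence the .balign 128 before every label), so a handler address is just base + opcode * 128; that is what the jmp .L_op_nop+(N*128) at the end of every alt stub relies on. A minimal sketch of that computed dispatch, where handler_base stands in for artMterpAsmInstructionStart or the alternate table base:

    #include <cstdint>

    // Sketch of the computed-goto dispatch implied by the 128-byte slots.
    using Handler = void (*)();
    constexpr uintptr_t kHandlerSize = 128;

    Handler HandlerFor(uintptr_t handler_base, uint8_t opcode) {
      return reinterpret_cast<Handler>(handler_base + opcode * kHandlerSize);
    }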
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(1*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(2*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(3*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(4*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(5*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(6*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(7*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(8*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(9*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(10*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(11*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(12*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(13*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(14*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(15*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(16*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(17*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(18*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(19*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(20*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(21*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(22*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(23*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(24*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(25*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(26*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(27*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(28*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(29*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(30*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(31*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(32*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(33*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(34*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(35*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(36*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(37*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(38*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(39*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(40*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(41*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(42*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(43*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(44*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(45*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(46*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(47*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(48*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(49*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(50*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(51*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(52*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(53*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(54*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(55*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(56*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(57*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(58*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(59*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(60*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(61*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(62*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(63*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(64*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(65*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(66*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(67*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(68*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(69*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(70*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(71*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(72*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(73*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(74*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(75*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(76*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(77*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(78*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(79*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(80*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(81*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(82*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(83*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(84*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(85*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(86*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(87*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(88*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(89*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(90*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(91*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(92*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(93*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(94*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(95*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(96*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(97*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(98*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(99*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(100*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(101*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(102*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(103*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(104*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(105*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(106*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(107*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(108*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(109*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(110*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(111*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(112*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(113*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(114*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(115*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(116*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(117*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(118*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(119*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(120*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(121*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(122*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(123*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(124*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(125*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(126*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(127*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(128*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(129*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(130*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(131*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(132*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(133*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(134*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(135*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(136*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(137*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(138*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(139*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(140*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(141*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(142*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(143*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(144*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(145*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(146*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(147*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(148*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(149*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(150*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(151*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(152*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(153*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(154*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(155*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(156*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(157*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(158*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(159*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(160*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(161*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(162*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(163*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(164*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(165*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(166*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(167*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(168*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(169*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(170*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(171*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(172*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(173*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(174*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(175*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(176*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(177*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(178*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(179*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(180*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(181*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(182*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(183*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(184*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(185*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(186*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(187*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(188*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(189*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(190*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(191*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(192*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(193*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(194*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(195*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(196*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(197*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(198*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(199*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(200*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(201*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(202*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(203*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(204*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(205*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(206*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(207*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(208*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(209*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(210*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(211*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(212*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(213*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(214*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(215*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(216*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(217*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(218*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(219*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(220*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(221*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(222*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(223*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(224*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(225*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(226*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(227*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(228*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(229*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(230*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(231*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(232*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(233*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(234*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(235*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(236*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(237*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(238*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(239*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(240*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(241*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(242*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(243*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(244*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(245*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(246*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(247*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(248*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(249*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(250*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(251*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(252*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(253*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(254*128)
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: x86_64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(255*128)
+
+ .balign 128
+ SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
+ .global SYMBOL(artMterpAsmAltInstructionEnd)
+SYMBOL(artMterpAsmAltInstructionEnd):
+/* File: x86_64/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogDivideByZeroException)
+#endif
+ jmp MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogArrayIndexException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNoSuchMethodException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNullObjectException)
+#endif
+ jmp MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogExceptionThrownException)
+#endif
+ jmp MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl THREAD_FLAGS_OFFSET(rSELF), OUT_32_ARG2
+ call SYMBOL(MterpLogSuspendFallback)
+#endif
+ jmp MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ cmpq $0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jz MterpFallback
+ /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpHandleException)
+ testb %al, %al
+ jz MterpExceptionReturn
+ movq OFF_FP_CODE_ITEM(rFP), %rax
+ mov OFF_FP_DEX_PC(rFP), %ecx
+ leaq CODEITEM_INSNS_OFFSET(%rax), rPC
+ leaq (rPC, %rcx, 2), rPC
+ movq rPC, OFF_FP_DEX_PC_PTR(rFP)
+ /* Do we need to switch interpreters? */
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ /* resume execution at catch block */
+ REFRESH_IBASE
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a pending suspend request. Assumes rINST is already loaded and rPC
+ * advanced; we still need to fetch the opcode and branch to it.
+ */
+MterpCheckSuspendAndContinue:
+ REFRESH_IBASE
+ testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GOTO_NEXT
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpLogOSR)
+#endif
+ movl $1, %eax
+ jmp MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+ xorl %eax, %eax
+ jmp MterpDone
+
+/*
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ movl $1, %eax
+ jmp MterpDone
+MterpReturn:
+ movq OFF_FP_RESULT_REGISTER(rFP), %rdx
+ movq %rax, (%rdx)
+ movl $1, %eax
+MterpDone:
+ /* pop up frame */
+ addq $FRAME_SIZE, %rsp
+ .cfi_adjust_cfa_offset -FRAME_SIZE
+
+ /* Restore callee save register */
+ POP %r15
+ POP %r14
+ POP %r13
+ POP %r12
+ POP %rbp
+ POP %rbx
+ ret
+ .cfi_endproc
+ SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
+
diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh
index ac87945..ca3dcd9 100755
--- a/runtime/interpreter/mterp/rebuild.sh
+++ b/runtime/interpreter/mterp/rebuild.sh
@@ -20,5 +20,4 @@
#
set -e
-# for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
-for arch in arm x86 arm64 ; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
+for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
index b83f7e1..785efdc 100644
--- a/runtime/interpreter/mterp/x86/entry.S
+++ b/runtime/interpreter/mterp/x86/entry.S
@@ -32,16 +32,18 @@
SYMBOL(ExecuteMterpImpl):
.cfi_startproc
+ .cfi_def_cfa esp, 4
+
+ /* Spill callee save regs */
+ PUSH %ebp
+ PUSH %edi
+ PUSH %esi
+ PUSH %ebx
+
/* Allocate frame */
subl $$FRAME_SIZE, %esp
.cfi_adjust_cfa_offset FRAME_SIZE
- /* Spill callee save regs */
- movl %ebp, EBP_SPILL(%esp)
- movl %edi, EDI_SPILL(%esp)
- movl %esi, ESI_SPILL(%esp)
- movl %ebx, EBX_SPILL(%esp)
-
/* Load ShadowFrame pointer */
movl IN_ARG2(%esp), %edx
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index 64d72d7..3965ecd 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -189,16 +189,15 @@
movl %ecx, 4(%edx)
mov $$1, %eax
MterpDone:
- /* Restore callee save register */
- movl EBP_SPILL(%esp), %ebp
- movl EDI_SPILL(%esp), %edi
- movl ESI_SPILL(%esp), %esi
- movl EBX_SPILL(%esp), %ebx
-
/* pop up frame */
addl $$FRAME_SIZE, %esp
.cfi_adjust_cfa_offset -FRAME_SIZE
- ret
+ /* Restore callee save register */
+ POP %ebx
+ POP %esi
+ POP %edi
+ POP %ebp
+ ret
.cfi_endproc
SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
index 6bddaf9..5729b90 100644
--- a/runtime/interpreter/mterp/x86/header.S
+++ b/runtime/interpreter/mterp/x86/header.S
@@ -105,25 +105,32 @@
#define SYMBOL(name) name
#endif
+.macro PUSH _reg
+ pushl \_reg
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+ popl \_reg
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore \_reg
+.endm
+
/* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address
+ * Remember about 4 bytes for return address + 4 * 4 for spills
*/
-#define FRAME_SIZE 44
+#define FRAME_SIZE 28
/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 (FRAME_SIZE + 16)
-#define IN_ARG2 (FRAME_SIZE + 12)
-#define IN_ARG1 (FRAME_SIZE + 8)
-#define IN_ARG0 (FRAME_SIZE + 4)
-#define CALLER_RP (FRAME_SIZE + 0)
+#define IN_ARG3 (FRAME_SIZE + 16 + 16)
+#define IN_ARG2 (FRAME_SIZE + 16 + 12)
+#define IN_ARG1 (FRAME_SIZE + 16 + 8)
+#define IN_ARG0 (FRAME_SIZE + 16 + 4)
/* Spill offsets relative to %esp */
-#define EBP_SPILL (FRAME_SIZE - 4)
-#define EDI_SPILL (FRAME_SIZE - 8)
-#define ESI_SPILL (FRAME_SIZE - 12)
-#define EBX_SPILL (FRAME_SIZE - 16)
-#define LOCAL0 (FRAME_SIZE - 20)
-#define LOCAL1 (FRAME_SIZE - 24)
-#define LOCAL2 (FRAME_SIZE - 28)
+#define LOCAL0 (FRAME_SIZE - 4)
+#define LOCAL1 (FRAME_SIZE - 8)
+#define LOCAL2 (FRAME_SIZE - 12)
/* Out Arg offsets, relative to %esp */
#define OUT_ARG3 ( 12)
#define OUT_ARG2 ( 8)
diff --git a/runtime/interpreter/mterp/x86_64/alt_stub.S b/runtime/interpreter/mterp/x86_64/alt_stub.S
new file mode 100644
index 0000000..6fcebbb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/alt_stub.S
@@ -0,0 +1,17 @@
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Unlike the Arm handler, we can't do this as a tail call
+ * because rIBASE is caller save and we need to reload it.
+ *
+ * Note that unlike in the Arm implementation, we should never arrive
+ * here with a zero breakFlag because we always refresh rIBASE on
+ * return.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
+ jmp .L_op_nop+(${opnum}*${handler_size_bytes})
diff --git a/runtime/interpreter/mterp/x86_64/bincmp.S b/runtime/interpreter/mterp/x86_64/bincmp.S
new file mode 100644
index 0000000..a16050b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bincmp.S
@@ -0,0 +1,23 @@
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ movl $$2, rINST # assume not taken
+ j${revcmp} 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rax <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/bindiv.S b/runtime/interpreter/mterp/x86_64/bindiv.S
new file mode 100644
index 0000000..e10d1dc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindiv.S
@@ -0,0 +1,34 @@
+%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ .if $wide
+ GET_WIDE_VREG %rax, %rax # eax <- vBB
+ GET_WIDE_VREG $second, %rcx # ecx <- vCC
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ GET_VREG $second, %rcx # ecx <- vCC
+ .endif
+ test${suffix} $second, $second
+ jz common_errDivideByZero
+ cmp${suffix} $$-1, $second
+ je 2f
+ $ext # rdx:rax <- sign-extended of rax
+ idiv${suffix} $second
+1:
+ .if $wide
+ SET_WIDE_VREG $result, rINSTq # eax <- vBB
+ .else
+ SET_VREG $result, rINSTq # eax <- vBB
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if $rem
+ xor${suffix} $result, $result
+ .else
+ neg${suffix} $result
+ .endif
+ jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/bindiv2addr.S b/runtime/interpreter/mterp/x86_64/bindiv2addr.S
new file mode 100644
index 0000000..8b9bc95
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindiv2addr.S
@@ -0,0 +1,35 @@
+%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movl rINST, %ecx # rcx <- BA
+ sarl $$4, %ecx # rcx <- B
+ andb $$0xf, rINSTbl # rINST <- A
+ .if $wide
+ GET_WIDE_VREG %rax, rINSTq # eax <- vA
+ GET_WIDE_VREG $second, %rcx # ecx <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vA
+ GET_VREG $second, %rcx # ecx <- vB
+ .endif
+ test${suffix} $second, $second
+ jz common_errDivideByZero
+ cmp${suffix} $$-1, $second
+ je 2f
+ $ext # rdx:rax <- sign-extended of rax
+ idiv${suffix} $second
+1:
+ .if $wide
+ SET_WIDE_VREG $result, rINSTq # vA <- result
+ .else
+ SET_VREG $result, rINSTq # vA <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+ .if $rem
+ xor${suffix} $result, $result
+ .else
+ neg${suffix} $result
+ .endif
+ jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit16.S b/runtime/interpreter/mterp/x86_64/bindivLit16.S
new file mode 100644
index 0000000..80dbce2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindivLit16.S
@@ -0,0 +1,27 @@
+%default {"result":"","rem":"0"}
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ andb $$0xf, rINSTbl # rINST <- A
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $$-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG $result, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if $rem
+ xorl $result, $result
+ .else
+ negl $result
+ .endif
+ jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit8.S b/runtime/interpreter/mterp/x86_64/bindivLit8.S
new file mode 100644
index 0000000..ab535f3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/bindivLit8.S
@@ -0,0 +1,25 @@
+%default {"result":"","rem":"0"}
+/*
+ * 32-bit div/rem "lit8" binary operation. Handles special case of
+ * op0=minint & op1=-1
+ */
+ /* div/rem/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movsbl 3(rPC), %ecx # ecx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ cmpl $$-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG $result, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if $rem
+ xorl $result, $result
+ .else
+ negl $result
+ .endif
+ jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/binop.S b/runtime/interpreter/mterp/x86_64/binop.S
new file mode 100644
index 0000000..962dd61
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binop.S
@@ -0,0 +1,17 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ $instr # ex: addl (rFP,%rcx,4),%eax
+ SET_VREG $result, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binop1.S b/runtime/interpreter/mterp/x86_64/binop1.S
new file mode 100644
index 0000000..bdd5732
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binop1.S
@@ -0,0 +1,19 @@
+%default {"wide":"0"}
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %ecx, %rcx # eax <- vCC
+ .if $wide
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ $instr # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ $instr # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binop2addr.S b/runtime/interpreter/mterp/x86_64/binop2addr.S
new file mode 100644
index 0000000..4448a81
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binop2addr.S
@@ -0,0 +1,19 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_VREG %eax, rINSTq # eax <- vB
+ $instr # for ex: addl %eax,(rFP,%ecx,4)
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/binopLit16.S b/runtime/interpreter/mterp/x86_64/binopLit16.S
new file mode 100644
index 0000000..de43b53
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopLit16.S
@@ -0,0 +1,19 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $$0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ $instr # for example: addl %ecx, %eax
+ SET_VREG $result, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopLit8.S b/runtime/interpreter/mterp/x86_64/binopLit8.S
new file mode 100644
index 0000000..995002b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopLit8.S
@@ -0,0 +1,18 @@
+%default {"result":"%eax"}
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ $instr # ex: addl %ecx,%eax
+ SET_VREG $result, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide.S b/runtime/interpreter/mterp/x86_64/binopWide.S
new file mode 100644
index 0000000..f92f18e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopWide.S
@@ -0,0 +1,10 @@
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ $instr # ex: addq (rFP,%rcx,4),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide2addr.S b/runtime/interpreter/mterp/x86_64/binopWide2addr.S
new file mode 100644
index 0000000..d9e6cfb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/binopWide2addr.S
@@ -0,0 +1,11 @@
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ $instr # for ex: addq %rax,(rFP,%rcx,4)
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/cvtfp_int.S b/runtime/interpreter/mterp/x86_64/cvtfp_int.S
new file mode 100644
index 0000000..1472bd2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/cvtfp_int.S
@@ -0,0 +1,27 @@
+%default {"fp_suffix":"","i_suffix":"","max_const":"","result_reg":"","wide":""}
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate.
+ */
+ /* float/double to int/long vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ movs${fp_suffix} VREG_ADDRESS(rINSTq), %xmm0
+ mov${i_suffix} ${max_const}, ${result_reg}
+ cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
+ comis${fp_suffix} %xmm1, %xmm0
+ jae 1f
+ jp 2f
+ cvtts${fp_suffix}2si${i_suffix} %xmm0, ${result_reg}
+ jmp 1f
+2:
+ xor${i_suffix} ${result_reg}, ${result_reg}
+1:
+ .if $wide
+ SET_WIDE_VREG ${result_reg}, %rcx
+ .else
+ SET_VREG ${result_reg}, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/entry.S b/runtime/interpreter/mterp/x86_64/entry.S
new file mode 100644
index 0000000..69b2371
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/entry.S
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .global SYMBOL(ExecuteMterpImpl)
+ FUNCTION_TYPE(ExecuteMterpImpl)
+
+/*
+ * On entry:
+ * 0 Thread* self
+ * 1 code_item
+ * 2 ShadowFrame
+ * 3 JValue* result_register
+ *
+ */
+
+SYMBOL(ExecuteMterpImpl):
+ .cfi_startproc
+ .cfi_def_cfa rsp, 8
+
+ /* Spill callee save regs */
+ PUSH %rbx
+ PUSH %rbp
+ PUSH %r12
+ PUSH %r13
+ PUSH %r14
+ PUSH %r15
+
+ /* Allocate frame */
+ subq $$FRAME_SIZE, %rsp
+ .cfi_adjust_cfa_offset FRAME_SIZE
+
+ /* Remember the return register */
+ movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
+
+ /* Remember the code_item */
+ movq IN_ARG1, SHADOWFRAME_CODE_ITEM_OFFSET(IN_ARG2)
+
+ /* set up "named" registers */
+ movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
+ leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
+ leaq (rFP, %rax, 4), rREFS
+ movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
+ leaq CODEITEM_INSNS_OFFSET(IN_ARG1), rPC
+ leaq (rPC, %rax, 2), rPC
+ EXPORT_PC
+
+ /* Starting ibase */
+ movq IN_ARG0, rSELF
+ REFRESH_IBASE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/x86_64/fallback.S b/runtime/interpreter/mterp/x86_64/fallback.S
new file mode 100644
index 0000000..8d61166
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/fallback.S
@@ -0,0 +1,3 @@
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
new file mode 100644
index 0000000..573256b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/footer.S
@@ -0,0 +1,181 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogDivideByZeroException)
+#endif
+ jmp MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogArrayIndexException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNoSuchMethodException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNullObjectException)
+#endif
+ jmp MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogExceptionThrownException)
+#endif
+ jmp MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl THREAD_FLAGS_OFFSET(rSELF), OUT_32_ARG2
+ call SYMBOL(MterpLogSuspendFallback)
+#endif
+ jmp MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jz MterpFallback
+ /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpHandleException)
+ testb %al, %al
+ jz MterpExceptionReturn
+ movq OFF_FP_CODE_ITEM(rFP), %rax
+ mov OFF_FP_DEX_PC(rFP), %ecx
+ leaq CODEITEM_INSNS_OFFSET(%rax), rPC
+ leaq (rPC, %rcx, 2), rPC
+ movq rPC, OFF_FP_DEX_PC_PTR(rFP)
+ /* Do we need to switch interpreters? */
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ /* resume execution at catch block */
+ REFRESH_IBASE
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a pending suspend request. Assumes rINST is already loaded and rPC
+ * advanced; we still need to fetch the opcode and branch to it.
+ */
+MterpCheckSuspendAndContinue:
+ REFRESH_IBASE
+ testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GOTO_NEXT
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpLogOSR)
+#endif
+ movl $$1, %eax
+ jmp MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+ xorl %eax, %eax
+ jmp MterpDone
+
+/*
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ movl $$1, %eax
+ jmp MterpDone
+MterpReturn:
+ movq OFF_FP_RESULT_REGISTER(rFP), %rdx
+ movq %rax, (%rdx)
+ movl $$1, %eax
+MterpDone:
+ /* pop up frame */
+ addq $$FRAME_SIZE, %rsp
+ .cfi_adjust_cfa_offset -FRAME_SIZE
+
+ /* Restore callee save register */
+ POP %r15
+ POP %r14
+ POP %r13
+ POP %r12
+ POP %rbp
+ POP %rbx
+ ret
+ .cfi_endproc
+ SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
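For reference, a minimal C++ sketch of the exit convention the footer above implements. The enum and function names are illustrative only; the real entry point and its callers live elsewhere in the runtime.

    #include <cstdint>

    // The footer leaves a flag in %eax: 0 (MterpFallback / MterpCommonFallback)
    // asks the caller to rerun the frame in the reference interpreter; 1 means
    // the frame is done (MterpReturn stored the result through the pointer held
    // at OFF_FP_RESULT_REGISTER, or MterpExceptionReturn left an exception pending).
    enum class ExitPath { kFallback, kExceptionReturn, kNormalReturn };

    bool MterpExitSketch(ExitPath path, int64_t* result_register, int64_t rax) {
      switch (path) {
        case ExitPath::kFallback:
          return false;                  // xorl %eax, %eax; jmp MterpDone
        case ExitPath::kExceptionReturn:
          return true;                   // movl $1, %eax; jmp MterpDone
        case ExitPath::kNormalReturn:
          *result_register = rax;        // movq %rax, (%rdx) via OFF_FP_RESULT_REGISTER
          return true;
      }
      return false;
    }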
diff --git a/runtime/interpreter/mterp/x86_64/fpcmp.S b/runtime/interpreter/mterp/x86_64/fpcmp.S
new file mode 100644
index 0000000..806bc2b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/fpcmp.S
@@ -0,0 +1,35 @@
+%default {"suff":"d","nanval":"pos"}
+/*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return nanval ? 1 : -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx<- CC
+ movzbq 2(rPC), %rax # eax<- BB
+ movs${suff} VREG_ADDRESS(%rax), %xmm0
+ xor %eax, %eax
+ ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
+ jp .L${opcode}_nan_is_${nanval}
+ je .L${opcode}_finish
+ jb .L${opcode}_less
+.L${opcode}_nan_is_pos:
+ addb $$1, %al
+ jmp .L${opcode}_finish
+.L${opcode}_nan_is_neg:
+.L${opcode}_less:
+ movl $$-1, %eax
+.L${opcode}_finish:
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
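The pseudo-code above maps directly onto a small C++ helper; this sketch only restates the comparison semantics (names are illustrative, not from the patch).

    #include <cmath>

    // 0 if equal, -1 if x < y, +1 if x > y; the unordered (NaN) case takes the
    // ucomis* parity-flag branch and returns +1 for cmpg* or -1 for cmpl*.
    int FpCmpSketch(double x, double y, bool nan_is_pos) {
      if (std::isnan(x) || std::isnan(y)) {
        return nan_is_pos ? 1 : -1;
      }
      if (x == y) return 0;
      return (x < y) ? -1 : 1;
    }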
diff --git a/runtime/interpreter/mterp/x86_64/fpcvt.S b/runtime/interpreter/mterp/x86_64/fpcvt.S
new file mode 100644
index 0000000..657869e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/fpcvt.S
@@ -0,0 +1,17 @@
+%default {"source_suffix":"","dest_suffix":"","wide":""}
+/*
+ * Generic FP conversion operation (32-bit or wide result, per the template).
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ cvts${source_suffix}2s${dest_suffix} VREG_ADDRESS(rINSTq), %xmm0
+ .if $wide
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
new file mode 100644
index 0000000..eb84ea1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/header.S
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+/*
+x86_64 ABI general notes:
+
+Caller save set:
+ rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
+Callee save set:
+ rbx, rbp, r12-r15
+Return regs:
+ 32-bit in eax
+ 64-bit in rax
+ fp on xmm0
+
+First 8 fp parameters come in xmm0-xmm7.
+First 6 non-fp parameters come in rdi, rsi, rdx, rcx, r8, r9.
+Remaining parameters are passed on the stack, pushed right-to-left. On entry to
+the target, the first stack param is at 8(%rsp).
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be rsp relative.
+*/
+
+/*
+Mterp and x86_64 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+ nick reg purpose
+ rSELF rbp pointer to ThreadSelf.
+ rPC r12 interpreted program counter, used for fetching instructions
+ rFP r13 interpreted frame pointer, used for accessing locals and args
+ rINSTw bx first 16-bit code of current instruction
+ rINSTbl bl opcode portion of instruction word
+ rINSTbh bh high byte of inst word, usually contains src/tgt reg names
+ rIBASE r14 base of instruction handler table
+ rREFS r15 base of object references in shadow frame.
+
+Notes:
+ o High order 16 bits of ebx must be zero on entry to handler
+ o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+ o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/*
+ * Handle Mac compiler-specific defines.
+ */
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $$(value)
+ #define FUNCTION_TYPE(name)
+ #define SIZE(start,end)
+ // Mac OS' symbols have an _ prefix.
+ #define SYMBOL(name) _ ## name
+#else
+ #define MACRO_LITERAL(value) $$value
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define SIZE(start,end) .size start, .-end
+ #define SYMBOL(name) name
+#endif
+
+.macro PUSH _reg
+ pushq \_reg
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+ popq \_reg
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore \_reg
+.endm
+
+/* The stack frame size must keep the stack 16-byte aligned.
+ * Remember to account for the 8-byte return address plus 6 * 8 bytes of spills.
+ */
+#define FRAME_SIZE 8
+
+/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
+#define IN_ARG3 %rcx
+#define IN_ARG2 %rdx
+#define IN_ARG1 %rsi
+#define IN_ARG0 %rdi
+/* Out Args */
+#define OUT_ARG3 %rcx
+#define OUT_ARG2 %rdx
+#define OUT_ARG1 %rsi
+#define OUT_ARG0 %rdi
+#define OUT_32_ARG3 %ecx
+#define OUT_32_ARG2 %edx
+#define OUT_32_ARG1 %esi
+#define OUT_32_ARG0 %edi
+#define OUT_FP_ARG1 %xmm1
+#define OUT_FP_ARG0 %xmm0
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF %rbp
+#define rPC %r12
+#define rFP %r13
+#define rINST %ebx
+#define rINSTq %rbx
+#define rINSTw %bx
+#define rINSTbh %bh
+#define rINSTbl %bl
+#define rIBASE %r14
+#define rREFS %r15
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * Profile branch. rINST should contain the offset. %eax is scratch.
+ */
+.macro MTERP_PROFILE_BRANCH
+#ifdef MTERP_PROFILE_BRANCHES
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpProfileBranch)
+ testb %al, %al
+ jnz MterpOnStackReplacement
+#endif
+.endm
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ movq rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ * rIBASE uses a caller-save register, so we must restore it after each call.
+ * It is also used as the result of some 64-bit operations (like imul), so we
+ * should restore it in such cases as well.
+ *
+ */
+.macro REFRESH_IBASE
+ movq THREAD_CURRENT_IBASE_OFFSET(rSELF), rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * On entry to a handler, rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+ movb rINSTbl, rINSTbh
+ movb $$\_opnum, rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
+ */
+.macro FETCH_INST
+ movzwq (rPC), rINSTq
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ */
+.macro GOTO_NEXT
+ movzx rINSTbl,%eax
+ movzbl rINSTbh,rINST
+ shll MACRO_LITERAL(${handler_size_bits}), %eax
+ addq rIBASE, %rax
+ jmp *%rax
+.endm
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+ leaq 2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+ ADVANCE_PC \_count
+ FETCH_INST
+ GOTO_NEXT
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+
+.macro GET_VREG _reg _vreg
+ movl (rFP,\_vreg,4), \_reg
+.endm
+
+/* Read wide value. */
+.macro GET_WIDE_VREG _reg _vreg
+ movq (rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+ movl \_reg, (rFP,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+.endm
+
+/* Write wide value. reg is clobbered. */
+.macro SET_WIDE_VREG _reg _vreg
+ movq \_reg, (rFP,\_vreg,4)
+ xorq \_reg, \_reg
+ movq \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+ movl \_reg, (rFP,\_vreg,4)
+ movl \_reg, (rREFS,\_vreg,4)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+ movl 4(rFP,\_vreg,4), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+ movl \_reg, 4(rFP,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_REF _vreg
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+.endm
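The vreg macros above maintain two parallel 32-bit arrays: the values at rFP and a reference mirror at rREFS that is zeroed whenever a non-reference value is written. A minimal C++ sketch of that invariant (array and function names are illustrative):

    #include <cstdint>
    #include <cstring>

    // vregs mirrors rFP, refs mirrors rREFS.
    void SetVRegSketch(uint32_t* vregs, uint32_t* refs, int v, uint32_t value) {
      vregs[v] = value;
      refs[v] = 0;                      // SET_VREG clears the ref slot
    }

    void SetVRegObjectSketch(uint32_t* vregs, uint32_t* refs, int v, uint32_t ref) {
      vregs[v] = ref;
      refs[v] = ref;                    // SET_VREG_OBJECT double-stores the reference
    }

    void SetWideVRegSketch(uint32_t* vregs, uint32_t* refs, int v, uint64_t value) {
      std::memcpy(&vregs[v], &value, sizeof(value));  // fills slots v and v+1
      refs[v] = 0;                      // CLEAR_WIDE_REF zeroes both ref slots
      refs[v + 1] = 0;
    }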
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
new file mode 100644
index 0000000..f7e6155
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -0,0 +1,22 @@
+%default { "helper":"UndefinedInvokeHandler" }
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern $helper
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_32_ARG3
+ call SYMBOL($helper)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double.S b/runtime/interpreter/mterp/x86_64/op_add_double.S
new file mode 100644
index 0000000..cb462cb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
new file mode 100644
index 0000000..063bde3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float.S b/runtime/interpreter/mterp/x86_64/op_add_float.S
new file mode 100644
index 0000000..7753bf8
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
new file mode 100644
index 0000000..6c8005b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int.S b/runtime/interpreter/mterp/x86_64/op_add_int.S
new file mode 100644
index 0000000..e316be7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"addl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
new file mode 100644
index 0000000..2ff8293
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"addl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
new file mode 100644
index 0000000..bfeb7ca
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"addl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
new file mode 100644
index 0000000..8954844
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"addl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long.S b/runtime/interpreter/mterp/x86_64/op_add_long.S
new file mode 100644
index 0000000..89131ff
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"addq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
new file mode 100644
index 0000000..fed98bc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"addq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aget.S b/runtime/interpreter/mterp/x86_64/op_aget.S
new file mode 100644
index 0000000..58d4948
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget.S
@@ -0,0 +1,24 @@
+%default { "load":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if $wide
+ movq $data_offset(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ $load $data_offset(%rax,%rcx,$shift), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
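In C++ terms the handler above is a null check, an unsigned bounds check (the jae also rejects negative indices), and a scaled load from the array payload. A sketch, with the pointers standing in for the object's length and data fields:

    #include <cstdint>

    bool AgetIntSketch(const int32_t* length_field, const int32_t* data,
                       uint32_t index, int32_t* out) {
      if (length_field == nullptr) return false;                        // common_errNullObject
      if (index >= static_cast<uint32_t>(*length_field)) return false;  // common_errArrayIndex
      *out = data[index];                  // $load $data_offset(%rax,%rcx,$shift)
      return true;
    }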
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_boolean.S b/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
new file mode 100644
index 0000000..cf7bdb5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movzbl", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_byte.S b/runtime/interpreter/mterp/x86_64/op_aget_byte.S
new file mode 100644
index 0000000..1cbb569
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movsbl", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_char.S b/runtime/interpreter/mterp/x86_64/op_aget_char.S
new file mode 100644
index 0000000..45c9085
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movzwl", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_object.S b/runtime/interpreter/mterp/x86_64/op_aget_object.S
new file mode 100644
index 0000000..8baedea
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_object.S
@@ -0,0 +1,16 @@
+/*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG OUT_32_ARG0, %rax # eax <- vBB (array object)
+ GET_VREG OUT_32_ARG1, %rcx # ecx <- vCC (requested index)
+ EXPORT_PC
+ call SYMBOL(artAGetObjectFromMterp) # (array, index)
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ SET_VREG_OBJECT %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_short.S b/runtime/interpreter/mterp/x86_64/op_aget_short.S
new file mode 100644
index 0000000..82c4a1d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movswl", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_wide.S b/runtime/interpreter/mterp/x86_64/op_aget_wide.S
new file mode 100644
index 0000000..4f2771b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aget_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_aget.S" { "load":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int.S b/runtime/interpreter/mterp/x86_64/op_and_int.S
new file mode 100644
index 0000000..4469889
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"andl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
new file mode 100644
index 0000000..16315bb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"andl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
new file mode 100644
index 0000000..63e851b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"andl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
new file mode 100644
index 0000000..da7a20f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"andl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long.S b/runtime/interpreter/mterp/x86_64/op_and_long.S
new file mode 100644
index 0000000..ce1dd26
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"andq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
new file mode 100644
index 0000000..d17ab8d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"andq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aput.S b/runtime/interpreter/mterp/x86_64/op_aput.S
new file mode 100644
index 0000000..11500ad
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput.S
@@ -0,0 +1,23 @@
+%default { "reg":"rINST", "store":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if $wide
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ $store $reg, $data_offset(%rax,%rcx,$shift)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_boolean.S b/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
new file mode 100644
index 0000000..7d77a86
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_byte.S b/runtime/interpreter/mterp/x86_64/op_aput_byte.S
new file mode 100644
index 0000000..7a1723e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_char.S b/runtime/interpreter/mterp/x86_64/op_aput_char.S
new file mode 100644
index 0000000..f8f50a3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_object.S b/runtime/interpreter/mterp/x86_64/op_aput_object.S
new file mode 100644
index 0000000..b1bae0f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_object.S
@@ -0,0 +1,13 @@
+/*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movq rINSTq, OUT_ARG2
+ call SYMBOL(MterpAputObject) # (shadow_frame, dex_pc_ptr, inst_data)
+ testb %al, %al
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_short.S b/runtime/interpreter/mterp/x86_64/op_aput_short.S
new file mode 100644
index 0000000..481fd68
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_wide.S b/runtime/interpreter/mterp/x86_64/op_aput_wide.S
new file mode 100644
index 0000000..5bbd39b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_aput_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_aput.S" { "reg":"rINSTq", "store":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_array_length.S b/runtime/interpreter/mterp/x86_64/op_array_length.S
new file mode 100644
index 0000000..e80d665
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_array_length.S
@@ -0,0 +1,12 @@
+/*
+ * Return the length of an array.
+ */
+ movl rINST, %eax # eax <- BA
+ sarl $$4, rINST # rINST <- B
+ GET_VREG %ecx, rINSTq # ecx <- vB (object ref)
+ testl %ecx, %ecx # is null?
+ je common_errNullObject
+ andb $$0xf, %al # eax <- A
+ movl MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
+ SET_VREG rINST, %rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_check_cast.S b/runtime/interpreter/mterp/x86_64/op_check_cast.S
new file mode 100644
index 0000000..f8fa7b2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_check_cast.S
@@ -0,0 +1,13 @@
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG1
+ movq OFF_FP_METHOD(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_cmp_long.S b/runtime/interpreter/mterp/x86_64/op_cmp_long.S
new file mode 100644
index 0000000..23ca3e5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmp_long.S
@@ -0,0 +1,17 @@
+/*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+ /* cmp-long vAA, vBB, vCC */
+ movzbq 2(rPC), %rdx # edx <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rdx, %rdx # rdx <- v[BB]
+ xorl %eax, %eax
+ xorl %edi, %edi
+ addb $$1, %al
+ movl $$-1, %esi
+ cmpq VREG_ADDRESS(%rcx), %rdx
+ cmovl %esi, %edi
+ cmovg %eax, %edi
+ SET_VREG %edi, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
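The handler computes the -1/0/+1 result without branches by seeding 0 and using two conditional moves. The equivalent logic in C++ (the comments map each line back to the cmov sequence above):

    #include <cstdint>

    int32_t CmpLongSketch(int64_t vbb, int64_t vcc) {
      int32_t result = 0;             // xorl %edi, %edi
      if (vbb < vcc) result = -1;     // cmovl %esi, %edi
      if (vbb > vcc) result = 1;      // cmovg %eax, %edi
      return result;
    }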
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_double.S b/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
new file mode 100644
index 0000000..7c0aa1b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"d","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_float.S b/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
new file mode 100644
index 0000000..14e8472
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"s","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_double.S b/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
new file mode 100644
index 0000000..1d4c424
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"d","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_float.S b/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
new file mode 100644
index 0000000..97a12a6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcmp.S" {"suff":"s","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_const.S b/runtime/interpreter/mterp/x86_64/op_const.S
new file mode 100644
index 0000000..3cfafdb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const.S
@@ -0,0 +1,4 @@
+ /* const vAA, #+BBBBbbbb */
+ movl 2(rPC), %eax # grab all 32 bits at once
+ SET_VREG %eax, rINSTq # vAA<- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_16.S b/runtime/interpreter/mterp/x86_64/op_const_16.S
new file mode 100644
index 0000000..1a139c6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_16.S
@@ -0,0 +1,4 @@
+ /* const/16 vAA, #+BBBB */
+ movswl 2(rPC), %ecx # ecx <- ssssBBBB
+ SET_VREG %ecx, rINSTq # vAA <- ssssBBBB
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_4.S b/runtime/interpreter/mterp/x86_64/op_const_4.S
new file mode 100644
index 0000000..23c4816
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_4.S
@@ -0,0 +1,7 @@
+ /* const/4 vA, #+B */
+ movsbl rINSTbl, %eax # eax <-ssssssBx
+ movl $$0xf, rINST
+ andl %eax, rINST # rINST <- A
+ sarl $$4, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
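const/4 packs both operands into the instruction byte: the low nibble is the destination register A and the high nibble is a sign-extended 4-bit literal B. A C++ sketch of the decode (names are illustrative):

    #include <cstdint>

    void DecodeConst4Sketch(uint8_t inst_byte, uint32_t* vreg_a, int32_t* literal_b) {
      *vreg_a = inst_byte & 0xf;                              // andl $0xf
      uint32_t b = (inst_byte >> 4) & 0xf;                    // high nibble
      *literal_b = (b >= 8) ? static_cast<int32_t>(b) - 16    // sign-extend, as the
                            : static_cast<int32_t>(b);        // movsbl/sarl pair does
    }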
diff --git a/runtime/interpreter/mterp/x86_64/op_const_class.S b/runtime/interpreter/mterp/x86_64/op_const_class.S
new file mode 100644
index 0000000..494920a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_class.S
@@ -0,0 +1,10 @@
+ /* const/class vAA, Class@BBBB */
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_high16.S b/runtime/interpreter/mterp/x86_64/op_const_high16.S
new file mode 100644
index 0000000..64e633c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_high16.S
@@ -0,0 +1,5 @@
+ /* const/high16 vAA, #+BBBB0000 */
+ movzwl 2(rPC), %eax # eax <- 0000BBBB
+ sall $$16, %eax # eax <- BBBB0000
+ SET_VREG %eax, rINSTq # vAA <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string.S b/runtime/interpreter/mterp/x86_64/op_const_string.S
new file mode 100644
index 0000000..7c199ec
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_string.S
@@ -0,0 +1,10 @@
+ /* const/string vAA, String@BBBB */
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
new file mode 100644
index 0000000..ae03d20
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
@@ -0,0 +1,10 @@
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+ movl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- BBBBBBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide.S b/runtime/interpreter/mterp/x86_64/op_const_wide.S
new file mode 100644
index 0000000..5615177
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide.S
@@ -0,0 +1,4 @@
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ movq 2(rPC), %rax # rax <- HHHHhhhhBBBBbbbb
+ SET_WIDE_VREG %rax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
new file mode 100644
index 0000000..593b624
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
@@ -0,0 +1,4 @@
+ /* const-wide/16 vAA, #+BBBB */
+ movswq 2(rPC), %rax # rax <- ssssBBBB
+ SET_WIDE_VREG %rax, rINSTq # store
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_32.S b/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
new file mode 100644
index 0000000..5ef3636
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
@@ -0,0 +1,4 @@
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ movslq 2(rPC), %rax # eax <- ssssssssBBBBbbbb
+ SET_WIDE_VREG %rax, rINSTq # store
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
new file mode 100644
index 0000000..b86b4e5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
@@ -0,0 +1,5 @@
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ movzwq 2(rPC), %rax # eax <- 0000BBBB
+ salq $$48, %rax # rax <- BBBB000000000000
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
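const-wide/high16 builds its 64-bit constant by placing the 16-bit literal in bits 48..63 and zeroing the rest, i.e.:

    #include <cstdint>

    int64_t ConstWideHigh16Sketch(uint16_t bbbb) {
      return static_cast<int64_t>(static_cast<uint64_t>(bbbb) << 48);  // movzwq + salq $48
    }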
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double.S b/runtime/interpreter/mterp/x86_64/op_div_double.S
new file mode 100644
index 0000000..45c700c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
new file mode 100644
index 0000000..83f270e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float.S b/runtime/interpreter/mterp/x86_64/op_div_float.S
new file mode 100644
index 0000000..aa90b24
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
new file mode 100644
index 0000000..f0f8f1a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int.S b/runtime/interpreter/mterp/x86_64/op_div_int.S
new file mode 100644
index 0000000..bba5a17
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
new file mode 100644
index 0000000..fa4255d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
new file mode 100644
index 0000000..3fa1e09
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit16.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
new file mode 100644
index 0000000..859883e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit8.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long.S b/runtime/interpreter/mterp/x86_64/op_div_long.S
new file mode 100644
index 0000000..a061a88
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_long.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
new file mode 100644
index 0000000..8886e68
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_float.S b/runtime/interpreter/mterp/x86_64/op_double_to_float.S
new file mode 100644
index 0000000..cea1482
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_double_to_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"d","dest_suffix":"s","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_int.S b/runtime/interpreter/mterp/x86_64/op_double_to_int.S
new file mode 100644
index 0000000..a9965ed
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_double_to_int.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_long.S b/runtime/interpreter/mterp/x86_64/op_double_to_long.S
new file mode 100644
index 0000000..179e6a1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_double_to_long.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_fill_array_data.S b/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
new file mode 100644
index 0000000..626bad4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
@@ -0,0 +1,9 @@
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ movl 2(rPC), %ecx # ecx <- BBBBbbbb
+ leaq (rPC,%rcx,2), OUT_ARG1 # OUT_ARG1 <- PC + BBBBbbbb*2
+ GET_VREG OUT_32_ARG0, rINSTq # OUT_ARG0 <- vAA (array object)
+ call SYMBOL(MterpFillArrayData) # (obj, payload)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
new file mode 100644
index 0000000..a7f7ddc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
@@ -0,0 +1,17 @@
+%default { "helper":"MterpFilledNewArray" }
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern $helper
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ movq rSELF, OUT_ARG2
+ call SYMBOL($helper)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
new file mode 100644
index 0000000..4ca79a3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "x86_64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_double.S b/runtime/interpreter/mterp/x86_64/op_float_to_double.S
new file mode 100644
index 0000000..7855205
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_float_to_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"s","dest_suffix":"d","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_int.S b/runtime/interpreter/mterp/x86_64/op_float_to_int.S
new file mode 100644
index 0000000..cb90555
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_float_to_int.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_long.S b/runtime/interpreter/mterp/x86_64/op_float_to_long.S
new file mode 100644
index 0000000..96bb4ee
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_float_to_long.S
@@ -0,0 +1 @@
+%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_goto.S b/runtime/interpreter/mterp/x86_64/op_goto.S
new file mode 100644
index 0000000..c4fc976
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_goto.S
@@ -0,0 +1,14 @@
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
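Branch offsets are signed counts of 16-bit code units, so the handler doubles them to get a byte offset and uses the sign of the doubled value to run the suspend check only on backward (or zero) branches. A C++ sketch:

    #include <cstdint>

    const uint16_t* GotoSketch(const uint16_t* pc, int32_t code_unit_offset,
                               bool* needs_suspend_check) {
      *needs_suspend_check = (code_unit_offset <= 0);  // jle MterpCheckSuspendAndContinue
      return pc + code_unit_offset;                    // leaq (rPC, rINSTq) after doubling
    }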
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_16.S b/runtime/interpreter/mterp/x86_64/op_goto_16.S
new file mode 100644
index 0000000..8cb9a5c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_goto_16.S
@@ -0,0 +1,14 @@
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_32.S b/runtime/interpreter/mterp/x86_64/op_goto_32.S
new file mode 100644
index 0000000..4ecdacd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_goto_32.S
@@ -0,0 +1,17 @@
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Because we need the SF bit set, we'll use an add
+ * (which sets the flags) to convert from Dalvik offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+ movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- AA * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check
+ GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eq.S b/runtime/interpreter/mterp/x86_64/op_if_eq.S
new file mode 100644
index 0000000..d56ce72
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_eq.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eqz.S b/runtime/interpreter/mterp/x86_64/op_if_eqz.S
new file mode 100644
index 0000000..a0fc444
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_eqz.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ge.S b/runtime/interpreter/mterp/x86_64/op_if_ge.S
new file mode 100644
index 0000000..a7832ef
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_ge.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gez.S b/runtime/interpreter/mterp/x86_64/op_if_gez.S
new file mode 100644
index 0000000..f9af5db
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_gez.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gt.S b/runtime/interpreter/mterp/x86_64/op_if_gt.S
new file mode 100644
index 0000000..70f2b9e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_gt.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gtz.S b/runtime/interpreter/mterp/x86_64/op_if_gtz.S
new file mode 100644
index 0000000..2fb0d50
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_gtz.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_le.S b/runtime/interpreter/mterp/x86_64/op_if_le.S
new file mode 100644
index 0000000..321962a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_le.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lez.S b/runtime/interpreter/mterp/x86_64/op_if_lez.S
new file mode 100644
index 0000000..d3dc334
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_lez.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lt.S b/runtime/interpreter/mterp/x86_64/op_if_lt.S
new file mode 100644
index 0000000..f028005
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_lt.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ltz.S b/runtime/interpreter/mterp/x86_64/op_if_ltz.S
new file mode 100644
index 0000000..383d73a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_ltz.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ne.S b/runtime/interpreter/mterp/x86_64/op_if_ne.S
new file mode 100644
index 0000000..ac6e063
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_ne.S
@@ -0,0 +1 @@
+%include "x86_64/bincmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_nez.S b/runtime/interpreter/mterp/x86_64/op_if_nez.S
new file mode 100644
index 0000000..c96e4f3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_if_nez.S
@@ -0,0 +1 @@
+%include "x86_64/zcmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget.S b/runtime/interpreter/mterp/x86_64/op_iget.S
new file mode 100644
index 0000000..a0d0faf
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget.S
@@ -0,0 +1,27 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode", "wide":"0"}
+/*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
+ */
+ EXPORT_PC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3
+ call SYMBOL($helper)
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $$0xf, rINSTbl # rINST <- A
+ .if $is_object
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
+ .else
+ .if $wide
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <-value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
new file mode 100644
index 0000000..6ac5523
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
new file mode 100644
index 0000000..07139c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte.S b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
new file mode 100644
index 0000000..6a861b1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
new file mode 100644
index 0000000..07139c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char.S b/runtime/interpreter/mterp/x86_64/op_iget_char.S
new file mode 100644
index 0000000..021a0f1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
new file mode 100644
index 0000000..8cb3be3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movzwl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object.S b/runtime/interpreter/mterp/x86_64/op_iget_object.S
new file mode 100644
index 0000000..d92bc9c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
new file mode 100644
index 0000000..964d20a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
@@ -0,0 +1,14 @@
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ .extern artIGetObjectFromMterp
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG0, %rcx # vB (object we're operating on)
+ movzwl 2(rPC), OUT_32_ARG1 # eax <- field byte offset
+ EXPORT_PC
+ callq SYMBOL(artIGetObjectFromMterp) # (obj, offset)
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException # bail out
+ andb $$0xf, rINSTbl # rINST <- A
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_quick.S
new file mode 100644
index 0000000..bfb7530
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_quick.S
@@ -0,0 +1,18 @@
+%default { "load":"movl", "wide":"0"}
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $$0xf,rINSTbl # rINST <- A
+ .if $wide
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ ${load} (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
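The *-quick forms skip field resolution entirely: the CCCC operand is already a byte offset into the object, so the handler reduces to a null check plus a load. A C++ sketch (names are illustrative):

    #include <cstdint>
    #include <cstring>

    bool IgetQuickSketch(const uint8_t* object, uint16_t byte_offset, int32_t* out) {
      if (object == nullptr) return false;                    // common_errNullObject
      std::memcpy(out, object + byte_offset, sizeof(*out));   // ${load} (%rcx,%rax,1)
      return true;
    }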
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short.S b/runtime/interpreter/mterp/x86_64/op_iget_short.S
new file mode 100644
index 0000000..f158bea
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
new file mode 100644
index 0000000..56ca858
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movswl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide.S b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
new file mode 100644
index 0000000..74bb9ff
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget.S" { "helper":"artGet64InstanceFromCode", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
new file mode 100644
index 0000000..169d625
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iget_quick.S" { "load":"movswl", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_instance_of.S b/runtime/interpreter/mterp/x86_64/op_instance_of.S
new file mode 100644
index 0000000..6be37f9
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_instance_of.S
@@ -0,0 +1,21 @@
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- CCCC
+ movl rINST, %eax # eax <- BA
+ sarl $$4, %eax # eax <- B
+ leaq VREG_ADDRESS(%rax), OUT_ARG1 # Get object address
+ movq OFF_FP_METHOD(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
+ movsbl %al, %eax
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ andb $$0xf, rINSTbl # rINSTbl <- A
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_byte.S b/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
new file mode 100644
index 0000000..f4e578f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"movsbl %al, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_char.S b/runtime/interpreter/mterp/x86_64/op_int_to_char.S
new file mode 100644
index 0000000..c1bf17f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_char.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"movzwl %ax,%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_double.S b/runtime/interpreter/mterp/x86_64/op_int_to_double.S
new file mode 100644
index 0000000..27ebf42
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_float.S b/runtime/interpreter/mterp/x86_64/op_int_to_float.S
new file mode 100644
index 0000000..5a98d44
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sl","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_long.S b/runtime/interpreter/mterp/x86_64/op_int_to_long.S
new file mode 100644
index 0000000..9281137
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_long.S
@@ -0,0 +1,8 @@
+ /* int to long vA, vB */
+ movzbq rINSTbl, %rax # rax <- +A
+ sarl $$4, %eax # eax <- B
+ andb $$0xf, rINSTbl # rINST <- A
+ movslq VREG_ADDRESS(%rax), %rax
+ SET_WIDE_VREG %rax, rINSTq # v[A] <- %rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_short.S b/runtime/interpreter/mterp/x86_64/op_int_to_short.S
new file mode 100644
index 0000000..6ae6b50
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_int_to_short.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":"movswl %ax, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
new file mode 100644
index 0000000..9628589
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
new file mode 100644
index 0000000..09ac881
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
new file mode 100644
index 0000000..76d9cd4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
@@ -0,0 +1,8 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeInterface" }
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
new file mode 100644
index 0000000..785b43c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static.S b/runtime/interpreter/mterp/x86_64/op_invoke_static.S
new file mode 100644
index 0000000..dd8027d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_static.S
@@ -0,0 +1,2 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeStatic" }
+
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
new file mode 100644
index 0000000..ee26074
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super.S b/runtime/interpreter/mterp/x86_64/op_invoke_super.S
new file mode 100644
index 0000000..d07f8d5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_super.S
@@ -0,0 +1,8 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeSuper" }
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
new file mode 100644
index 0000000..7245cfd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
new file mode 100644
index 0000000..19c708b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
@@ -0,0 +1,8 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtual" }
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
new file mode 100644
index 0000000..313bd05
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
new file mode 100644
index 0000000..424ad32
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000..556f718
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput.S b/runtime/interpreter/mterp/x86_64/op_iput.S
new file mode 100644
index 0000000..6b7cb1c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput.S
@@ -0,0 +1,20 @@
+%default { "handler":"artSet32InstanceFromMterp"}
+/*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern $handler
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $$4, %ecx # ecx<- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $$0xf, rINSTbl # rINST<- A
+ GET_VREG OUT_32_ARG2, rINSTq # fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL($handler)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
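The op_iput template above marshals four arguments in the order the runtime helper expects: the field reference from the instruction stream, the object from vB, the value from vA, and the referrer method from the frame; a non-zero return means an exception is pending. A minimal C++ sketch of that flow, assuming a simplified helper signature (an illustration, not ART's declaration):

    #include <cstdint>

    // Assumed, simplified shape for the handler named in the template.
    extern "C" int artSet32InstanceFromMterp(uint32_t field_idx, void* obj,
                                             uint32_t value, void* referrer);

    inline bool DoIPut32(uint16_t inst, const uint16_t* pc, uint32_t* vregs,
                         void* referrer) {
      uint32_t field_idx = pc[1];               // field@CCCC
      unsigned b = (inst >> 12) & 0xf;          // object register (vB)
      unsigned a = (inst >> 8) & 0xf;           // value register (vA)
      void* obj = reinterpret_cast<void*>(static_cast<uintptr_t>(vregs[b]));
      // Non-zero return => pending exception (MterpPossibleException path).
      return artSet32InstanceFromMterp(field_idx, obj, vregs[a], referrer) == 0;
    }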
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
new file mode 100644
index 0000000..cb4b1cd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
new file mode 100644
index 0000000..6bd060e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte.S b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
new file mode 100644
index 0000000..cb4b1cd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
new file mode 100644
index 0000000..6bd060e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char.S b/runtime/interpreter/mterp/x86_64/op_iput_char.S
new file mode 100644
index 0000000..b4e147c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
new file mode 100644
index 0000000..3da96d5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object.S b/runtime/interpreter/mterp/x86_64/op_iput_object.S
new file mode 100644
index 0000000..828712d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_object.S
@@ -0,0 +1,10 @@
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_32_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpIputObject)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
new file mode 100644
index 0000000..b5b128a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
@@ -0,0 +1,9 @@
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpIputObjectQuick)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_quick.S
new file mode 100644
index 0000000..ecaf98e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_quick.S
@@ -0,0 +1,13 @@
+%default { "reg":"rINST", "store":"movl" }
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINSTq # rINST <- v[A]
+ movzwq 2(rPC), %rax # rax <- field byte offset
+ ${store} ${reg}, (%rcx,%rax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
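In the quick variant the field has already been resolved, so the instruction carries a raw byte offset and the handler only needs a null check plus a direct store. Sketch in C++ (layout and names illustrative):

    #include <cstdint>
    #include <cstring>

    inline bool DoIPutQuick32(uint16_t inst, const uint16_t* pc, uint32_t* vregs) {
      unsigned b = (inst >> 12) & 0xf;          // object register (vB)
      unsigned a = (inst >> 8) & 0xf;           // value register (vA)
      uint8_t* obj = reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(vregs[b]));
      if (obj == nullptr) {
        return false;                           // common_errNullObject path
      }
      uint16_t offset = pc[1];                  // offset@CCCC
      std::memcpy(obj + offset, &vregs[a], sizeof(uint32_t));  // ${store} ${reg}
      return true;
    }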
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short.S b/runtime/interpreter/mterp/x86_64/op_iput_short.S
new file mode 100644
index 0000000..b4e147c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
new file mode 100644
index 0000000..3da96d5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide.S b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
new file mode 100644
index 0000000..e59717b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
@@ -0,0 +1,14 @@
+ /* iput-wide vA, vB, field@CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG1, %rcx # the object pointer
+ andb $$0xf, rINSTbl # rINST <- A
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[A]
+ movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
+ call SYMBOL(artSet64InstanceFromMterp)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
new file mode 100644
index 0000000..473189d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
@@ -0,0 +1,12 @@
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $$4, %ecx # ecx<- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ movzwq 2(rPC), %rax # rax<- field byte offset
+ leaq (%rcx,%rax,1), %rcx # ecx<- Address of 64-bit target
+ andb $$0xf, rINSTbl # rINST<- A
+ GET_WIDE_VREG %rax, rINSTq # rax<- fp[A]/fp[A+1]
+ movq %rax, (%rcx) # obj.field <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_double.S b/runtime/interpreter/mterp/x86_64/op_long_to_double.S
new file mode 100644
index 0000000..7cdae32
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_long_to_double.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dq","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_float.S b/runtime/interpreter/mterp/x86_64/op_long_to_float.S
new file mode 100644
index 0000000..7553348
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_long_to_float.S
@@ -0,0 +1 @@
+%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sq","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_int.S b/runtime/interpreter/mterp/x86_64/op_long_to_int.S
new file mode 100644
index 0000000..7b50c8e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "x86_64/op_move.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_enter.S b/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
new file mode 100644
index 0000000..411091f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
@@ -0,0 +1,11 @@
+/*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ GET_VREG OUT_32_ARG0, rINSTq
+ movq rSELF, OUT_ARG1
+ call SYMBOL(artLockObjectFromCode) # (object, self)
+ testq %rax, %rax
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_exit.S b/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
new file mode 100644
index 0000000..72d9a23
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
@@ -0,0 +1,15 @@
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ GET_VREG OUT_32_ARG0, rINSTq
+ movq rSELF, OUT_ARG1
+ call SYMBOL(artUnlockObjectFromCode) # (object, self)
+ testq %rax, %rax
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
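Both monitor handlers pass (object, self) to a runtime entrypoint and take the exception path when it returns non-zero. A sketch, with the entrypoint signatures simplified to opaque pointers (an assumption for illustration):

    #include <cstdint>

    extern "C" int artLockObjectFromCode(void* obj, void* self);    // assumed shape
    extern "C" int artUnlockObjectFromCode(void* obj, void* self);  // assumed shape

    inline bool DoMonitorEnter(uint32_t* vregs, unsigned aa, void* self) {
      void* obj = reinterpret_cast<void*>(static_cast<uintptr_t>(vregs[aa]));
      return artLockObjectFromCode(obj, self) == 0;    // non-zero => MterpException
    }

    inline bool DoMonitorExit(uint32_t* vregs, unsigned aa, void* self) {
      void* obj = reinterpret_cast<void*>(static_cast<uintptr_t>(vregs[aa]));
      return artUnlockObjectFromCode(obj, self) == 0;  // non-zero => MterpException
    }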
diff --git a/runtime/interpreter/mterp/x86_64/op_move.S b/runtime/interpreter/mterp/x86_64/op_move.S
new file mode 100644
index 0000000..ccaac2c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move.S
@@ -0,0 +1,13 @@
+%default { "is_object":"0" }
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movl rINST, %eax # eax <- BA
+ andb $$0xf, %al # eax <- A
+ shrl $$4, rINST # rINST <- B
+ GET_VREG %edx, rINSTq
+ .if $is_object
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_16.S b/runtime/interpreter/mterp/x86_64/op_move_16.S
new file mode 100644
index 0000000..6a813eb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_16.S
@@ -0,0 +1,12 @@
+%default { "is_object":"0" }
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ movzwq 4(rPC), %rcx # ecx <- BBBB
+ movzwq 2(rPC), %rax # eax <- AAAA
+ GET_VREG %edx, %rcx
+ .if $is_object
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_exception.S b/runtime/interpreter/mterp/x86_64/op_move_exception.S
new file mode 100644
index 0000000..d0a14fd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_exception.S
@@ -0,0 +1,5 @@
+ /* move-exception vAA */
+ movl THREAD_EXCEPTION_OFFSET(rSELF), %eax
+ SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- exception object
+ movl $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_from16.S b/runtime/interpreter/mterp/x86_64/op_move_from16.S
new file mode 100644
index 0000000..150e9c2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_from16.S
@@ -0,0 +1,11 @@
+%default { "is_object":"0" }
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ movzwq 2(rPC), %rax # eax <- BBBB
+ GET_VREG %edx, %rax # edx <- fp[BBBB]
+ .if $is_object
+ SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, rINSTq # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object.S b/runtime/interpreter/mterp/x86_64/op_move_object.S
new file mode 100644
index 0000000..0d86649
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_16.S b/runtime/interpreter/mterp/x86_64/op_move_object_16.S
new file mode 100644
index 0000000..32541ff
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_object_16.S
@@ -0,0 +1 @@
+%include "x86_64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_from16.S b/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
new file mode 100644
index 0000000..983e4ab
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "x86_64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result.S b/runtime/interpreter/mterp/x86_64/op_move_result.S
new file mode 100644
index 0000000..8268344
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_result.S
@@ -0,0 +1,11 @@
+%default { "is_object":"0" }
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
+ movl (%rax), %eax # eax <- result.i.
+ .if $is_object
+ SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- result
+ .else
+ SET_VREG %eax, rINSTq # fp[AA] <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_object.S b/runtime/interpreter/mterp/x86_64/op_move_result_object.S
new file mode 100644
index 0000000..c5aac17
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_result_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_wide.S b/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
new file mode 100644
index 0000000..03de783
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
@@ -0,0 +1,5 @@
+ /* move-result-wide vAA */
+ movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
+ movq (%rax), %rdx # Get wide
+ SET_WIDE_VREG %rdx, rINSTq # v[AA] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide.S b/runtime/interpreter/mterp/x86_64/op_move_wide.S
new file mode 100644
index 0000000..508f8cc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_wide.S
@@ -0,0 +1,8 @@
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movl rINST, %ecx # ecx <- BA
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rdx, rINSTq # rdx <- v[B]
+ SET_WIDE_VREG %rdx, %rcx # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
new file mode 100644
index 0000000..ce371a9
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
@@ -0,0 +1,7 @@
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwq 4(rPC), %rcx # ecx<- BBBB
+ movzwq 2(rPC), %rax # eax<- AAAA
+ GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
+ SET_WIDE_VREG %rdx, %rax # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
new file mode 100644
index 0000000..0d6971a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
@@ -0,0 +1,6 @@
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwl 2(rPC), %ecx # ecx <- BBBB
+ GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
+ SET_WIDE_VREG %rdx, rINSTq # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double.S b/runtime/interpreter/mterp/x86_64/op_mul_double.S
new file mode 100644
index 0000000..1f4bcb3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
new file mode 100644
index 0000000..9850a28
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float.S b/runtime/interpreter/mterp/x86_64/op_mul_float.S
new file mode 100644
index 0000000..85960e9
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
new file mode 100644
index 0000000..6d36b6a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int.S b/runtime/interpreter/mterp/x86_64/op_mul_int.S
new file mode 100644
index 0000000..5f3923a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"imull (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
new file mode 100644
index 0000000..0b5af8a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
@@ -0,0 +1,8 @@
+ /* mul vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ imull (rFP,rINSTq,4), %eax
+ SET_VREG %eax, %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
new file mode 100644
index 0000000..a4cfdbc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"imull %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
new file mode 100644
index 0000000..89e9acb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"imull %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long.S b/runtime/interpreter/mterp/x86_64/op_mul_long.S
new file mode 100644
index 0000000..2b85370
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"imulq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
new file mode 100644
index 0000000..167128b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
@@ -0,0 +1,8 @@
+ /* mul vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, %rcx # rax <- vA
+ imulq (rFP,rINSTq,4), %rax
+ SET_WIDE_VREG %rax, %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_double.S b/runtime/interpreter/mterp/x86_64/op_neg_double.S
new file mode 100644
index 0000000..2c14b09
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_double.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"preinstr":" movq $0x8000000000000000, %rsi", "instr":" xorq %rsi, %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_float.S b/runtime/interpreter/mterp/x86_64/op_neg_float.S
new file mode 100644
index 0000000..148b21e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_float.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":" xorl $0x80000000, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_int.S b/runtime/interpreter/mterp/x86_64/op_neg_int.S
new file mode 100644
index 0000000..f90a937
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_int.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":" negl %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_long.S b/runtime/interpreter/mterp/x86_64/op_neg_long.S
new file mode 100644
index 0000000..18fc3cc
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_neg_long.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":" negq %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_new_array.S b/runtime/interpreter/mterp/x86_64/op_new_array.S
new file mode 100644
index 0000000..9831a0b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_new_array.S
@@ -0,0 +1,18 @@
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movq rINSTq, OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpNewArray)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_new_instance.S b/runtime/interpreter/mterp/x86_64/op_new_instance.S
new file mode 100644
index 0000000..fc8c8cd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_new_instance.S
@@ -0,0 +1,13 @@
+/*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rSELF, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movq rINSTq, OUT_ARG2
+ call SYMBOL(MterpNewInstance)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_nop.S b/runtime/interpreter/mterp/x86_64/op_nop.S
new file mode 100644
index 0000000..4cb68e3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_nop.S
@@ -0,0 +1 @@
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_not_int.S b/runtime/interpreter/mterp/x86_64/op_not_int.S
new file mode 100644
index 0000000..463d080
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_not_int.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":" notl %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_not_long.S b/runtime/interpreter/mterp/x86_64/op_not_long.S
new file mode 100644
index 0000000..c97bb9e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_not_long.S
@@ -0,0 +1 @@
+%include "x86_64/unop.S" {"instr":" notq %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int.S b/runtime/interpreter/mterp/x86_64/op_or_int.S
new file mode 100644
index 0000000..730310f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"orl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
new file mode 100644
index 0000000..f722e4d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"orl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
new file mode 100644
index 0000000..fee86c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"orl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
new file mode 100644
index 0000000..81104c7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"orl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long.S b/runtime/interpreter/mterp/x86_64/op_or_long.S
new file mode 100644
index 0000000..6c70a20
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"orq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
new file mode 100644
index 0000000..546da1d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"orq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_packed_switch.S b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
new file mode 100644
index 0000000..cb0acb7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
@@ -0,0 +1,22 @@
+%default { "func":"MterpDoPackedSwitch" }
+/*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ movslq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBBbbbb
+ leaq (rPC,OUT_ARG0,2), OUT_ARG0 # OUT_ARG0 <- PC + BBBBbbbb*2
+ GET_VREG OUT_32_ARG1, rINSTq # OUT_32_ARG1 <- vAA
+ call SYMBOL($func)
+ movslq %eax, rINSTq
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue
+ GOTO_NEXT
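The switch handlers hand the payload address and the test value to a helper, which returns a signed branch offset in 16-bit code units; the handler doubles it, adds it to rPC, and routes offsets of zero or less (backward branches) through the suspend check. A sketch with a simplified helper signature (illustrative):

    #include <cstdint>

    extern "C" int32_t MterpDoPackedSwitch(const uint16_t* switch_data,
                                           int32_t test_val);  // simplified shape

    inline const uint16_t* DoPackedSwitch(const uint16_t* pc, const uint32_t* vregs,
                                          unsigned aa) {
      int32_t bbbb = static_cast<int32_t>(pc[1] |
                                          (static_cast<uint32_t>(pc[2]) << 16));
      const uint16_t* data = pc + bbbb;         // PC + BBBBbbbb*2 in the assembly
      int32_t offset = MterpDoPackedSwitch(data, static_cast<int32_t>(vregs[aa]));
      // offset <= 0 (a backward branch) also takes the suspend-check path.
      return pc + offset;
    }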
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double.S b/runtime/interpreter/mterp/x86_64/op_rem_double.S
new file mode 100644
index 0000000..00aed78
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_double.S
@@ -0,0 +1,14 @@
+ /* rem_double vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx <- CC
+ movzbq 2(rPC), %rax # eax <- BB
+ fldl VREG_ADDRESS(%rcx) # %st1 <- fp[vCC]
+ fldl VREG_ADDRESS(%rax) # %st0 <- fp[vBB]
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(rINSTq) # fp[vAA] <- %st
+ CLEAR_WIDE_REF rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
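fprem only computes a partial remainder, so the loop above retries until the FPU's C2 flag clears (checked via fstsw/sahf/jp). The converged result is the same value fmod() produces, i.e. the remainder with the quotient truncated toward zero (fprem, not the IEEE fprem1):

    #include <cmath>

    // Equivalent of the rem_double handler's arithmetic; the result is then
    // written back to fp[vAA] and the wide ref slot is cleared.
    inline double RemDouble(double vbb, double vcc) {
      return std::fmod(vbb, vcc);
    }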
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
new file mode 100644
index 0000000..9768266
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
@@ -0,0 +1,15 @@
+ /* rem_double/2addr vA, vB */
+ movzbq rINSTbl, %rcx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ fldl VREG_ADDRESS(rINSTq) # vB to fp stack
+ andb $$0xf, %cl # ecx <- A
+ fldl VREG_ADDRESS(%rcx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(%rcx) # %st to vA
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float.S b/runtime/interpreter/mterp/x86_64/op_rem_float.S
new file mode 100644
index 0000000..5af28ac
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_float.S
@@ -0,0 +1,14 @@
+ /* rem_float vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx <- CC
+ movzbq 2(rPC), %rax # eax <- BB
+ flds VREG_ADDRESS(%rcx) # vCC to fp stack
+ flds VREG_ADDRESS(%rax) # vBB to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(rINSTq) # %st to vAA
+ CLEAR_REF rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
new file mode 100644
index 0000000..e9282a8
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
@@ -0,0 +1,15 @@
+ /* rem_float/2addr vA, vB */
+ movzbq rINSTbl, %rcx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ flds VREG_ADDRESS(rINSTq) # vB to fp stack
+ andb $$0xf, %cl # ecx <- A
+ flds VREG_ADDRESS(%rcx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(%rcx) # %st to vA
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int.S b/runtime/interpreter/mterp/x86_64/op_rem_int.S
new file mode 100644
index 0000000..fd77d7c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
new file mode 100644
index 0000000..25ffbf7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
new file mode 100644
index 0000000..21cc370
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit16.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
new file mode 100644
index 0000000..2eb0150
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/bindivLit8.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long.S b/runtime/interpreter/mterp/x86_64/op_rem_long.S
new file mode 100644
index 0000000..efa7215
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_long.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","ext":"cqo","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
new file mode 100644
index 0000000..ce0dd86
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/bindiv2addr.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","rem":"1","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_return.S b/runtime/interpreter/mterp/x86_64/op_return.S
new file mode 100644
index 0000000..14f4f8a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return.S
@@ -0,0 +1,15 @@
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_VREG %eax, rINSTq # eax <- vAA
+ jmp MterpReturn
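Every return handler issues the constructor fence and then polls the thread flags for suspend or checkpoint requests before leaving the frame. A sketch of the poll, with placeholder flag values standing in for the THREAD_*_REQUEST constants:

    #include <cstdint>

    constexpr uint32_t kSuspendRequest = 1u << 0;     // placeholder bit
    constexpr uint32_t kCheckpointRequest = 1u << 1;  // placeholder bit

    inline void CheckSuspendOnReturn(uint32_t thread_flags,
                                     void (*suspend_check)()) {
      if ((thread_flags & (kSuspendRequest | kCheckpointRequest)) != 0) {
        suspend_check();   // MterpSuspendCheck in the assembly
      }
    }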
diff --git a/runtime/interpreter/mterp/x86_64/op_return_object.S b/runtime/interpreter/mterp/x86_64/op_return_object.S
new file mode 100644
index 0000000..1ae69a5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_return.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void.S b/runtime/interpreter/mterp/x86_64/op_return_void.S
new file mode 100644
index 0000000..46a5753
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_void.S
@@ -0,0 +1,9 @@
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorq %rax, %rax
+ jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
new file mode 100644
index 0000000..92e3506
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
@@ -0,0 +1,7 @@
+ testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorq %rax, %rax
+ jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_wide.S b/runtime/interpreter/mterp/x86_64/op_return_wide.S
new file mode 100644
index 0000000..f2d6e04
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_return_wide.S
@@ -0,0 +1,13 @@
+/*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(rSELF)
+ jz 1f
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_WIDE_VREG %rax, rINSTq # rax <- v[AA]
+ jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int.S b/runtime/interpreter/mterp/x86_64/op_rsub_int.S
new file mode 100644
index 0000000..2dd2002
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rsub_int.S
@@ -0,0 +1,2 @@
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "x86_64/binopLit16.S" {"instr":"subl %eax, %ecx","result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
new file mode 100644
index 0000000..64d0d8a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"subl %eax, %ecx" , "result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S
new file mode 100644
index 0000000..38d9a5e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget.S
@@ -0,0 +1,25 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode", "wide":"0" }
+/*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
+ */
+ /* op vAA, field@BBBB */
+ .extern $helper
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref CCCC
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ movq rSELF, OUT_ARG2 # self
+ call SYMBOL($helper)
+ cmpl $$0, THREAD_EXCEPTION_OFFSET(rSELF)
+ jnz MterpException
+ .if $is_object
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ .else
+ .if $wide
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
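One sget template serves every variant: the per-type helper is substituted in, an exception is detected by checking the thread's pending-exception slot rather than the return value, and the destination vreg is written as object, wide, or plain 32-bit depending on the template flags. A sketch of that control flow (types and callbacks illustrative):

    #include <cstdint>

    template <typename T>
    bool DoSGet(uint32_t field_idx, void* referrer, void* self,
                T (*helper)(uint32_t, void*, void*),
                bool (*exception_pending)(void*), T* dest_vreg) {
      T value = helper(field_idx, referrer, self);
      if (exception_pending(self)) {   // cmpl $0, THREAD_EXCEPTION_OFFSET(rSELF)
        return false;                  // MterpException path
      }
      *dest_vreg = value;              // SET_VREG / SET_WIDE_VREG / SET_VREG_OBJECT
      return true;
    }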
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
new file mode 100644
index 0000000..7d358da
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
new file mode 100644
index 0000000..79d9ff4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetByteStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S
new file mode 100644
index 0000000..4488610
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetCharStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S
new file mode 100644
index 0000000..09b627e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_object.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S
new file mode 100644
index 0000000..47ac238
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGetShortStaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
new file mode 100644
index 0000000..aa22343
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
@@ -0,0 +1 @@
+%include "x86_64/op_sget.S" {"helper":"artGet64StaticFromCode", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int.S b/runtime/interpreter/mterp/x86_64/op_shl_int.S
new file mode 100644
index 0000000..fa1edb7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
new file mode 100644
index 0000000..dd96279
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
new file mode 100644
index 0000000..39b23ae
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long.S b/runtime/interpreter/mterp/x86_64/op_shl_long.S
new file mode 100644
index 0000000..fdc7cb6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_long.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"salq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
new file mode 100644
index 0000000..546633f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"salq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int.S b/runtime/interpreter/mterp/x86_64/op_shr_int.S
new file mode 100644
index 0000000..fc289f4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
new file mode 100644
index 0000000..0e5bca7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
new file mode 100644
index 0000000..3cc9307
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long.S b/runtime/interpreter/mterp/x86_64/op_shr_long.S
new file mode 100644
index 0000000..25028d3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_long.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"sarq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
new file mode 100644
index 0000000..3738413
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"sarq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sparse_switch.S b/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
new file mode 100644
index 0000000..0eaa514
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "x86_64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S
new file mode 100644
index 0000000..e92b032
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput.S
@@ -0,0 +1,17 @@
+%default { "helper":"artSet32StaticFromCode"}
+/*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ .extern $helper
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL($helper)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
new file mode 100644
index 0000000..8718915
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
new file mode 100644
index 0000000..8718915
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S
new file mode 100644
index 0000000..2fe9d14
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_char.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_object.S b/runtime/interpreter/mterp/x86_64/op_sput_object.S
new file mode 100644
index 0000000..eb5a376
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_object.S
@@ -0,0 +1,10 @@
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movq rINSTq, OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpSputObject)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S
new file mode 100644
index 0000000..2fe9d14
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_short.S
@@ -0,0 +1 @@
+%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
new file mode 100644
index 0000000..c4bc269
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
@@ -0,0 +1,15 @@
+/*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field@BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # field ref BBBB
+ movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA]
+ movq rSELF, OUT_ARG3 # self
+ call SYMBOL(artSet64IndirectStaticFromMterp)
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double.S b/runtime/interpreter/mterp/x86_64/op_sub_double.S
new file mode 100644
index 0000000..952667e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_double.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
new file mode 100644
index 0000000..0bd5dbb
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float.S b/runtime/interpreter/mterp/x86_64/op_sub_float.S
new file mode 100644
index 0000000..ea0ae14
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_float.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
new file mode 100644
index 0000000..9dd1780
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int.S b/runtime/interpreter/mterp/x86_64/op_sub_int.S
new file mode 100644
index 0000000..560394f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"subl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
new file mode 100644
index 0000000..6f50f78
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"subl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long.S b/runtime/interpreter/mterp/x86_64/op_sub_long.S
new file mode 100644
index 0000000..7fa54e7
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"subq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
new file mode 100644
index 0000000..c18be10
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"subq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_throw.S b/runtime/interpreter/mterp/x86_64/op_throw.S
new file mode 100644
index 0000000..22ed990
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_throw.S
@@ -0,0 +1,10 @@
+/*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ GET_VREG %eax, rINSTq # eax<- vAA (exception object)
+ testb %al, %al
+ jz common_errNullObject
+ movq %rax, THREAD_EXCEPTION_OFFSET(rSELF)
+ jmp MterpException
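The throw handler null-checks the exception register, installs the object as the thread's pending exception, and then takes the common exception path. A sketch, with the Thread field abstracted behind a setter (illustrative):

    inline bool DoThrow(void* exception_obj,
                        void (*set_pending_exception)(void*)) {
      if (exception_obj == nullptr) {
        return false;                        // common_errNullObject
      }
      set_pending_exception(exception_obj);  // THREAD_EXCEPTION_OFFSET(rSELF)
      return true;                           // then jmp MterpException
    }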
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3e.S b/runtime/interpreter/mterp/x86_64/op_unused_3e.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_3e.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3f.S b/runtime/interpreter/mterp/x86_64/op_unused_3f.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_3f.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_40.S b/runtime/interpreter/mterp/x86_64/op_unused_40.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_40.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_41.S b/runtime/interpreter/mterp/x86_64/op_unused_41.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_41.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_42.S b/runtime/interpreter/mterp/x86_64/op_unused_42.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_42.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_43.S b/runtime/interpreter/mterp/x86_64/op_unused_43.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_43.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_79.S b/runtime/interpreter/mterp/x86_64/op_unused_79.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_79.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_7a.S b/runtime/interpreter/mterp/x86_64/op_unused_7a.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_7a.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f4.S b/runtime/interpreter/mterp/x86_64/op_unused_f4.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f4.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fa.S b/runtime/interpreter/mterp/x86_64/op_unused_fa.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fa.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fb.S b/runtime/interpreter/mterp/x86_64/op_unused_fb.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fb.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fc.S b/runtime/interpreter/mterp/x86_64/op_unused_fc.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fc.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fd.S b/runtime/interpreter/mterp/x86_64/op_unused_fd.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fd.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fe.S b/runtime/interpreter/mterp/x86_64/op_unused_fe.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_fe.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_ff.S b/runtime/interpreter/mterp/x86_64/op_unused_ff.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_ff.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int.S b/runtime/interpreter/mterp/x86_64/op_ushr_int.S
new file mode 100644
index 0000000..dd91086
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
new file mode 100644
index 0000000..d38aedd
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
new file mode 100644
index 0000000..f7ff8ab
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long.S b/runtime/interpreter/mterp/x86_64/op_ushr_long.S
new file mode 100644
index 0000000..7c6daca
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_long.S
@@ -0,0 +1 @@
+%include "x86_64/binop1.S" {"instr":"shrq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
new file mode 100644
index 0000000..cd6a22c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/shop2addr.S" {"instr":"shrq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int.S b/runtime/interpreter/mterp/x86_64/op_xor_int.S
new file mode 100644
index 0000000..b295d74
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int.S
@@ -0,0 +1 @@
+%include "x86_64/binop.S" {"instr":"xorl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
new file mode 100644
index 0000000..879bfc0
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binop2addr.S" {"instr":"xorl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
new file mode 100644
index 0000000..5d375a1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit16.S" {"instr":"xorl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
new file mode 100644
index 0000000..54cce9c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "x86_64/binopLit8.S" {"instr":"xorl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long.S b/runtime/interpreter/mterp/x86_64/op_xor_long.S
new file mode 100644
index 0000000..52b44e2
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_long.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide.S" {"instr":"xorq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
new file mode 100644
index 0000000..d75c4ba
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "x86_64/binopWide2addr.S" {"instr":"xorq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/shop2addr.S b/runtime/interpreter/mterp/x86_64/shop2addr.S
new file mode 100644
index 0000000..6b06d00
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/shop2addr.S
@@ -0,0 +1,19 @@
+%default {"wide":"0"}
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $$0xf, rINSTbl # rINST <- A
+ .if $wide
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ $instr # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ $instr # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop.S b/runtime/interpreter/mterp/x86_64/sseBinop.S
new file mode 100644
index 0000000..09d3364
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/sseBinop.S
@@ -0,0 +1,9 @@
+%default {"instr":"","suff":""}
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ ${instr}${suff} VREG_ADDRESS(%rax), %xmm0
+ movs${suff} %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
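The sseBinop template loads vBB into xmm0, applies the substituted instruction against vCC (for op_mul_double, instr="muls" and suff="d" expand ${instr}${suff} to mulsd), stores the result to vAA, and zeroes the shadow reference slot so stale bits are not mistaken for a reference. The arithmetic it performs, with the vreg file viewed as doubles for brevity (illustrative layout, not ART's actual register file):

    #include <cstdint>

    inline void MulDouble(double* fpregs, uint8_t aa, uint8_t bb, uint8_t cc) {
      fpregs[aa] = fpregs[bb] * fpregs[cc];   // vAA <- vBB * vCC
    }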
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S b/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
new file mode 100644
index 0000000..084166b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
@@ -0,0 +1,10 @@
+%default {"instr":"","suff":""}
+ movl rINST, %ecx # ecx <- A+
+ andl $$0xf, %ecx # ecx <- A
+ movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $$4, rINST # rINST<- B
+ ${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
+ movs${suff} %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/unop.S b/runtime/interpreter/mterp/x86_64/unop.S
new file mode 100644
index 0000000..1777123
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/unop.S
@@ -0,0 +1,22 @@
+%default {"preinstr":"", "instr":"", "wide":"0"}
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4,rINST # rINST <- B
+ .if ${wide}
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $$0xf,%cl # ecx <- A
+$preinstr
+$instr
+ .if ${wide}
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
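The unop template reads vB, runs whatever $preinstr/$instr the including file substitutes, and writes vA, switching to the 64-bit GET/SET_WIDE_VREG pair when wide=1. The 32-bit data flow for op_not_int (instr="notl %eax") in C++ terms (names illustrative):

    #include <cstdint>

    inline void DoNotInt(uint32_t* vregs, uint16_t inst) {
      unsigned a = (inst >> 8) & 0xf;    // destination (low nibble of BA)
      unsigned b = (inst >> 12) & 0xf;   // source (high nibble of BA)
      vregs[a] = ~vregs[b];              // the substituted instruction
    }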
diff --git a/runtime/interpreter/mterp/x86_64/unused.S b/runtime/interpreter/mterp/x86_64/unused.S
new file mode 100644
index 0000000..c95ef94
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
diff --git a/runtime/interpreter/mterp/x86_64/zcmp.S b/runtime/interpreter/mterp/x86_64/zcmp.S
new file mode 100644
index 0000000..0051407
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/zcmp.S
@@ -0,0 +1,19 @@
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $$0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ movl $$2, rINST # assume branch not taken
+ j${revcmp} 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+1:
+ MTERP_PROFILE_BRANCH
+ addq rINSTq, rINSTq # rINSTq <- offset * 2
+ leaq (rPC, rINSTq), rPC
+ FETCH_INST
+ jle MterpCheckSuspendAndContinue # offset * 2 <= 0 => suspend check
+ GOTO_NEXT
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 0e175b8..b21f1ec 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -22,11 +22,13 @@
#include "ScopedLocalRef.h"
#include "art_method-inl.h"
+#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker.h"
#include "common_throws.h"
#include "entrypoints/entrypoint_utils-inl.h"
+#include "gc/reference_processor.h"
#include "handle_scope-inl.h"
#include "interpreter/interpreter_common.h"
#include "mirror/array-inl.h"
@@ -261,6 +263,25 @@
}
}
+// This is required for Enum(Set) code, as that uses reflection to inspect enum classes.
+void UnstartedRuntime::UnstartedClassGetDeclaredMethod(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ // Special managed code cut-out to allow method lookup in an un-started runtime.
+ mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ if (klass == nullptr) {
+ ThrowNullPointerExceptionForMethodAccess(shadow_frame->GetMethod(), InvokeType::kVirtual);
+ return;
+ }
+ mirror::String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ mirror::ObjectArray<mirror::Class>* args =
+ shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<mirror::Class>();
+ if (Runtime::Current()->IsActiveTransaction()) {
+ result->SetL(mirror::Class::GetDeclaredMethodInternal<true>(self, klass, name, args));
+ } else {
+ result->SetL(mirror::Class::GetDeclaredMethodInternal<false>(self, klass, name, args));
+ }
+}
+
void UnstartedRuntime::UnstartedClassGetEnclosingClass(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
StackHandleScope<1> hs(self);
@@ -860,6 +881,155 @@
result->SetL(string->ToCharArray(self));
}
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
+void UnstartedRuntime::UnstartedReferenceGetReferent(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ mirror::Reference* const ref = down_cast<mirror::Reference*>(
+ shadow_frame->GetVRegReference(arg_offset));
+ if (ref == nullptr) {
+ AbortTransactionOrFail(self, "Reference.getReferent() with null object");
+ return;
+ }
+ mirror::Object* const referent =
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
+ result->SetL(referent);
+}
+
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue. We use a somewhat
+// conservative upper bound. We restrict the callers to SynchronousQueue and ConcurrentHashMap,
+// where we can predict the behavior (somewhat).
+// Note: this is required (instead of lazy initialization) as these classes are used in the static
+// initialization of other classes, so will *use* the value.
+void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+ std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+ if (caller == "void java.util.concurrent.SynchronousQueue.<clinit>()") {
+ // SynchronousQueue really only distinguishes between the single- and multi-processor
+ // cases. Return 8 as a conservative upper approximation.
+ result->SetI(8);
+ } else if (caller == "void java.util.concurrent.ConcurrentHashMap.<clinit>()") {
+ // ConcurrentHashMap uses it for striding. 8 still seems an OK general value, as it's likely
+ // a good upper bound.
+ // TODO: Consider resetting in the zygote?
+ result->SetI(8);
+ } else {
+ // Not supported.
+ AbortTransactionOrFail(self, "Accessing availableProcessors not allowed");
+ }
+}
+
+// This allows accessing ConcurrentHashMap/SynchronousQueue.
+
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapLong(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ // Argument 0 is the Unsafe instance, skip.
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+ return;
+ }
+ int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+ int64_t expectedValue = shadow_frame->GetVRegLong(arg_offset + 4);
+ int64_t newValue = shadow_frame->GetVRegLong(arg_offset + 6);
+
+ // Must use non transactional mode.
+ if (kUseReadBarrier) {
+ // Need to make sure the reference stored in the field is a to-space one before attempting the
+ // CAS or the CAS could fail incorrectly.
+ mirror::HeapReference<mirror::Object>* field_addr =
+ reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
+ reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
+ ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+ obj,
+ MemberOffset(offset),
+ field_addr);
+ }
+ bool success;
+ // Check whether we're in a transaction, call accordingly.
+ if (Runtime::Current()->IsActiveTransaction()) {
+ success = obj->CasFieldStrongSequentiallyConsistent64<true>(MemberOffset(offset),
+ expectedValue,
+ newValue);
+ } else {
+ success = obj->CasFieldStrongSequentiallyConsistent64<false>(MemberOffset(offset),
+ expectedValue,
+ newValue);
+ }
+ result->SetZ(success ? 1 : 0);
+}
+
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapObject(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ // Argument 0 is the Unsafe instance, skip.
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+ return;
+ }
+ int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+ mirror::Object* expected_value = shadow_frame->GetVRegReference(arg_offset + 4);
+ mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 5);
+
+ // Must use non transactional mode.
+ if (kUseReadBarrier) {
+ // Need to make sure the reference stored in the field is a to-space one before attempting the
+ // CAS or the CAS could fail incorrectly.
+ mirror::HeapReference<mirror::Object>* field_addr =
+ reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
+ reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
+ ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+ obj,
+ MemberOffset(offset),
+ field_addr);
+ }
+ bool success;
+ // Check whether we're in a transaction, call accordingly.
+ if (Runtime::Current()->IsActiveTransaction()) {
+ success = obj->CasFieldStrongSequentiallyConsistentObject<true>(MemberOffset(offset),
+ expected_value,
+ newValue);
+ } else {
+ success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
+ expected_value,
+ newValue);
+ }
+ result->SetZ(success ? 1 : 0);
+}
+
+void UnstartedRuntime::UnstartedUnsafeGetObjectVolatile(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Argument 0 is the Unsafe instance, skip.
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+ return;
+ }
+ int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+ mirror::Object* value = obj->GetFieldObjectVolatile<mirror::Object>(MemberOffset(offset));
+ result->SetL(value);
+}
+
+void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Argument 0 is the Unsafe instance, skip.
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+ return;
+ }
+ int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+ mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 4);
+ QuasiAtomic::ThreadFenceRelease();
+ if (Runtime::Current()->IsActiveTransaction()) {
+ obj->SetFieldObject<true>(MemberOffset(offset), newValue);
+ } else {
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
+ }
+}
+
+
void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args, JValue* result) {
@@ -906,6 +1076,17 @@
result->SetD(exp(value.GetD()));
}
+void UnstartedRuntime::UnstartedJNIAtomicLongVMSupportsCS8(
+ Thread* self ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result) {
+ result->SetZ(QuasiAtomic::LongAtomicsUseMutexes(Runtime::Current()->GetInstructionSet())
+ ? 0
+ : 1);
+}
+
void UnstartedRuntime::UnstartedJNIClassGetNameNative(
Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
@@ -913,6 +1094,13 @@
result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
}
+void UnstartedRuntime::UnstartedJNIDoubleLongBitsToDouble(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+ uint64_t long_input = args[0] | (static_cast<uint64_t>(args[1]) << 32);
+ result->SetD(bit_cast<double>(long_input));
+}
+
void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(
Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
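
As a side note on the argument packing assumed by UnstartedJNIDoubleLongBitsToDouble above:
the Java long arrives as two 32-bit register words, low word first. A self-contained sketch of
the same reassembly (std::memcpy stands in for ART's bit_cast; names here are illustrative):

    #include <cstdint>
    #include <cstring>

    double LongBitsToDouble(uint32_t lo, uint32_t hi) {
      uint64_t bits = static_cast<uint64_t>(lo) | (static_cast<uint64_t>(hi) << 32);
      double value;
      std::memcpy(&value, &bits, sizeof(value));  // same effect as bit_cast<double>(bits)
      return value;
    }

    // Example: LongBitsToDouble(0u, 0x3FF00000u) yields 1.0, since the IEEE-754
    // encoding of 1.0 is 0x3FF0000000000000.
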
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 6d4d711..29f2197 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -24,6 +24,7 @@
V(ClassClassForName, "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") \
V(ClassNewInstance, "java.lang.Object java.lang.Class.newInstance()") \
V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
+ V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \
V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \
V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
@@ -40,6 +41,8 @@
V(MemoryPeekInt, "int libcore.io.Memory.peekIntNative(long)") \
V(MemoryPeekLong, "long libcore.io.Memory.peekLongNative(long)") \
V(MemoryPeekByteArray, "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") \
+ V(ReferenceGetReferent, "java.lang.Object java.lang.ref.Reference.getReferent()") \
+ V(RuntimeAvailableProcessors, "int java.lang.Runtime.availableProcessors()") \
V(SecurityGetSecurityPropertiesReader, "java.io.Reader java.security.Security.getSecurityPropertiesReader()") \
V(StringGetCharsNoCheck, "void java.lang.String.getCharsNoCheck(int, int, char[], int)") \
V(StringCharAt, "char java.lang.String.charAt(int)") \
@@ -47,7 +50,11 @@
V(StringFactoryNewStringFromChars, "java.lang.String java.lang.StringFactory.newStringFromChars(int, int, char[])") \
V(StringFactoryNewStringFromString, "java.lang.String java.lang.StringFactory.newStringFromString(java.lang.String)") \
V(StringFastSubstring, "java.lang.String java.lang.String.fastSubstring(int, int)") \
- V(StringToCharArray, "char[] java.lang.String.toCharArray()")
+ V(StringToCharArray, "char[] java.lang.String.toCharArray()") \
+ V(UnsafeCompareAndSwapLong, "boolean sun.misc.Unsafe.compareAndSwapLong(java.lang.Object, long, long, long)") \
+ V(UnsafeCompareAndSwapObject, "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") \
+ V(UnsafeGetObjectVolatile, "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") \
+ V(UnsafePutOrderedObject, "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)")
// Methods that are native.
#define UNSTARTED_RUNTIME_JNI_LIST(V) \
@@ -56,7 +63,9 @@
V(VMStackGetStackClass2, "java.lang.Class dalvik.system.VMStack.getStackClass2()") \
V(MathLog, "double java.lang.Math.log(double)") \
V(MathExp, "double java.lang.Math.exp(double)") \
+ V(AtomicLongVMSupportsCS8, "boolean java.util.concurrent.atomic.AtomicLong.VMSupportsCS8()") \
V(ClassGetNameNative, "java.lang.String java.lang.Class.getNameNative()") \
+ V(DoubleLongBitsToDouble, "double java.lang.Double.longBitsToDouble(long)") \
V(FloatFloatToRawIntBits, "int java.lang.Float.floatToRawIntBits(float)") \
V(FloatIntBitsToFloat, "float java.lang.Float.intBitsToFloat(int)") \
V(ObjectInternalClone, "java.lang.Object java.lang.Object.internalClone()") \
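
The two V() lists above are X-macros: each entry is expanded once to declare an
Unstarted*/UnstartedJNI* handler and once to register its descriptor for dispatch. A minimal
sketch of the pattern (the list contents, handler names and table type below are illustrative,
not the actual ART tables):

    #include <string>
    #include <unordered_map>

    #define EXAMPLE_UNSTARTED_LIST(V) \
      V(MathLog, "double java.lang.Math.log(double)") \
      V(DoubleLongBitsToDouble, "double java.lang.Double.longBitsToDouble(long)")

    // Expansion 1: one handler per entry.
    #define DEFINE_HANDLER(name, descriptor) void Unstarted##name() { /* ... */ }
    EXAMPLE_UNSTARTED_LIST(DEFINE_HANDLER)
    #undef DEFINE_HANDLER

    // Expansion 2: a descriptor -> handler dispatch table built from the same list.
    static const std::unordered_map<std::string, void (*)()> kHandlers = {
    #define TABLE_ENTRY(name, descriptor) {descriptor, &Unstarted##name},
      EXAMPLE_UNSTARTED_LIST(TABLE_ENTRY)
    #undef TABLE_ENTRY
    };
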
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index f1f4a03..6278ef0 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -690,6 +690,19 @@
}
/*
+ * Invoke a static method on an interface.
+ */
+static JdwpError IT_InvokeMethod(JdwpState* state, Request* request,
+ ExpandBuf* pReply ATTRIBUTE_UNUSED)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ RefTypeId class_id = request->ReadRefTypeId();
+ ObjectId thread_id = request->ReadThreadId();
+ MethodId method_id = request->ReadMethodId();
+
+ return RequestInvoke(state, request, thread_id, 0, class_id, method_id, false);
+}
+
+/*
* Return line number information for the method, if present.
*/
static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply)
@@ -1481,6 +1494,7 @@
{ 4, 1, AT_newInstance, "ArrayType.NewInstance" },
/* InterfaceType command set (5) */
+ { 5, 1, IT_InvokeMethod, "InterfaceType.InvokeMethod" },
/* Method command set (6) */
{ 6, 1, M_LineTable, "Method.LineTable" },
@@ -1579,6 +1593,8 @@
return command == kJDWPClassTypeInvokeMethodCmd || command == kJDWPClassTypeNewInstanceCmd;
} else if (command_set == kJDWPObjectReferenceCmdSet) {
return command == kJDWPObjectReferenceInvokeCmd;
+ } else if (command_set == kJDWPInterfaceTypeCmdSet) {
+ return command == kJDWPInterfaceTypeInvokeMethodCmd;
} else {
return false;
}
diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h
index 29314f6..4e1bda8 100644
--- a/runtime/jdwp/jdwp_priv.h
+++ b/runtime/jdwp/jdwp_priv.h
@@ -45,6 +45,8 @@
static constexpr uint8_t kJDWPClassTypeCmdSet = 3U;
static constexpr uint8_t kJDWPClassTypeInvokeMethodCmd = 3U;
static constexpr uint8_t kJDWPClassTypeNewInstanceCmd = 4U;
+static constexpr uint8_t kJDWPInterfaceTypeCmdSet = 5U;
+static constexpr uint8_t kJDWPInterfaceTypeInvokeMethodCmd = 1U;
static constexpr uint8_t kJDWPObjectReferenceCmdSet = 9U;
static constexpr uint8_t kJDWPObjectReferenceInvokeCmd = 6U;
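
For reference, JDWP identifies each request by a (command set, command) byte pair, and the new
constants let the invoke-tracking logic recognize InterfaceType.InvokeMethod. A simplified sketch
of that check (not the real JdwpState code):

    #include <cstdint>

    // Values mirror the constants added above.
    constexpr uint8_t kInterfaceTypeCmdSet = 5U;
    constexpr uint8_t kInterfaceTypeInvokeMethodCmd = 1U;

    bool IsInterfaceTypeInvoke(uint8_t command_set, uint8_t command) {
      return command_set == kInterfaceTypeCmdSet &&
             command == kInterfaceTypeInvokeMethodCmd;
    }
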
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 3e66ce2..91b006a 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -213,6 +213,10 @@
return false;
}
+bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
+ return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
+}
+
Jit::~Jit() {
DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
if (dump_info_on_shutdown_) {
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 109ca3d..3f54192 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -84,8 +84,12 @@
// into the specified class linker to the jit debug interface,
void DumpTypeInfoForLoadedTypes(ClassLinker* linker);
+ // Return whether we should try to JIT-compile code as soon as an ArtMethod is invoked.
bool JitAtFirstUse();
+ // Return whether we can invoke JIT code for `method`.
+ bool CanInvokeCompiledCode(ArtMethod* method);
+
// If an OSR compiled version is available for `method`,
// and `dex_pc + dex_pc_offset` is an entry point of that compiled
// version, this method will jump to the compiled code, let it run,
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 8858b48..e0380bd 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -123,7 +123,7 @@
current_capacity_(initial_code_capacity + initial_data_capacity),
code_end_(initial_code_capacity),
data_end_(initial_data_capacity),
- has_done_full_collection_(false),
+ last_collection_increased_code_cache_(false),
last_update_time_ns_(0),
garbage_collect_code_(garbage_collect_code),
used_memory_for_data_(0),
@@ -546,34 +546,20 @@
}
}
-void JitCodeCache::RemoveUnusedCode(Thread* self) {
- // Clear the osr map, chances are most of the code in it is now dead.
- {
- MutexLock mu(self, lock_);
- osr_code_map_.clear();
- }
-
- // Run a checkpoint on all threads to mark the JIT compiled code they are running.
- MarkCompiledCodeOnThreadStacks(self);
-
- // Iterate over all compiled code and remove entries that are not marked and not
- // the entrypoint of their corresponding ArtMethod.
- {
- MutexLock mu(self, lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
- ArtMethod* method = it->second;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- if ((method->GetEntryPointFromQuickCompiledCode() != method_header->GetEntryPoint()) &&
- !GetLiveBitmap()->Test(allocation)) {
- FreeCode(code_ptr, method);
- it = method_code_map_.erase(it);
- } else {
- ++it;
- }
- }
+bool JitCodeCache::ShouldDoFullCollection() {
+ if (current_capacity_ == max_capacity_) {
+ // Always do a full collection when the code cache is full.
+ return true;
+ } else if (current_capacity_ < kReservedCapacity) {
+ // Always do partial collection when the code cache size is below the reserved
+ // capacity.
+ return false;
+ } else if (last_collection_increased_code_cache_) {
+ // This time do a full collection.
+ return true;
+ } else {
+ // This time do a partial collection.
+ return false;
}
}
@@ -599,21 +585,10 @@
}
}
- // Check if we want to do a full collection.
- bool do_full_collection = true;
+ bool do_full_collection = false;
{
MutexLock mu(self, lock_);
- if (current_capacity_ == max_capacity_) {
- // Always do a full collection when the code cache is full.
- do_full_collection = true;
- } else if (current_capacity_ < kReservedCapacity) {
- // Do a partial collection until we hit the reserved capacity limit.
- do_full_collection = false;
- } else if (has_done_full_collection_) {
- // Do a partial collection if we have done a full collection in the last
- // collection round.
- do_full_collection = false;
- }
+ do_full_collection = ShouldDoFullCollection();
}
if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
@@ -624,45 +599,91 @@
<< ", data=" << PrettySize(DataCacheSize());
}
- if (do_full_collection) {
- DoFullCollection(self);
- } else {
- RemoveUnusedCode(self);
- }
-
- {
- MutexLock mu(self, lock_);
- if (!do_full_collection) {
- has_done_full_collection_ = false;
- IncreaseCodeCacheCapacity();
- } else {
- has_done_full_collection_ = true;
- }
- live_bitmap_.reset(nullptr);
- NotifyCollectionDone(self);
- }
+ DoCollection(self, /* collect_profiling_info */ do_full_collection);
if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
LOG(INFO) << "After code cache collection, code="
<< PrettySize(CodeCacheSize())
<< ", data=" << PrettySize(DataCacheSize());
}
-}
-void JitCodeCache::DoFullCollection(Thread* self) {
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
{
MutexLock mu(self, lock_);
- // Walk over all compiled methods and set the entry points of these
- // methods to interpreter.
- for (auto& it : method_code_map_) {
- instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
+
+ // Increase the code cache only when we do partial collections.
+ // TODO: base this strategy on how full the code cache is?
+ if (do_full_collection) {
+ last_collection_increased_code_cache_ = false;
+ } else {
+ last_collection_increased_code_cache_ = true;
+ IncreaseCodeCacheCapacity();
}
- // Clear the profiling info of methods that are not being compiled.
- for (ProfilingInfo* info : profiling_infos_) {
- if (!info->IsMethodBeingCompiled()) {
- info->GetMethod()->SetProfilingInfo(nullptr);
+ bool next_collection_will_be_full = ShouldDoFullCollection();
+
+ // Start polling the liveness of compiled code to prepare for the next full collection.
+ // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+ // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+ if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
+ next_collection_will_be_full) {
+ // Save the entry point of methods we have compiled, and update the entry
+ // point of those methods to the interpreter. If the method is invoked, the
+ // interpreter will update its entry point to the compiled code and call it.
+ for (ProfilingInfo* info : profiling_infos_) {
+ const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (ContainsPc(entry_point)) {
+ info->SetSavedEntryPoint(entry_point);
+ info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
+ }
+
+ DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+ }
+ live_bitmap_.reset(nullptr);
+ NotifyCollectionDone(self);
+ }
+}
+
+void JitCodeCache::RemoveUnusedAndUnmarkedCode(Thread* self) {
+ MutexLock mu(self, lock_);
+ ScopedCodeCacheWrite scc(code_map_.get());
+ // Iterate over all compiled code and remove entries that are not marked and not
+ // the entrypoint of their corresponding ArtMethod.
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ const void* entrypoint = method->GetEntryPointFromQuickCompiledCode();
+ if ((entrypoint == method_header->GetEntryPoint()) || GetLiveBitmap()->Test(allocation)) {
+ ++it;
+ } else {
+ if (entrypoint == GetQuickToInterpreterBridge()) {
+ method->ClearCounter();
+ }
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ }
+ }
+}
+
+void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
+ {
+ MutexLock mu(self, lock_);
+ if (collect_profiling_info) {
+ // Clear the profiling info of methods that do not have compiled code as entrypoint.
+ // Also remove the saved entry point from the ProfilingInfo objects.
+ for (ProfilingInfo* info : profiling_infos_) {
+ const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+ info->GetMethod()->SetProfilingInfo(nullptr);
+ }
+ info->SetSavedEntryPoint(nullptr);
+ }
+ } else if (kIsDebugBuild) {
+ // Sanity check that the profiling infos do not have a dangling entry point.
+ for (ProfilingInfo* info : profiling_infos_) {
+ DCHECK(info->GetSavedEntryPoint() == nullptr);
}
}
@@ -674,41 +695,58 @@
// Run a checkpoint on all threads to mark the JIT compiled code they are running.
MarkCompiledCodeOnThreadStacks(self);
- {
- MutexLock mu(self, lock_);
- // Free unused compiled code, and restore the entry point of used compiled code.
- {
- ScopedCodeCacheWrite scc(code_map_.get());
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
- ArtMethod* method = it->second;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- if (GetLiveBitmap()->Test(allocation)) {
- instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
- ++it;
- } else {
- method->ClearCounter();
- DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
- FreeCode(code_ptr, method);
- it = method_code_map_.erase(it);
- }
- }
- }
+ // Remove compiled code that is not the entrypoint of its method and is not on the
+ // call stack.
+ RemoveUnusedAndUnmarkedCode(self);
- // Free all profiling infos of methods that were not being compiled.
+ if (collect_profiling_info) {
+ MutexLock mu(self, lock_);
+ // Free all profiling infos of methods not compiled nor being compiled.
auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
[this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
- if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
+ // that the compiled code would not get revived. As mutator threads run concurrently,
+ // they may have revived the compiled code, and now we are in the situation where
+ // a method has compiled code but no ProfilingInfo.
+ // We make sure compiled methods have a ProfilingInfo object. It is needed for
+ // code cache collection.
+ if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ // We clear the inline caches, as classes in them might be stale.
+ info->ClearInlineCaches();
+ // Do a fence to make sure the clearing is seen before attaching to the method.
+ QuasiAtomic::ThreadFenceRelease();
+ info->GetMethod()->SetProfilingInfo(info);
+ } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
+ // No need for this ProfilingInfo object anymore.
FreeData(reinterpret_cast<uint8_t*>(info));
return true;
}
return false;
});
profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
+ DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
}
}
+bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
+ // Check that methods we have compiled do have a ProfilingInfo object. We would
+ // have memory leaks of compiled code otherwise.
+ for (const auto& it : method_code_map_) {
+ ArtMethod* method = it.second;
+ if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ const void* code_ptr = it.first;
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+ // If the code is not dead, then we have a problem. Note that this can even
+ // happen just after a collection, as mutator threads are running in parallel
+ // and could deoptimize an existing compiled code.
+ return false;
+ }
+ }
+ }
+ return true;
+}
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
@@ -751,23 +789,38 @@
ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& entries,
- bool retry_allocation) {
- ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);
+ bool retry_allocation)
+ // No thread safety analysis as we are using TryLock/Unlock explicitly.
+ NO_THREAD_SAFETY_ANALYSIS {
+ ProfilingInfo* info = nullptr;
+ if (!retry_allocation) {
+ // If we are allocating for the interpreter, just try to lock, to avoid
+ // lock contention with the JIT.
+ if (lock_.ExclusiveTryLock(self)) {
+ info = AddProfilingInfoInternal(self, method, entries);
+ lock_.ExclusiveUnlock(self);
+ }
+ } else {
+ {
+ MutexLock mu(self, lock_);
+ info = AddProfilingInfoInternal(self, method, entries);
+ }
- if (info == nullptr && retry_allocation) {
- GarbageCollectCache(self);
- info = AddProfilingInfoInternal(self, method, entries);
+ if (info == nullptr) {
+ GarbageCollectCache(self);
+ MutexLock mu(self, lock_);
+ info = AddProfilingInfoInternal(self, method, entries);
+ }
}
return info;
}
-ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
+ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
ArtMethod* method,
const std::vector<uint32_t>& entries) {
size_t profile_info_size = RoundUp(
sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
sizeof(void*));
- MutexLock mu(self, lock_);
// Check whether some other thread has concurrently created it.
ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
@@ -849,6 +902,13 @@
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ if ((profiling_info != nullptr) &&
+ (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
+ // Prevent future uses of the compiled code.
+ profiling_info->SetSavedEntryPoint(nullptr);
+ }
+
if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
// The entrypoint is the one to invalidate, so we just update
// it to the interpreter entry point and clear the counter to get the method
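
To summarize the policy introduced above in one place: partial and full collections now
alternate once the cache passes the reserved size, and the cache only grows after a partial
collection. A simplified restatement of ShouldDoFullCollection(), with the lock and members
elided (not the actual JitCodeCache member function):

    bool ShouldDoFullCollection(size_t current_capacity,
                                size_t max_capacity,
                                size_t reserved_capacity,
                                bool last_collection_increased_code_cache) {
      if (current_capacity == max_capacity) {
        return true;   // Cache is full: always collect everything.
      }
      if (current_capacity < reserved_capacity) {
        return false;  // Still below the reserved size: keep collections cheap and partial.
      }
      // Otherwise alternate: a partial collection grows the cache, and the
      // following collection is a full one.
      return last_collection_increased_code_cache;
    }
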
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 4574edf..aa1b139 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -124,6 +124,11 @@
return live_bitmap_.get();
}
+ // Return whether we should do a full collection given the current state of the cache.
+ bool ShouldDoFullCollection()
+ REQUIRES(lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Perform a collection on the code cache.
void GarbageCollectCache(Thread* self)
REQUIRES(!lock_)
@@ -208,7 +213,7 @@
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& entries)
- REQUIRES(!lock_)
+ REQUIRES(lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// If a collection is in progress, wait for it to finish. Return
@@ -235,11 +240,11 @@
// Set the footprint limit of the code cache.
void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
- void DoFullCollection(Thread* self)
+ void DoCollection(Thread* self, bool collect_profiling_info)
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- void RemoveUnusedCode(Thread* self)
+ void RemoveUnusedAndUnmarkedCode(Thread* self)
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -247,6 +252,10 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckLiveCompiledCodeHasProfilingInfo()
+ REQUIRES(lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
@@ -282,8 +291,8 @@
// The current footprint in bytes of the data portion of the code cache.
size_t data_end_ GUARDED_BY(lock_);
- // Whether a full collection has already been done on the current capacity.
- bool has_done_full_collection_ GUARDED_BY(lock_);
+ // Whether the last collection round increased the code cache.
+ bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
// Last time the code_cache was updated.
// It is atomic to avoid locking when reading it.
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index a4e40ad..d751e5a 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -25,6 +25,9 @@
namespace art {
namespace jit {
+// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
+static constexpr int kJitPoolThreadPthreadPriority = 9;
+
class JitCompileTask FINAL : public Task {
public:
enum TaskKind {
@@ -92,6 +95,7 @@
// There is a DCHECK in the 'AddSamples' method to ensure the thread pool
// is not null when we instrument.
thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+ thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
thread_pool_->StartWorkers(Thread::Current());
{
// Add Jit interpreter instrumentation, tells the interpreter when
@@ -183,7 +187,18 @@
return;
}
- instrumentation_cache_->AddSamples(thread, method, 1);
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
+ // instead of interpreting the method.
+ // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+ // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+ if ((profiling_info != nullptr) &&
+ (profiling_info->GetSavedEntryPoint() != nullptr) &&
+ !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
+ method->SetEntryPointFromQuickCompiledCode(profiling_info->GetSavedEntryPoint());
+ } else {
+ instrumentation_cache_->AddSamples(thread, method, 1);
+ }
}
void JitInstrumentationListener::Branch(Thread* thread,
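
The interaction between the code cache hunk above (saving entry points before a liveness poll)
and this instrumentation hunk (restoring them on the next invocation) can be sketched as follows;
the struct and function names are placeholders, not ART types:

    struct InfoSketch { const void* saved_entry_point = nullptr; };
    struct MethodSketch {
      const void* entry_point = nullptr;
      InfoSketch* profiling_info = nullptr;
    };

    // Collector side: park the compiled entry point and point the method at the
    // interpreter, so the next invocation goes through the instrumentation hook.
    void ParkEntryPoint(MethodSketch* m, const void* interpreter_bridge) {
      m->profiling_info->saved_entry_point = m->entry_point;  // assumes a ProfilingInfo exists
      m->entry_point = interpreter_bridge;
    }

    // Hot-path side: if a saved entry point exists, restore it instead of counting a
    // sample; the restored entry point marks the compiled code as live again.
    bool RestoreIfSaved(MethodSketch* m) {
      if (m->profiling_info != nullptr && m->profiling_info->saved_entry_point != nullptr) {
        m->entry_point = m->profiling_info->saved_entry_point;
        return true;
      }
      return false;
    }
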
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index 747b112..67c9b5f 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -48,9 +48,11 @@
}
}
-bool ProfileCompilationInfo::SaveProfilingInfo(const std::string& filename,
- const std::vector<ArtMethod*>& methods) {
- if (methods.empty()) {
+bool ProfileCompilationInfo::SaveProfilingInfo(
+ const std::string& filename,
+ const std::vector<ArtMethod*>& methods,
+ const std::set<DexCacheResolvedClasses>& resolved_classes) {
+ if (methods.empty() && resolved_classes.empty()) {
VLOG(profiler) << "No info to save to " << filename;
return true;
}
@@ -71,14 +73,17 @@
}
{
ScopedObjectAccess soa(Thread::Current());
- for (auto it = methods.begin(); it != methods.end(); it++) {
- const DexFile* dex_file = (*it)->GetDexFile();
- if (!info.AddData(GetProfileDexFileKey(dex_file->GetLocation()),
- dex_file->GetLocationChecksum(),
- (*it)->GetDexMethodIndex())) {
+ for (ArtMethod* method : methods) {
+ const DexFile* dex_file = method->GetDexFile();
+ if (!info.AddMethodIndex(GetProfileDexFileKey(dex_file->GetLocation()),
+ dex_file->GetLocationChecksum(),
+ method->GetDexMethodIndex())) {
return false;
}
}
+ for (const DexCacheResolvedClasses& dex_cache : resolved_classes) {
+ info.AddResolvedClasses(dex_cache);
+ }
}
if (!flock.GetFile()->ClearContent()) {
@@ -116,13 +121,14 @@
static constexpr const char kFieldSeparator = ',';
static constexpr const char kLineSeparator = '\n';
+static constexpr const char* kClassesMarker = "classes";
/**
* Serialization format:
- * dex_location1,dex_location_checksum1,method_id11,method_id12...
- * dex_location2,dex_location_checksum2,method_id21,method_id22...
+ * dex_location1,dex_location_checksum1,method_id11,method_id12...,classes,class_id1,class_id2...
+ * dex_location2,dex_location_checksum2,method_id21,method_id22...,classes,class_id1,class_id2...
* e.g.
- * app.apk,131232145,11,23,454,54
+ * app.apk,131232145,11,23,454,54,classes,1,2,4,1234
* app.apk:classes5.dex,218490184,39,13,49,1
**/
bool ProfileCompilationInfo::Save(int fd) {
@@ -133,11 +139,20 @@
for (const auto& it : info_) {
const std::string& dex_location = it.first;
const DexFileData& dex_data = it.second;
+ if (dex_data.method_set.empty() && dex_data.class_set.empty()) {
+ continue;
+ }
os << dex_location << kFieldSeparator << dex_data.checksum;
for (auto method_it : dex_data.method_set) {
os << kFieldSeparator << method_it;
}
+ if (!dex_data.class_set.empty()) {
+ os << kFieldSeparator << kClassesMarker;
+ for (auto class_id : dex_data.class_set) {
+ os << kFieldSeparator << class_id;
+ }
+ }
os << kLineSeparator;
}
@@ -168,18 +183,50 @@
}
}
-bool ProfileCompilationInfo::AddData(const std::string& dex_location,
- uint32_t checksum,
- uint16_t method_idx) {
+ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData(
+ const std::string& dex_location,
+ uint32_t checksum) {
auto info_it = info_.find(dex_location);
if (info_it == info_.end()) {
info_it = info_.Put(dex_location, DexFileData(checksum));
}
if (info_it->second.checksum != checksum) {
LOG(WARNING) << "Checksum mismatch for dex " << dex_location;
+ return nullptr;
+ }
+ return &info_it->second;
+}
+
+bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& classes) {
+ const std::string dex_location = GetProfileDexFileKey(classes.GetDexLocation());
+ const uint32_t checksum = classes.GetLocationChecksum();
+ DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+ if (data == nullptr) {
return false;
}
- info_it->second.method_set.insert(method_idx);
+ data->class_set.insert(classes.GetClasses().begin(), classes.GetClasses().end());
+ return true;
+}
+
+bool ProfileCompilationInfo::AddMethodIndex(const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t method_idx) {
+ DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+ if (data == nullptr) {
+ return false;
+ }
+ data->method_set.insert(method_idx);
+ return true;
+}
+
+bool ProfileCompilationInfo::AddClassIndex(const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t class_idx) {
+ DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+ if (data == nullptr) {
+ return false;
+ }
+ data->class_set.insert(class_idx);
return true;
}
@@ -198,12 +245,30 @@
}
for (size_t i = 2; i < parts.size(); i++) {
+ if (parts[i] == kClassesMarker) {
+ ++i;  // Skip the marker itself.
+ // All of the remaining indexes are class def indexes.
+ for (; i < parts.size(); ++i) {
+ uint32_t class_def_idx;
+ if (!ParseInt(parts[i].c_str(), &class_def_idx)) {
+ LOG(WARNING) << "Cannot parse class_def_idx " << parts[i];
+ return false;
+ } else if (class_def_idx >= std::numeric_limits<uint16_t>::max()) {
+ LOG(WARNING) << "Class def idx " << class_def_idx << " is larger than uint16_t max";
+ return false;
+ }
+ if (!AddClassIndex(dex_location, checksum, class_def_idx)) {
+ return false;
+ }
+ }
+ break;
+ }
uint32_t method_idx;
if (!ParseInt(parts[i].c_str(), &method_idx)) {
LOG(WARNING) << "Cannot parse method_idx " << parts[i];
return false;
}
- if (!AddData(dex_location, checksum, method_idx)) {
+ if (!AddMethodIndex(dex_location, checksum, method_idx)) {
return false;
}
}
@@ -280,6 +345,8 @@
}
info_it->second.method_set.insert(other_dex_data.method_set.begin(),
other_dex_data.method_set.end());
+ info_it->second.class_set.insert(other_dex_data.class_set.begin(),
+ other_dex_data.class_set.end());
}
return true;
}
@@ -347,4 +414,16 @@
return info_.Equals(other.info_);
}
+std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses() const {
+ std::set<DexCacheResolvedClasses> ret;
+ for (auto&& pair : info_) {
+ const std::string& profile_key = pair.first;
+ const DexFileData& data = pair.second;
+ DexCacheResolvedClasses classes(profile_key, data.checksum);
+ classes.AddClasses(data.class_set.begin(), data.class_set.end());
+ ret.insert(classes);
+ }
+ return ret;
+}
+
} // namespace art
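
For reference, a line in the extended text format documented above looks like
"app.apk,131232145,11,23,classes,1,2,4": dex location, checksum, method indexes, then an
optional "classes" marker followed by class def indexes. A small standalone parser sketch
(illustrative only, not the actual ProcessLine implementation; std::stoul would throw on
malformed numbers, which the real code handles with ParseInt):

    #include <cstdint>
    #include <set>
    #include <sstream>
    #include <string>
    #include <vector>

    struct ParsedLine {
      std::string dex_location;
      uint32_t checksum = 0;
      std::set<uint16_t> methods;
      std::set<uint16_t> classes;
    };

    bool ParseProfileLine(const std::string& line, ParsedLine* out) {
      std::vector<std::string> parts;
      std::stringstream ss(line);
      std::string part;
      while (std::getline(ss, part, ',')) {
        parts.push_back(part);
      }
      if (parts.size() < 3) {
        return false;  // need at least a location, a checksum and one index
      }
      out->dex_location = parts[0];
      out->checksum = static_cast<uint32_t>(std::stoul(parts[1]));
      bool in_classes = false;
      for (size_t i = 2; i < parts.size(); ++i) {
        if (parts[i] == "classes") {
          in_classes = true;  // everything after the marker is a class def index
          continue;
        }
        uint16_t idx = static_cast<uint16_t>(std::stoul(parts[i]));
        (in_classes ? out->classes : out->methods).insert(idx);
      }
      return true;
    }
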
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index edc591c..ee7ce27 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -21,6 +21,7 @@
#include <vector>
#include "atomic.h"
+#include "dex_cache_resolved_classes.h"
#include "dex_file.h"
#include "method_reference.h"
#include "safe_map.h"
@@ -28,6 +29,7 @@
namespace art {
class ArtMethod;
+class DexCacheProfileData;
// TODO: rename file.
/**
@@ -43,7 +45,8 @@
// Note that the saving proceeds only if the file can be locked for exclusive access.
// If not (the locking is not blocking), the function does not save and returns false.
static bool SaveProfilingInfo(const std::string& filename,
- const std::vector<ArtMethod*>& methods);
+ const std::vector<ArtMethod*>& methods,
+ const std::set<DexCacheResolvedClasses>& resolved_classes);
// Loads profile information from the given file descriptor.
bool Load(int fd);
@@ -68,14 +71,17 @@
bool Equals(const ProfileCompilationInfo& other);
static std::string GetProfileDexFileKey(const std::string& dex_location);
- private:
- bool AddData(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
- bool ProcessLine(const std::string& line);
+ // Returns the class descriptors for all of the classes in the profiles' class sets.
+ // Note that the dex location is actually the profile key; the caller needs to call back
+ // into the profile info to map it back to the dex location.
+ std::set<DexCacheResolvedClasses> GetResolvedClasses() const;
+ private:
struct DexFileData {
explicit DexFileData(uint32_t location_checksum) : checksum(location_checksum) {}
uint32_t checksum;
std::set<uint16_t> method_set;
+ std::set<uint16_t> class_set;
bool operator==(const DexFileData& other) const {
return checksum == other.checksum && method_set == other.method_set;
@@ -84,6 +90,13 @@
using DexFileToProfileInfoMap = SafeMap<const std::string, DexFileData>;
+ DexFileData* GetOrAddDexFileData(const std::string& dex_location, uint32_t checksum);
+ bool AddMethodIndex(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
+ bool AddClassIndex(const std::string& dex_location, uint32_t checksum, uint16_t class_idx);
+ bool AddResolvedClasses(const DexCacheResolvedClasses& classes)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ProcessLine(const std::string& line);
+
friend class ProfileCompilationInfoTest;
friend class CompilerDriverProfileTest;
friend class ProfileAssistantTest;
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 482ea06..fdd8c6e 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -53,7 +53,7 @@
uint32_t checksum,
uint16_t method_index,
ProfileCompilationInfo* info) {
- return info->AddData(dex_location, checksum, method_index);
+ return info->AddMethodIndex(dex_location, checksum, method_index);
}
uint32_t GetFd(const ScratchFile& file) {
@@ -73,8 +73,11 @@
ASSERT_NE(class_loader, nullptr);
// Save virtual methods from Main.
+ std::set<DexCacheResolvedClasses> resolved_classes;
std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;");
- ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(), main_methods));
+ ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(),
+ main_methods,
+ resolved_classes));
// Check that what we saved is in the profile.
ProfileCompilationInfo info1;
@@ -89,7 +92,9 @@
// Save virtual methods from Second.
std::vector<ArtMethod*> second_methods = GetVirtualMethods(class_loader, "LSecond;");
- ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(), second_methods));
+ ASSERT_TRUE(ProfileCompilationInfo::SaveProfilingInfo(profile.GetFilename(),
+ second_methods,
+ resolved_classes));
// Check that what we saved is in the profile (methods form Main and Second).
ProfileCompilationInfo info2;
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index b1a5a4b..ab26f6f 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -32,6 +32,7 @@
static constexpr const uint64_t kRandomDelayMaxMs = 20 * 1000; // 20 seconds
static constexpr const uint64_t kMaxBackoffMs = 5 * 60 * 1000; // 5 minutes
static constexpr const uint64_t kSavePeriodMs = 10 * 1000; // 10 seconds
+static constexpr const uint64_t kInitialDelayMs = 2 * 1000; // 2 seconds
static constexpr const double kBackoffCoef = 1.5;
static constexpr const uint32_t kMinimumNrOrMethodsToSave = 10;
@@ -45,6 +46,7 @@
: jit_code_cache_(jit_code_cache),
code_cache_last_update_time_ns_(0),
shutting_down_(false),
+ first_profile_(true),
wait_lock_("ProfileSaver wait lock"),
period_condition_("ProfileSaver period condition", wait_lock_) {
AddTrackedLocations(output_filename, code_paths);
@@ -56,13 +58,18 @@
uint64_t save_period_ms = kSavePeriodMs;
VLOG(profiler) << "Save profiling information every " << save_period_ms << " ms";
- while (true) {
- if (ShuttingDown(self)) {
- break;
- }
- uint64_t random_sleep_delay_ms = rand() % kRandomDelayMaxMs;
- uint64_t sleep_time_ms = save_period_ms + random_sleep_delay_ms;
+ bool first_iteration = true;
+ while (!ShuttingDown(self)) {
+ uint64_t sleep_time_ms;
+ if (first_iteration) {
+ // Sleep for a shorter time on the first iteration, since we want to record the loaded
+ // classes shortly after app launch.
+ sleep_time_ms = kInitialDelayMs;
+ } else {
+ const uint64_t random_sleep_delay_ms = rand() % kRandomDelayMaxMs;
+ sleep_time_ms = save_period_ms + random_sleep_delay_ms;
+ }
{
MutexLock mu(self, wait_lock_);
period_condition_.TimedWait(self, sleep_time_ms, 0);
@@ -81,13 +88,14 @@
// Reset the period to the initial value as it's highly likely to JIT again.
save_period_ms = kSavePeriodMs;
}
+ first_iteration = false;
}
}
bool ProfileSaver::ProcessProfilingInfo() {
uint64_t last_update_time_ns = jit_code_cache_->GetLastUpdateTimeNs();
- if (last_update_time_ns - code_cache_last_update_time_ns_
- < kMinimumTimeBetweenCodeCacheUpdatesNs) {
+ if (!first_profile_ && last_update_time_ns - code_cache_last_update_time_ns_
+ < kMinimumTimeBetweenCodeCacheUpdatesNs) {
VLOG(profiler) << "Not enough time has passed since the last code cache update."
<< "Last update: " << last_update_time_ns
<< " Last save: " << code_cache_last_update_time_ns_;
@@ -113,19 +121,27 @@
ScopedObjectAccess soa(Thread::Current());
jit_code_cache_->GetCompiledArtMethods(locations, methods);
}
- if (methods.size() < kMinimumNrOrMethodsToSave) {
+ // Always save for the first one for loaded classes profile.
+ if (methods.size() < kMinimumNrOrMethodsToSave && !first_profile_) {
VLOG(profiler) << "Not enough information to save to: " << filename
<<" Nr of methods: " << methods.size();
return false;
}
- if (!ProfileCompilationInfo::SaveProfilingInfo(filename, methods)) {
+ std::set<DexCacheResolvedClasses> resolved_classes;
+ if (first_profile_) {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ resolved_classes = class_linker->GetResolvedClasses(/*ignore boot classes*/true);
+ }
+
+ if (!ProfileCompilationInfo::SaveProfilingInfo(filename, methods, resolved_classes)) {
LOG(WARNING) << "Could not save profiling info to " << filename;
return false;
}
VLOG(profiler) << "Profile process time: " << PrettyDuration(NanoTime() - start);
}
+ first_profile_ = false;
return true;
}
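
The scheduling change above boils down to: a short fixed delay on the first iteration (so the
loaded classes are captured soon after launch), then the usual period plus random jitter. A
standalone sketch of the computation (constants copied from this file; <random> is used instead
of rand() only to keep the example self-contained):

    #include <cstdint>
    #include <random>

    uint64_t NextSleepMs(bool first_iteration) {
      constexpr uint64_t kInitialDelayMs = 2 * 1000;     // 2 seconds
      constexpr uint64_t kSavePeriodMs = 10 * 1000;      // 10 seconds
      constexpr uint64_t kRandomDelayMaxMs = 20 * 1000;  // 20 seconds
      if (first_iteration) {
        return kInitialDelayMs;
      }
      static std::mt19937 rng{std::random_device{}()};
      std::uniform_int_distribution<uint64_t> jitter(0, kRandomDelayMaxMs - 1);
      return kSavePeriodMs + jitter(rng);
    }
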
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 3342790..21017c1 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -74,6 +74,7 @@
GUARDED_BY(Locks::profiler_lock_);
uint64_t code_cache_last_update_time_ns_;
bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
+ bool first_profile_ = true;
// Save period condition support.
Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index ab72373..a8c056c 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -126,12 +126,25 @@
is_method_being_compiled_ = value;
}
+ void SetSavedEntryPoint(const void* entry_point) {
+ saved_entry_point_ = entry_point;
+ }
+
+ const void* GetSavedEntryPoint() const {
+ return saved_entry_point_;
+ }
+
+ void ClearInlineCaches() {
+ memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+ }
+
private:
ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
: number_of_inline_caches_(entries.size()),
method_(method),
- is_method_being_compiled_(false) {
- memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+ is_method_being_compiled_(false),
+ saved_entry_point_(nullptr) {
+ ClearInlineCaches();
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc_ = entries[i];
}
@@ -148,6 +161,10 @@
// TODO: Make the JIT code cache lock global.
bool is_method_being_compiled_;
+ // Entry point of the corresponding ArtMethod, while the JIT code cache
+ // is poking for the liveness of compiled code.
+ const void* saved_entry_point_;
+
// Dynamically allocated array of size `number_of_inline_caches_`.
InlineCache cache_[0];
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 91a9870..5a07dee 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -21,25 +21,36 @@
namespace art {
namespace mirror {
+template <bool kTransactionActive>
bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
- auto* interface_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
- SetArtMethod(method);
- SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass());
- SetFieldObject<false>(
+ auto* interface_method = method->GetInterfaceMethodIfProxy(
+ kTransactionActive
+ ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+ : sizeof(void*));
+ SetArtMethod<kTransactionActive>(method);
+ SetFieldObject<kTransactionActive>(DeclaringClassOffset(), method->GetDeclaringClass());
+ SetFieldObject<kTransactionActive>(
DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass());
- SetField32<false>(AccessFlagsOffset(), method->GetAccessFlags());
- SetField32<false>(DexMethodIndexOffset(), method->GetDexMethodIndex());
+ SetField32<kTransactionActive>(AccessFlagsOffset(), method->GetAccessFlags());
+ SetField32<kTransactionActive>(DexMethodIndexOffset(), method->GetDexMethodIndex());
return true;
}
+template bool AbstractMethod::CreateFromArtMethod<false>(ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<true>(ArtMethod* method);
+
ArtMethod* AbstractMethod::GetArtMethod() {
return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
}
+template <bool kTransactionActive>
void AbstractMethod::SetArtMethod(ArtMethod* method) {
- SetField64<false>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
+ SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
}
+template void AbstractMethod::SetArtMethod<false>(ArtMethod* method);
+template void AbstractMethod::SetArtMethod<true>(ArtMethod* method);
+
mirror::Class* AbstractMethod::GetDeclaringClass() {
return GetFieldObject<mirror::Class>(DeclaringClassOffset());
}
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index dc084be..a39f94d 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,11 +34,13 @@
class MANAGED AbstractMethod : public AccessibleObject {
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
+ template <bool kTransactionActive = false>
bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
// Only used by the image writer.
+ template <bool kTransactionActive = false>
void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 9190e44..7900eac 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1054,5 +1054,89 @@
return (type_id == nullptr) ? DexFile::kDexNoIndex : dex_file.GetIndexForTypeId(*type_id);
}
+template <bool kTransactionActive>
+mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args) {
+ // Covariant return types permit the class to define multiple
+ // methods with the same name and parameter types. Prefer to
+ // return a non-synthetic method in such situations. We may
+ // still return a synthetic method to handle situations like
+ // escalated visibility. We never return miranda methods that
+ // were synthesized by the runtime.
+ constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
+ StackHandleScope<3> hs(self);
+ auto h_method_name = hs.NewHandle(name);
+ if (UNLIKELY(h_method_name.Get() == nullptr)) {
+ ThrowNullPointerException("name == null");
+ return nullptr;
+ }
+ auto h_args = hs.NewHandle(args);
+ Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+ ArtMethod* result = nullptr;
+ const size_t pointer_size = kTransactionActive
+ ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+ : sizeof(void*);
+ for (auto& m : h_klass->GetDeclaredVirtualMethods(pointer_size)) {
+ auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+ // May cause thread suspension.
+ mirror::String* np_name = np_method->GetNameAsString(self);
+ if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return nullptr;
+ }
+ continue;
+ }
+ auto modifiers = m.GetAccessFlags();
+ if ((modifiers & kSkipModifiers) == 0) {
+ return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+ }
+ if ((modifiers & kAccMiranda) == 0) {
+ result = &m; // Remember as potential result if it's not a miranda method.
+ }
+ }
+ if (result == nullptr) {
+ for (auto& m : h_klass->GetDirectMethods(pointer_size)) {
+ auto modifiers = m.GetAccessFlags();
+ if ((modifiers & kAccConstructor) != 0) {
+ continue;
+ }
+ auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+ // May cause thread suspension.
+ mirror::String* np_name = np_method->GetNameAsString(self);
+ if (np_name == nullptr) {
+ self->AssertPendingException();
+ return nullptr;
+ }
+ if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return nullptr;
+ }
+ continue;
+ }
+ if ((modifiers & kSkipModifiers) == 0) {
+ return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+ }
+ // Direct methods cannot be miranda methods, so this potential result must be synthetic.
+ result = &m;
+ }
+ }
+ return result != nullptr
+ ? mirror::Method::CreateFromArtMethod<kTransactionActive>(self, result)
+ : nullptr;
+}
+
+template
+mirror::Method* Class::GetDeclaredMethodInternal<false>(Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Method* Class::GetDeclaredMethodInternal<true>(Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args);
+
} // namespace mirror
} // namespace art
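
The selection rule implemented by GetDeclaredMethodInternal above can be summarized as: among
methods with a matching name and parameter types, prefer a non-synthetic one, fall back to a
synthetic one, and never return a miranda method. A schematic restatement (the flag values below
are placeholders, not ART's kAccSynthetic/kAccMiranda bits):

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kSynthetic = 1u << 0;  // placeholder for kAccSynthetic
    constexpr uint32_t kMiranda   = 1u << 1;  // placeholder for kAccMiranda

    struct CandidateMethod { uint32_t access_flags; };

    const CandidateMethod* PickDeclaredMethod(const std::vector<CandidateMethod>& matches) {
      const CandidateMethod* fallback = nullptr;
      for (const CandidateMethod& m : matches) {
        if ((m.access_flags & (kSynthetic | kMiranda)) == 0) {
          return &m;  // best case: neither synthetic nor miranda
        }
        if ((m.access_flags & kMiranda) == 0) {
          fallback = &m;  // synthetic, but still acceptable as a fallback
        }
      }
      return fallback;  // may be null when only miranda methods matched
    }
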
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 6e3463c..7082c88 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -55,6 +55,7 @@
class Constructor;
class DexCache;
class IfTable;
+class Method;
// C++ mirror of java.lang.Class
class MANAGED Class FINAL : public Object {
@@ -759,6 +760,13 @@
size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template <bool kTransactionActive = false>
+ static Method* GetDeclaredMethodInternal(Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 85c52e9..97973e6 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -51,15 +51,19 @@
array_class_ = GcRoot<Class>(nullptr);
}
+template <bool kTransactionActive>
Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+ static_cast<AbstractMethod*>(ret)->CreateFromArtMethod<kTransactionActive>(method);
}
return ret;
}
+template Method* Method::CreateFromArtMethod<false>(Thread* self, ArtMethod* method);
+template Method* Method::CreateFromArtMethod<true>(Thread* self, ArtMethod* method);
+
void Method::VisitRoots(RootVisitor* visitor) {
static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 0c28e4f..12a72fe 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -28,6 +28,7 @@
// C++ mirror of java.lang.reflect.Method.
class MANAGED Method : public AbstractMethod {
public:
+ template <bool kTransactionActive = false>
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index b5d859b..bf24de5 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -371,70 +371,13 @@
static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
jobject name, jobjectArray args) {
- // Covariant return types permit the class to define multiple
- // methods with the same name and parameter types. Prefer to
- // return a non-synthetic method in such situations. We may
- // still return a synthetic method to handle situations like
- // escalated visibility. We never return miranda methods that
- // were synthesized by the runtime.
- constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
ScopedFastNativeObjectAccess soa(env);
- StackHandleScope<3> hs(soa.Self());
- auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
- if (UNLIKELY(h_method_name.Get() == nullptr)) {
- ThrowNullPointerException("name == null");
- return nullptr;
- }
- auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
- Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
- ArtMethod* result = nullptr;
- for (auto& m : h_klass->GetDeclaredVirtualMethods(sizeof(void*))) {
- auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
- // May cause thread suspension.
- mirror::String* np_name = np_method->GetNameAsString(soa.Self());
- if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
- if (UNLIKELY(soa.Self()->IsExceptionPending())) {
- return nullptr;
- }
- continue;
- }
- auto modifiers = m.GetAccessFlags();
- if ((modifiers & kSkipModifiers) == 0) {
- return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
- }
- if ((modifiers & kAccMiranda) == 0) {
- result = &m; // Remember as potential result if it's not a miranda method.
- }
- }
- if (result == nullptr) {
- for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
- auto modifiers = m.GetAccessFlags();
- if ((modifiers & kAccConstructor) != 0) {
- continue;
- }
- auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
- // May cause thread suspension.
- mirror::String* np_name = np_method->GetNameAsString(soa.Self());
- if (np_name == nullptr) {
- soa.Self()->AssertPendingException();
- return nullptr;
- }
- if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
- if (UNLIKELY(soa.Self()->IsExceptionPending())) {
- return nullptr;
- }
- continue;
- }
- if ((modifiers & kSkipModifiers) == 0) {
- return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
- }
- // Direct methods cannot be miranda methods, so this potential result must be synthetic.
- result = &m;
- }
- }
- return result != nullptr ?
- soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), result)) :
- nullptr;
+ mirror::Method* result = mirror::Class::GetDeclaredMethodInternal(
+ soa.Self(),
+ DecodeClass(soa, javaThis),
+ soa.Decode<mirror::String*>(name),
+ soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
+ return soa.AddLocalReference<jobject>(result);
}
static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 34d6a37..5a219ef 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -50,8 +50,10 @@
return soa.AddLocalReference<jstring>(result);
}
+// The char array passed as `java_data` must not be a null reference.
static jstring StringFactory_newStringFromChars(JNIEnv* env, jclass, jint offset,
jint char_count, jcharArray java_data) {
+ DCHECK(java_data != nullptr);
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::CharArray> char_array(hs.NewHandle(soa.Decode<mirror::CharArray*>(java_data)));
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
index 04f0ba0..4d2ea67 100644
--- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
@@ -16,13 +16,14 @@
#include "java_util_concurrent_atomic_AtomicLong.h"
+#include "arch/instruction_set.h"
#include "atomic.h"
#include "jni_internal.h"
namespace art {
static jboolean AtomicLong_VMSupportsCS8(JNIEnv*, jclass) {
- return QuasiAtomic::LongAtomicsUseMutexes() ? JNI_FALSE : JNI_TRUE;
+ return QuasiAtomic::LongAtomicsUseMutexes(kRuntimeISA) ? JNI_FALSE : JNI_TRUE;
}
static JNINativeMethod gMethods[] = {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a82974c..8c813b4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -316,6 +316,7 @@
linear_alloc_.reset();
low_4gb_arena_pool_.reset();
arena_pool_.reset();
+ jit_arena_pool_.reset();
MemMap::Shutdown();
ATRACE_END();
@@ -1019,10 +1020,13 @@
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
const bool use_malloc = IsAotCompiler();
- arena_pool_.reset(new ArenaPool(use_malloc, false));
+ arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
+ jit_arena_pool_.reset(
+ new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));
+
if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
// 4gb, no malloc. Explanation in header.
- low_4gb_arena_pool_.reset(new ArenaPool(false, true));
+ low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
}
linear_alloc_.reset(CreateLinearAlloc());
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8aac4ce..83e77d2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -561,6 +561,9 @@
ArenaPool* GetArenaPool() {
return arena_pool_.get();
}
+ ArenaPool* GetJitArenaPool() {
+ return jit_arena_pool_.get();
+ }
const ArenaPool* GetArenaPool() const {
return arena_pool_.get();
}
@@ -669,6 +672,7 @@
gc::Heap* heap_;
+ std::unique_ptr<ArenaPool> jit_arena_pool_;
std::unique_ptr<ArenaPool> arena_pool_;
// Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
// compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b1f1ed6..ee5da8e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -739,7 +739,7 @@
// Check class linker linear allocs.
mirror::Class* klass = method->GetDeclaringClass();
LinearAlloc* const class_linear_alloc = (klass != nullptr)
- ? ClassLinker::GetAllocatorForClassLoader(klass->GetClassLoader())
+ ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
: linear_alloc;
if (!class_linear_alloc->Contains(method)) {
// Check image space.
diff --git a/runtime/thread.h b/runtime/thread.h
index 97c47e1..234750c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1324,8 +1324,8 @@
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
- last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
- thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
+ last_no_thread_suspension_cause(nullptr), thread_local_objects(0),
+ thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr),
thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
@@ -1440,10 +1440,12 @@
QuickEntryPoints quick_entrypoints;
// Thread-local allocation pointer.
+ size_t thread_local_objects;
uint8_t* thread_local_start;
+ // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8-byte aligned for
+ // potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
- size_t thread_local_objects;
// Mterp jump table bases.
void* mterp_current_ibase;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 5a4dfb8..2fba805 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -16,6 +16,11 @@
#include "thread_pool.h"
+#include <pthread.h>
+
+#include <sys/time.h>
+#include <sys/resource.h>
+
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/logging.h"
@@ -53,6 +58,19 @@
CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "thread pool worker shutdown");
}
+void ThreadPoolWorker::SetPthreadPriority(int priority) {
+ CHECK_GE(priority, PRIO_MIN);
+ CHECK_LE(priority, PRIO_MAX);
+#if defined(__ANDROID__)
+ int result = setpriority(PRIO_PROCESS, pthread_gettid_np(pthread_), priority);
+ if (result != 0) {
+ PLOG(ERROR) << "Failed to setpriority to: " << priority;
+ }
+#else
+ UNUSED(priority);
+#endif
+}
+
void ThreadPoolWorker::Run() {
Thread* self = Thread::Current();
Task* task = nullptr;
@@ -214,4 +232,10 @@
return tasks_.size();
}
+void ThreadPool::SetPthreadPriority(int priority) {
+ for (ThreadPoolWorker* worker : threads_) {
+ worker->SetPthreadPriority(priority);
+ }
+}
+
} // namespace art
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 6cd4ad3..b6c6f02 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -59,6 +59,9 @@
virtual ~ThreadPoolWorker();
+ // Set the "nice" priorty for this worker.
+ void SetPthreadPriority(int priority);
+
protected:
ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
static void* Callback(void* arg) REQUIRES(!Locks::mutator_lock_);
@@ -111,6 +114,9 @@
// thread count of the thread pool.
void SetMaxActiveWorkers(size_t threads) REQUIRES(!task_queue_lock_);
+ // Set the "nice" priorty for threads in the pool.
+ void SetPthreadPriority(int priority);
+
protected:
// get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_);
diff --git a/test/004-ReferenceMap/build b/test/004-ReferenceMap/build
deleted file mode 100644
index 08987b5..0000000
--- a/test/004-ReferenceMap/build
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 2dbd7e8..2d26fa1 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -65,19 +65,19 @@
// We eliminate the non-live registers at a return, so only v3 is live.
// Note that it is OK for a compiler to not have a dex map at this dex PC because
// a return is not necessarily a safepoint.
- CHECK_REGS_CONTAIN_REFS(0x13U, false, 3); // v3: y
+ CHECK_REGS_CONTAIN_REFS(0x14U, false, 2); // v2: y
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
- // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1);
- // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
- // v5 is removed from the root set because there is a "merge" operation.
- // See 0015: if-nez v2, 001f.
- CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
+ // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 4, 2, 1);
+ // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x1eU, true, 8, 4, 2, 1);
+ // v4 is removed from the root set because there is a "merge" operation.
+ // See 0016: if-nez v2, 0020.
+ CHECK_REGS_CONTAIN_REFS(0x20U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
}
- CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x22U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
@@ -94,124 +94,79 @@
}
};
-// Dex instructions for the function 'f' in ReferenceMap.java
-// Virtual methods -
-// #0 : (in LReferenceMap;)
-// name : 'f'
-// type : '()Ljava/lang/Object;'
-// access : 0x0000 ()
-// code -
-// registers : 9
-// ins : 1
-// outs : 2
-// insns size : 51 16-bit code units
-// |[0001e8] ReferenceMap.f:()Ljava/lang/Object;
-// |0000: const/4 v4, #int 2 // #2
-// |0001: const/4 v7, #int 0 // #0
-// |0002: const/4 v6, #int 1 // #1
+// DEX code
//
-// 0:[Unknown],1:[Unknown],2:[Unknown],3:[Unknown],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0003: new-array v1, v4, [Ljava/lang/Object; // type@0007
-// |0005: const/4 v2, #int 0 // #0
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Unknown],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0006: new-instance v3, Ljava/lang/Object; // type@0003
-
-// [Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Uninitialized Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0008: +invoke-object-init/range {}, Ljava/lang/Object;.<init>:()V // method@0005
-// |000b: const/4 v4, #int 2 // #2
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |000c: aput-object v3, v1, v4
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |000e: aput-object v3, v1, v6
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0010: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c
-
-// 0:[Conflict],1:[Conflict],2:[Conflict],3:[Reference: java.lang.Object],4:[Conflict],5:[Conflict],6:[Conflict],7:[Conflict],8:[Conflict],
-// |0013: return-object v3
-// |0014: move-exception v0
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0015: if-nez v2, 001f // +000a
-// |0017: const/4 v4, #int 1 // #1
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 1],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0018: new-instance v5, Ljava/lang/Object; // type@0003
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 1],5:[Uninitialized Reference: java.lang.Object],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |001a: +invoke-object-init/range {}, Ljava/lang/Object;.<init>:()V // method@0005
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 1],5:[Reference: java.lang.Object],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |001d: aput-object v5, v1, v4
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |001f: aput-object v2, v1, v6
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0021: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c
-// |0024: move-object v3, v2
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0025: goto 0013 // -0012
-// |0026: move-exception v4
-
-// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[Reference: java.lang.Throwable],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0027: aput-object v2, v1, v6
-
-// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[Reference: java.lang.Throwable],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0029: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c
-
-// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Conflict],4:[Reference: java.lang.Throwable],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |002c: throw v4
-// |002d: move-exception v4
-// |002e: move-object v2, v3
-
-// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[Reference: java.lang.Throwable],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |002f: goto 0027 // -0008
-// |0030: move-exception v0
-// |0031: move-object v2, v3
-
-// 0:[Reference: java.lang.Exception],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
-// |0032: goto 0015 // -001d
-// catches : 3
-// 0x0006 - 0x000b
-// Ljava/lang/Exception; -> 0x0014
-// <any> -> 0x0026
-// 0x000c - 0x000e
-// Ljava/lang/Exception; -> 0x0030
-// <any> -> 0x002d
-// 0x0018 - 0x001f
-// <any> -> 0x0026
-// positions :
-// 0x0003 line=8
-// 0x0005 line=9
-// 0x0006 line=11
-// 0x000b line=12
-// 0x000e line=18
-// 0x0010 line=19
-// 0x0013 line=21
-// 0x0014 line=13
-// 0x0015 line=14
-// 0x0017 line=15
-// 0x001f line=18
-// 0x0021 line=19
-// 0x0025 line=20
-// 0x0026 line=18
-// 0x0029 line=19
-// 0x002d line=18
-// 0x0030 line=13
-// locals :
-// 0x0006 - 0x000b reg=2 y Ljava/lang/Object;
-// 0x000b - 0x0013 reg=3 y Ljava/lang/Object;
-// 0x0014 - 0x0015 reg=2 y Ljava/lang/Object;
-// 0x0015 - 0x0026 reg=0 ex Ljava/lang/Exception;
-// 0x002d - 0x0032 reg=3 y Ljava/lang/Object;
-// 0x0005 - 0x0033 reg=1 x [Ljava/lang/Object;
-// 0x0032 - 0x0033 reg=2 y Ljava/lang/Object;
-// 0x0000 - 0x0033 reg=8 this LReferenceMap;
+// 0000: const/4 v4, #int 2 // #2
+// 0001: const/4 v7, #int 0 // #0
+// 0002: const/4 v6, #int 1 // #1
+// 0003: new-array v1, v4, [Ljava/lang/Object; // type@0007
+// 0005: const/4 v2, #int 0 // #0
+// 0006: new-instance v3, Ljava/lang/Object; // type@0003
+// 0008: invoke-direct {v3}, Ljava/lang/Object;.<init>:()V // method@0004
+// 000b: const/4 v4, #int 2 // #2
+// 000c: aput-object v3, v1, v4
+// 000e: aput-object v3, v1, v6
+// 0010: invoke-virtual {v8, v7}, LMain;.refmap:(I)I // method@0003
+// 0013: move-object v2, v3
+// 0014: return-object v2
+// 0015: move-exception v0
+// 0016: if-nez v2, 0020 // +000a
+// 0018: new-instance v4, Ljava/lang/Object; // type@0003
+// 001a: invoke-direct {v4}, Ljava/lang/Object;.<init>:()V // method@0004
+// 001d: const/4 v5, #int 1 // #1
+// 001e: aput-object v4, v1, v5
+// 0020: aput-object v2, v1, v6
+// 0022: invoke-virtual {v8, v7}, LMain;.refmap:(I)I // method@0003
+// 0025: goto 0014 // -0011
+// 0026: move-exception v4
+// 0027: aput-object v2, v1, v6
+// 0029: invoke-virtual {v8, v7}, LMain;.refmap:(I)I // method@0003
+// 002c: throw v4
+// 002d: move-exception v4
+// 002e: move-object v2, v3
+// 002f: goto 0027 // -0008
+// 0030: move-exception v0
+// 0031: move-object v2, v3
+// 0032: goto 0016 // -001c
+// catches : 3
+// 0x0006 - 0x000b
+// Ljava/lang/Exception; -> 0x0015
+// <any> -> 0x0026
+// 0x000c - 0x000e
+// Ljava/lang/Exception; -> 0x0030
+// <any> -> 0x002d
+// 0x0018 - 0x0020
+// <any> -> 0x0026
+// positions :
+// 0x0003 line=22
+// 0x0005 line=23
+// 0x0006 line=25
+// 0x000b line=26
+// 0x000e line=32
+// 0x0010 line=33
+// 0x0014 line=35
+// 0x0015 line=27
+// 0x0016 line=28
+// 0x0018 line=29
+// 0x0020 line=32
+// 0x0022 line=33
+// 0x0026 line=31
+// 0x0027 line=32
+// 0x0029 line=33
+// 0x002c line=31
+// 0x0030 line=27
+// locals :
+// 0x0006 - 0x000b reg=2 y Ljava/lang/Object;
+// 0x000b - 0x0014 reg=3 y Ljava/lang/Object;
+// 0x0015 - 0x0016 reg=2 y Ljava/lang/Object;
+// 0x0016 - 0x0026 reg=0 ex Ljava/lang/Exception;
+// 0x002d - 0x002f reg=3 y Ljava/lang/Object;
+// 0x002f - 0x0030 reg=2 y Ljava/lang/Object;
+// 0x0030 - 0x0032 reg=3 y Ljava/lang/Object;
+// 0x0031 - 0x0033 reg=0 ex Ljava/lang/Exception;
+// 0x0005 - 0x0033 reg=1 x [Ljava/lang/Object;
+// 0x0032 - 0x0033 reg=2 y Ljava/lang/Object;
+// 0x0000 - 0x0033 reg=8 this LMain;
extern "C" JNIEXPORT jint JNICALL Java_Main_refmap(JNIEnv*, jobject, jint count) {
// Visitor
diff --git a/test/004-StackWalk/build b/test/004-StackWalk/build
deleted file mode 100644
index 08987b5..0000000
--- a/test/004-StackWalk/build
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 3a5854b..51bb68f 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -42,31 +42,31 @@
// Given the method name and the number of times the method has been called,
// we know the Dex registers with live reference values. Assert that what we
// find is what is expected.
- if (m_name == "f") {
+ if (m_name == "$noinline$f") {
if (gJava_StackWalk_refmap_calls == 1) {
CHECK_EQ(1U, GetDexPc());
- CHECK_REGS(4);
+ CHECK_REGS(1); // v1: this
} else {
CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
CHECK_EQ(5U, GetDexPc());
- CHECK_REGS(4);
+ CHECK_REGS(1); // v1: this
}
} else if (m_name == "g") {
if (gJava_StackWalk_refmap_calls == 1) {
- CHECK_EQ(0xcU, GetDexPc());
- CHECK_REGS(0, 2); // Note that v1 is not in the minimal root set
+ CHECK_EQ(0xdU, GetDexPc());
+ CHECK_REGS(0, 2); // v2: this (Note that v1 is not in the minimal root set)
} else {
CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
- CHECK_EQ(0xcU, GetDexPc());
+ CHECK_EQ(0xdU, GetDexPc());
CHECK_REGS(0, 2);
}
} else if (m_name == "shlemiel") {
if (gJava_StackWalk_refmap_calls == 1) {
- CHECK_EQ(0x380U, GetDexPc());
+ CHECK_EQ(0x393U, GetDexPc());
CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
} else {
CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
- CHECK_EQ(0x380U, GetDexPc());
+ CHECK_EQ(0x393U, GetDexPc());
CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
}
}
diff --git a/test/089-many-methods/build b/test/089-many-methods/build
index ff77c60..58144e1 100644
--- a/test/089-many-methods/build
+++ b/test/089-many-methods/build
@@ -43,8 +43,4 @@
printf("}\n") > fileName;
}'
-# The test relies on the error message produced by dx, not jack, so keep building with dx for now
-# (b/19467889).
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx1024m --dex --no-optimize classes
+./default-build
diff --git a/test/089-many-methods/expected.txt b/test/089-many-methods/expected.txt
index b74e0ee..bfee8b3 100644
--- a/test/089-many-methods/expected.txt
+++ b/test/089-many-methods/expected.txt
@@ -1,6 +1,2 @@
-
-trouble writing output: Too many field references: 131000; max is 65536.
-You may try using --multi-dex option.
-References by package:
-131000 default
-build exit status: 2
+ERROR: Dex writing phase: classes.dex has too many IDs. Try using multi-dex
+build exit status: 4
diff --git a/test/138-duplicate-classes-check2/build b/test/138-duplicate-classes-check2/build
index abcbbb8..d346251 100755
--- a/test/138-duplicate-classes-check2/build
+++ b/test/138-duplicate-classes-check2/build
@@ -24,9 +24,17 @@
${JAVAC} -d classes-ex `find src-ex -name '*.java'`
rm classes-ex/A.class
-if [ ${NEED_DEX} = "true" ]; then
+if [ ${USE_JACK} = "true" ]; then
+ jar cf classes.jill.jar -C classes .
+ ${JACK} --import classes.jill.jar --output-dex .
+ zip ${TEST_NAME}.jar classes.dex
+
+ jar cf classes-ex.jill.jar -C classes-ex .
+ ${JACK} --import classes-ex.jill.jar --output-dex .
+ zip ${TEST_NAME}-ex.jar classes.dex
+elif [ ${NEED_DEX} = "true" ]; then
${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
- zip $TEST_NAME.jar classes.dex
+ zip ${TEST_NAME}.jar classes.dex
${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
zip ${TEST_NAME}-ex.jar classes.dex
fi
diff --git a/test/145-alloc-tracking-stress/expected.txt b/test/145-alloc-tracking-stress/expected.txt
new file mode 100644
index 0000000..134d8d0
--- /dev/null
+++ b/test/145-alloc-tracking-stress/expected.txt
@@ -0,0 +1 @@
+Finishing
diff --git a/test/145-alloc-tracking-stress/info.txt b/test/145-alloc-tracking-stress/info.txt
new file mode 100644
index 0000000..443062d
--- /dev/null
+++ b/test/145-alloc-tracking-stress/info.txt
@@ -0,0 +1 @@
+Regression test for b/18661622
diff --git a/test/145-alloc-tracking-stress/src/Main.java b/test/145-alloc-tracking-stress/src/Main.java
new file mode 100644
index 0000000..752fdd9
--- /dev/null
+++ b/test/145-alloc-tracking-stress/src/Main.java
@@ -0,0 +1,74 @@
+/*
+
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.Map;
+
+public class Main implements Runnable {
+ static final int numberOfThreads = 4;
+ static final int totalOperations = 1000;
+ static Method enableAllocTrackingMethod;
+ static Object holder;
+ static volatile boolean trackingThreadDone = false;
+ int threadIndex;
+
+ Main(int index) {
+ threadIndex = index;
+ }
+
+ public static void main(String[] args) throws Exception {
+ Class klass = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
+ if (klass == null) {
+ throw new AssertionError("Couldn't find DdmVmInternal class");
+ }
+ enableAllocTrackingMethod = klass.getDeclaredMethod("enableRecentAllocations",
+ Boolean.TYPE);
+ if (enableAllocTrackingMethod == null) {
+ throw new AssertionError("Couldn't find enableRecentAllocations method");
+ }
+
+ final Thread[] threads = new Thread[numberOfThreads];
+ for (int t = 0; t < threads.length; t++) {
+ threads[t] = new Thread(new Main(t));
+ threads[t].start();
+ }
+ for (Thread t : threads) {
+ t.join();
+ }
+ System.out.println("Finishing");
+ }
+
+ public void run() {
+ if (threadIndex == 0) {
+ for (int i = 0; i < totalOperations; ++i) {
+ try {
+ enableAllocTrackingMethod.invoke(null, true);
+ holder = new Object();
+ enableAllocTrackingMethod.invoke(null, false);
+ } catch (Exception e) {
+ System.out.println(e);
+ return;
+ }
+ }
+ trackingThreadDone = true;
+ } else {
+ while (!trackingThreadDone) {
+ holder = new Object();
+ }
+ }
+ }
+}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 93fe397..b7712a7 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -51,6 +51,21 @@
}
}
+ private static int $inline$int(int x) {
+ return x;
+ }
+
+ private static long $inline$long(long x) {
+ return x;
+ }
+
+ private static float $inline$float(float x) {
+ return x;
+ }
+
+ private static double $inline$double(double x) {
+ return x;
+ }
// Wrappers around methods located in file TestCmp.smali.
@@ -194,121 +209,119 @@
return y;
}
-
/**
* Exercise constant folding on addition.
*/
- /// CHECK-START: int Main.IntAddition1() constant_folding (before)
+ /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Add:i\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: int Main.IntAddition1() constant_folding (after)
+ /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: int Main.IntAddition1() constant_folding (after)
+ /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (after)
/// CHECK-NOT: Add
public static int IntAddition1() {
int a, b, c;
- a = 1;
- b = 2;
+ a = $inline$int(1);
+ b = $inline$int(2);
c = a + b;
return c;
}
- /// CHECK-START: int Main.IntAddition2() constant_folding (before)
+ /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
/// CHECK-DAG: <<Const6:i\d+>> IntConstant 6
- /// CHECK-DAG: <<Const11:i\d+>> IntConstant 11
/// CHECK-DAG: <<Add1:i\d+>> Add [<<Const1>>,<<Const2>>]
- /// CHECK-DAG: Add [<<Const5>>,<<Const6>>]
- /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Const11>>]
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Const5>>,<<Const6>>]
+ /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]
/// CHECK-DAG: Return [<<Add3>>]
- /// CHECK-START: int Main.IntAddition2() constant_folding (after)
+ /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const14:i\d+>> IntConstant 14
/// CHECK-DAG: Return [<<Const14>>]
- /// CHECK-START: int Main.IntAddition2() constant_folding (after)
+ /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (after)
/// CHECK-NOT: Add
public static int IntAddition2() {
int a, b, c;
- a = 1;
- b = 2;
+ a = $inline$int(1);
+ b = $inline$int(2);
a += b;
- b = 5;
- c = 6;
+ b = $inline$int(5);
+ c = $inline$int(6);
b += c;
c = a + b;
return c;
}
- /// CHECK-START: long Main.LongAddition() constant_folding (before)
+ /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const1:j\d+>> LongConstant 1
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: <<Add:j\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: long Main.LongAddition() constant_folding (after)
+ /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: long Main.LongAddition() constant_folding (after)
+ /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (after)
/// CHECK-NOT: Add
public static long LongAddition() {
long a, b, c;
- a = 1L;
- b = 2L;
+ a = $inline$long(1L);
+ b = $inline$long(2L);
c = a + b;
return c;
}
- /// CHECK-START: float Main.FloatAddition() constant_folding (before)
+ /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const1:f\d+>> FloatConstant 1
/// CHECK-DAG: <<Const2:f\d+>> FloatConstant 2
/// CHECK-DAG: <<Add:f\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: float Main.FloatAddition() constant_folding (after)
+ /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const3:f\d+>> FloatConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: float Main.FloatAddition() constant_folding (after)
+ /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (after)
/// CHECK-NOT: Add
public static float FloatAddition() {
float a, b, c;
- a = 1F;
- b = 2F;
+ a = $inline$float(1F);
+ b = $inline$float(2F);
c = a + b;
return c;
}
- /// CHECK-START: double Main.DoubleAddition() constant_folding (before)
+ /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const1:d\d+>> DoubleConstant 1
/// CHECK-DAG: <<Const2:d\d+>> DoubleConstant 2
/// CHECK-DAG: <<Add:d\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: double Main.DoubleAddition() constant_folding (after)
+ /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const3:d\d+>> DoubleConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: double Main.DoubleAddition() constant_folding (after)
+ /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (after)
/// CHECK-NOT: Add
public static double DoubleAddition() {
double a, b, c;
- a = 1D;
- b = 2D;
+ a = $inline$double(1D);
+ b = $inline$double(2D);
c = a + b;
return c;
}
@@ -318,86 +331,86 @@
* Exercise constant folding on subtraction.
*/
- /// CHECK-START: int Main.IntSubtraction() constant_folding (before)
+ /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const6:i\d+>> IntConstant 6
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: int Main.IntSubtraction() constant_folding (after)
+ /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const4:i\d+>> IntConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: int Main.IntSubtraction() constant_folding (after)
+ /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (after)
/// CHECK-NOT: Sub
public static int IntSubtraction() {
int a, b, c;
- a = 6;
- b = 2;
+ a = $inline$int(6);
+ b = $inline$int(2);
c = a - b;
return c;
}
- /// CHECK-START: long Main.LongSubtraction() constant_folding (before)
+ /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const6:j\d+>> LongConstant 6
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: <<Sub:j\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: long Main.LongSubtraction() constant_folding (after)
+ /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const4:j\d+>> LongConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: long Main.LongSubtraction() constant_folding (after)
+ /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (after)
/// CHECK-NOT: Sub
public static long LongSubtraction() {
long a, b, c;
- a = 6L;
- b = 2L;
+ a = $inline$long(6L);
+ b = $inline$long(2L);
c = a - b;
return c;
}
- /// CHECK-START: float Main.FloatSubtraction() constant_folding (before)
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const6:f\d+>> FloatConstant 6
/// CHECK-DAG: <<Const2:f\d+>> FloatConstant 2
/// CHECK-DAG: <<Sub:f\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: float Main.FloatSubtraction() constant_folding (after)
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const4:f\d+>> FloatConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: float Main.FloatSubtraction() constant_folding (after)
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (after)
/// CHECK-NOT: Sub
public static float FloatSubtraction() {
float a, b, c;
- a = 6F;
- b = 2F;
+ a = $inline$float(6F);
+ b = $inline$float(2F);
c = a - b;
return c;
}
- /// CHECK-START: double Main.DoubleSubtraction() constant_folding (before)
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const6:d\d+>> DoubleConstant 6
/// CHECK-DAG: <<Const2:d\d+>> DoubleConstant 2
/// CHECK-DAG: <<Sub:d\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: double Main.DoubleSubtraction() constant_folding (after)
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const4:d\d+>> DoubleConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: double Main.DoubleSubtraction() constant_folding (after)
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (after)
/// CHECK-NOT: Sub
public static double DoubleSubtraction() {
double a, b, c;
- a = 6D;
- b = 2D;
+ a = $inline$double(6D);
+ b = $inline$double(2D);
c = a - b;
return c;
}
@@ -407,86 +420,86 @@
* Exercise constant folding on multiplication.
*/
- /// CHECK-START: int Main.IntMultiplication() constant_folding (before)
+ /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Mul:i\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: int Main.IntMultiplication() constant_folding (after)
+ /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const21:i\d+>> IntConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: int Main.IntMultiplication() constant_folding (after)
+ /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (after)
/// CHECK-NOT: Mul
public static int IntMultiplication() {
int a, b, c;
- a = 7;
- b = 3;
+ a = $inline$int(7);
+ b = $inline$int(3);
c = a * b;
return c;
}
- /// CHECK-START: long Main.LongMultiplication() constant_folding (before)
+ /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const7:j\d+>> LongConstant 7
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: long Main.LongMultiplication() constant_folding (after)
+ /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const21:j\d+>> LongConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: long Main.LongMultiplication() constant_folding (after)
+ /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (after)
/// CHECK-NOT: Mul
public static long LongMultiplication() {
long a, b, c;
- a = 7L;
- b = 3L;
+ a = $inline$long(7L);
+ b = $inline$long(3L);
c = a * b;
return c;
}
- /// CHECK-START: float Main.FloatMultiplication() constant_folding (before)
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const7:f\d+>> FloatConstant 7
/// CHECK-DAG: <<Const3:f\d+>> FloatConstant 3
/// CHECK-DAG: <<Mul:f\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: float Main.FloatMultiplication() constant_folding (after)
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const21:f\d+>> FloatConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: float Main.FloatMultiplication() constant_folding (after)
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (after)
/// CHECK-NOT: Mul
public static float FloatMultiplication() {
float a, b, c;
- a = 7F;
- b = 3F;
+ a = $inline$float(7F);
+ b = $inline$float(3F);
c = a * b;
return c;
}
- /// CHECK-START: double Main.DoubleMultiplication() constant_folding (before)
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const7:d\d+>> DoubleConstant 7
/// CHECK-DAG: <<Const3:d\d+>> DoubleConstant 3
/// CHECK-DAG: <<Mul:d\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: double Main.DoubleMultiplication() constant_folding (after)
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const21:d\d+>> DoubleConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: double Main.DoubleMultiplication() constant_folding (after)
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (after)
/// CHECK-NOT: Mul
public static double DoubleMultiplication() {
double a, b, c;
- a = 7D;
- b = 3D;
+ a = $inline$double(7D);
+ b = $inline$double(3D);
c = a * b;
return c;
}
@@ -496,90 +509,90 @@
* Exercise constant folding on division.
*/
- /// CHECK-START: int Main.IntDivision() constant_folding (before)
+ /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:i\d+>> IntConstant 8
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Div0Chk:i\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Div:i\d+>> Div [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: int Main.IntDivision() constant_folding (after)
+ /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: int Main.IntDivision() constant_folding (after)
+ /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Div
public static int IntDivision() {
int a, b, c;
- a = 8;
- b = 3;
+ a = $inline$int(8);
+ b = $inline$int(3);
c = a / b;
return c;
}
- /// CHECK-START: long Main.LongDivision() constant_folding (before)
+ /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:j\d+>> LongConstant 8
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: <<Div0Chk:j\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Div:j\d+>> Div [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: long Main.LongDivision() constant_folding (after)
+ /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.LongDivision() constant_folding (after)
+ /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Div
public static long LongDivision() {
long a, b, c;
- a = 8L;
- b = 3L;
+ a = $inline$long(8L);
+ b = $inline$long(3L);
c = a / b;
return c;
}
- /// CHECK-START: float Main.FloatDivision() constant_folding (before)
+ /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:f\d+>> FloatConstant 8
/// CHECK-DAG: <<Const2P5:f\d+>> FloatConstant 2.5
/// CHECK-DAG: <<Div:f\d+>> Div [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: float Main.FloatDivision() constant_folding (after)
+ /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const3P2:f\d+>> FloatConstant 3.2
/// CHECK-DAG: Return [<<Const3P2>>]
- /// CHECK-START: float Main.FloatDivision() constant_folding (after)
+ /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (after)
/// CHECK-NOT: Div
public static float FloatDivision() {
float a, b, c;
- a = 8F;
- b = 2.5F;
+ a = $inline$float(8F);
+ b = $inline$float(2.5F);
c = a / b;
return c;
}
- /// CHECK-START: double Main.DoubleDivision() constant_folding (before)
+ /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:d\d+>> DoubleConstant 8
/// CHECK-DAG: <<Const2P5:d\d+>> DoubleConstant 2.5
/// CHECK-DAG: <<Div:d\d+>> Div [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: double Main.DoubleDivision() constant_folding (after)
+ /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const3P2:d\d+>> DoubleConstant 3.2
/// CHECK-DAG: Return [<<Const3P2>>]
- /// CHECK-START: double Main.DoubleDivision() constant_folding (after)
+ /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (after)
/// CHECK-NOT: Div
public static double DoubleDivision() {
double a, b, c;
- a = 8D;
- b = 2.5D;
+ a = $inline$double(8D);
+ b = $inline$double(2.5D);
c = a / b;
return c;
}
@@ -589,90 +602,90 @@
* Exercise constant folding on remainder.
*/
- /// CHECK-START: int Main.IntRemainder() constant_folding (before)
+ /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:i\d+>> IntConstant 8
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Div0Chk:i\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Rem:i\d+>> Rem [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: int Main.IntRemainder() constant_folding (after)
+ /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: int Main.IntRemainder() constant_folding (after)
+ /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Rem
public static int IntRemainder() {
int a, b, c;
- a = 8;
- b = 3;
+ a = $inline$int(8);
+ b = $inline$int(3);
c = a % b;
return c;
}
- /// CHECK-START: long Main.LongRemainder() constant_folding (before)
+ /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:j\d+>> LongConstant 8
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: <<Div0Chk:j\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Rem:j\d+>> Rem [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: long Main.LongRemainder() constant_folding (after)
+ /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.LongRemainder() constant_folding (after)
+ /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Rem
public static long LongRemainder() {
long a, b, c;
- a = 8L;
- b = 3L;
+ a = $inline$long(8L);
+ b = $inline$long(3L);
c = a % b;
return c;
}
- /// CHECK-START: float Main.FloatRemainder() constant_folding (before)
+ /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:f\d+>> FloatConstant 8
/// CHECK-DAG: <<Const2P5:f\d+>> FloatConstant 2.5
/// CHECK-DAG: <<Rem:f\d+>> Rem [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: float Main.FloatRemainder() constant_folding (after)
+ /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const0P5:f\d+>> FloatConstant 0.5
/// CHECK-DAG: Return [<<Const0P5>>]
- /// CHECK-START: float Main.FloatRemainder() constant_folding (after)
+ /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (after)
/// CHECK-NOT: Rem
public static float FloatRemainder() {
float a, b, c;
- a = 8F;
- b = 2.5F;
+ a = $inline$float(8F);
+ b = $inline$float(2.5F);
c = a % b;
return c;
}
- /// CHECK-START: double Main.DoubleRemainder() constant_folding (before)
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const8:d\d+>> DoubleConstant 8
/// CHECK-DAG: <<Const2P5:d\d+>> DoubleConstant 2.5
/// CHECK-DAG: <<Rem:d\d+>> Rem [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: double Main.DoubleRemainder() constant_folding (after)
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const0P5:d\d+>> DoubleConstant 0.5
/// CHECK-DAG: Return [<<Const0P5>>]
- /// CHECK-START: double Main.DoubleRemainder() constant_folding (after)
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (after)
/// CHECK-NOT: Rem
public static double DoubleRemainder() {
double a, b, c;
- a = 8D;
- b = 2.5D;
+ a = $inline$double(8D);
+ b = $inline$double(2.5D);
c = a % b;
return c;
}
@@ -682,42 +695,42 @@
* Exercise constant folding on left shift.
*/
- /// CHECK-START: int Main.ShlIntLong() constant_folding (before)
+ /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
/// CHECK-DAG: <<Shl:i\d+>> Shl [<<Const1>>,<<TypeConv>>]
/// CHECK-DAG: Return [<<Shl>>]
- /// CHECK-START: int Main.ShlIntLong() constant_folding (after)
+ /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const4:i\d+>> IntConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: int Main.ShlIntLong() constant_folding (after)
+ /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (after)
/// CHECK-NOT: Shl
public static int ShlIntLong() {
- int lhs = 1;
- long rhs = 2;
+ int lhs = $inline$int(1);
+ long rhs = $inline$long(2L);
return lhs << rhs;
}
- /// CHECK-START: long Main.ShlLongInt() constant_folding (before)
+ /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Shl:j\d+>> Shl [<<Const3L>>,<<Const2>>]
/// CHECK-DAG: Return [<<Shl>>]
- /// CHECK-START: long Main.ShlLongInt() constant_folding (after)
+ /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const12L:j\d+>> LongConstant 12
/// CHECK-DAG: Return [<<Const12L>>]
- /// CHECK-START: long Main.ShlLongInt() constant_folding (after)
+ /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (after)
/// CHECK-NOT: Shl
public static long ShlLongInt() {
- long lhs = 3;
- int rhs = 2;
+ long lhs = $inline$long(3L);
+ int rhs = $inline$int(2);
return lhs << rhs;
}
@@ -726,42 +739,42 @@
* Exercise constant folding on right shift.
*/
- /// CHECK-START: int Main.ShrIntLong() constant_folding (before)
+ /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Const7>>,<<TypeConv>>]
/// CHECK-DAG: Return [<<Shr>>]
- /// CHECK-START: int Main.ShrIntLong() constant_folding (after)
+ /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: Return [<<Const1>>]
- /// CHECK-START: int Main.ShrIntLong() constant_folding (after)
+ /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (after)
/// CHECK-NOT: Shr
public static int ShrIntLong() {
- int lhs = 7;
- long rhs = 2;
+ int lhs = $inline$int(7);
+ long rhs = $inline$long(2L);
return lhs >> rhs;
}
- /// CHECK-START: long Main.ShrLongInt() constant_folding (before)
+ /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const9L:j\d+>> LongConstant 9
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Shr:j\d+>> Shr [<<Const9L>>,<<Const2>>]
/// CHECK-DAG: Return [<<Shr>>]
- /// CHECK-START: long Main.ShrLongInt() constant_folding (after)
+ /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2L>>]
- /// CHECK-START: long Main.ShrLongInt() constant_folding (after)
+ /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (after)
/// CHECK-NOT: Shr
public static long ShrLongInt() {
- long lhs = 9;
- int rhs = 2;
+ long lhs = $inline$long(9);
+ int rhs = $inline$int(2);
return lhs >> rhs;
}
@@ -770,42 +783,42 @@
* Exercise constant folding on unsigned right shift.
*/
- /// CHECK-START: int Main.UShrIntLong() constant_folding (before)
+ /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (before)
/// CHECK-DAG: <<ConstM7:i\d+>> IntConstant -7
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<ConstM7>>,<<TypeConv>>]
/// CHECK-DAG: Return [<<UShr>>]
- /// CHECK-START: int Main.UShrIntLong() constant_folding (after)
+ /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (after)
/// CHECK-DAG: <<ConstRes:i\d+>> IntConstant 1073741822
/// CHECK-DAG: Return [<<ConstRes>>]
- /// CHECK-START: int Main.UShrIntLong() constant_folding (after)
+ /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (after)
/// CHECK-NOT: UShr
public static int UShrIntLong() {
- int lhs = -7;
- long rhs = 2;
+ int lhs = $inline$int(-7);
+ long rhs = $inline$long(2L);
return lhs >>> rhs;
}
- /// CHECK-START: long Main.UShrLongInt() constant_folding (before)
+ /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (before)
/// CHECK-DAG: <<ConstM9L:j\d+>> LongConstant -9
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<ConstM9L>>,<<Const2>>]
/// CHECK-DAG: Return [<<UShr>>]
- /// CHECK-START: long Main.UShrLongInt() constant_folding (after)
+ /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (after)
/// CHECK-DAG: <<ConstRes:j\d+>> LongConstant 4611686018427387901
/// CHECK-DAG: Return [<<ConstRes>>]
- /// CHECK-START: long Main.UShrLongInt() constant_folding (after)
+ /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (after)
/// CHECK-NOT: UShr
public static long UShrLongInt() {
- long lhs = -9;
- int rhs = 2;
+ long lhs = $inline$long(-9);
+ int rhs = $inline$int(2);
return lhs >>> rhs;
}
@@ -814,43 +827,43 @@
* Exercise constant folding on logical and.
*/
- /// CHECK-START: long Main.AndIntLong() constant_folding (before)
+ /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
/// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const3L>>]
/// CHECK-DAG: Return [<<And>>]
- /// CHECK-START: long Main.AndIntLong() constant_folding (after)
+ /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.AndIntLong() constant_folding (after)
+ /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (after)
/// CHECK-NOT: And
public static long AndIntLong() {
- int lhs = 10;
- long rhs = 3;
+ int lhs = $inline$int(10);
+ long rhs = $inline$long(3L);
return lhs & rhs;
}
- /// CHECK-START: long Main.AndLongInt() constant_folding (before)
+ /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
/// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<And>>]
- /// CHECK-START: long Main.AndLongInt() constant_folding (after)
+ /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.AndLongInt() constant_folding (after)
+ /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (after)
/// CHECK-NOT: And
public static long AndLongInt() {
- long lhs = 10;
- int rhs = 3;
+ long lhs = $inline$long(10L);
+ int rhs = $inline$int(3);
return lhs & rhs;
}
@@ -859,43 +872,43 @@
* Exercise constant folding on logical or.
*/
- /// CHECK-START: long Main.OrIntLong() constant_folding (before)
+ /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
/// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const3L>>]
/// CHECK-DAG: Return [<<Or>>]
- /// CHECK-START: long Main.OrIntLong() constant_folding (after)
+ /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const11:j\d+>> LongConstant 11
/// CHECK-DAG: Return [<<Const11>>]
- /// CHECK-START: long Main.OrIntLong() constant_folding (after)
+ /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (after)
/// CHECK-NOT: Or
public static long OrIntLong() {
- int lhs = 10;
- long rhs = 3;
+ int lhs = $inline$int(10);
+ long rhs = $inline$long(3L);
return lhs | rhs;
}
- /// CHECK-START: long Main.OrLongInt() constant_folding (before)
+ /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
/// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<Or>>]
- /// CHECK-START: long Main.OrLongInt() constant_folding (after)
+ /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const11:j\d+>> LongConstant 11
/// CHECK-DAG: Return [<<Const11>>]
- /// CHECK-START: long Main.OrLongInt() constant_folding (after)
+ /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (after)
/// CHECK-NOT: Or
public static long OrLongInt() {
- long lhs = 10;
- int rhs = 3;
+ long lhs = $inline$long(10L);
+ int rhs = $inline$int(3);
return lhs | rhs;
}
@@ -904,43 +917,43 @@
* Exercise constant folding on logical exclusive or.
*/
- /// CHECK-START: long Main.XorIntLong() constant_folding (before)
+ /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
/// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const3L>>]
/// CHECK-DAG: Return [<<Xor>>]
- /// CHECK-START: long Main.XorIntLong() constant_folding (after)
+ /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const9:j\d+>> LongConstant 9
/// CHECK-DAG: Return [<<Const9>>]
- /// CHECK-START: long Main.XorIntLong() constant_folding (after)
+ /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (after)
/// CHECK-NOT: Xor
public static long XorIntLong() {
- int lhs = 10;
- long rhs = 3;
+ int lhs = $inline$int(10);
+ long rhs = $inline$long(3L);
return lhs ^ rhs;
}
- /// CHECK-START: long Main.XorLongInt() constant_folding (before)
+ /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
/// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<Xor>>]
- /// CHECK-START: long Main.XorLongInt() constant_folding (after)
+ /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const9:j\d+>> LongConstant 9
/// CHECK-DAG: Return [<<Const9>>]
- /// CHECK-START: long Main.XorLongInt() constant_folding (after)
+ /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (after)
/// CHECK-NOT: Xor
public static long XorLongInt() {
- long lhs = 10;
- int rhs = 3;
+ long lhs = $inline$long(10L);
+ int rhs = $inline$int(3);
return lhs ^ rhs;
}
@@ -949,23 +962,23 @@
* Exercise constant folding on constant (static) condition.
*/
- /// CHECK-START: int Main.StaticCondition() constant_folding (before)
+ /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Cond:z\d+>> GreaterThanOrEqual [<<Const7>>,<<Const2>>]
- /// CHECK-DAG: If [<<Cond>>]
+ /// CHECK-DAG: Select [{{i\d+}},{{i\d+}},<<Cond>>]
- /// CHECK-START: int Main.StaticCondition() constant_folding (after)
+ /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: If [<<Const1>>]
+ /// CHECK-DAG: Select [{{i\d+}},{{i\d+}},<<Const1>>]
- /// CHECK-START: int Main.StaticCondition() constant_folding (after)
+ /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (after)
/// CHECK-NOT: GreaterThanOrEqual
public static int StaticCondition() {
int a, b, c;
- a = 7;
- b = 2;
+ a = $inline$int(7);
+ b = $inline$int(2);
if (a < b)
c = a + b;
else
@@ -1010,28 +1023,30 @@
* (forward) post-order traversal of the dominator tree.
*/
- /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (before)
+ /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (before)
+ /// CHECK-DAG: <<Cond:z\d+>> ParameterValue
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
/// CHECK-DAG: <<Add:i\d+>> Add [<<Const5>>,<<Const2>>]
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Const5>>,<<Const2>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
+ /// CHECK-DAG: <<Phi:i\d+>> Select [<<Sub>>,<<Add>>,<<Cond>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (after)
+ /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (after)
+ /// CHECK-DAG: <<Cond:z\d+>> ParameterValue
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const7>>,<<Const3>>]
+ /// CHECK-DAG: <<Phi:i\d+>> Select [<<Const3>>,<<Const7>>,<<Cond>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (after)
+ /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (after)
/// CHECK-NOT: Add
/// CHECK-NOT: Sub
public static int JumpsAndConditionals(boolean cond) {
int a, b, c;
- a = 5;
- b = 2;
+ a = $inline$int(5);
+ b = $inline$int(2);
if (cond)
c = a + b;
else
@@ -1310,204 +1325,204 @@
* Exercise constant folding on type conversions.
*/
- /// CHECK-START: int Main.ReturnInt33() constant_folding (before)
+ /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const33:j\d+>> LongConstant 33
/// CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: int Main.ReturnInt33() constant_folding (after)
+ /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: Return [<<Const33>>]
- /// CHECK-START: int Main.ReturnInt33() constant_folding (after)
+ /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static int ReturnInt33() {
- long imm = 33L;
+ long imm = $inline$long(33L);
return (int) imm;
}
- /// CHECK-START: int Main.ReturnIntMax() constant_folding (before)
+ /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (before)
/// CHECK-DAG: <<ConstMax:f\d+>> FloatConstant 1e+34
/// CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<ConstMax>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: int Main.ReturnIntMax() constant_folding (after)
+ /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (after)
/// CHECK-DAG: <<ConstMax:i\d+>> IntConstant 2147483647
/// CHECK-DAG: Return [<<ConstMax>>]
- /// CHECK-START: int Main.ReturnIntMax() constant_folding (after)
+ /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static int ReturnIntMax() {
- float imm = 1.0e34f;
+ float imm = $inline$float(1.0e34f);
return (int) imm;
}
- /// CHECK-START: int Main.ReturnInt0() constant_folding (before)
+ /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (before)
/// CHECK-DAG: <<ConstNaN:d\d+>> DoubleConstant nan
/// CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<ConstNaN>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: int Main.ReturnInt0() constant_folding (after)
+ /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: Return [<<Const0>>]
- /// CHECK-START: int Main.ReturnInt0() constant_folding (after)
+ /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static int ReturnInt0() {
- double imm = Double.NaN;
+ double imm = $inline$double(Double.NaN);
return (int) imm;
}
- /// CHECK-START: long Main.ReturnLong33() constant_folding (before)
+ /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: long Main.ReturnLong33() constant_folding (after)
+ /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const33:j\d+>> LongConstant 33
/// CHECK-DAG: Return [<<Const33>>]
- /// CHECK-START: long Main.ReturnLong33() constant_folding (after)
+ /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static long ReturnLong33() {
- int imm = 33;
+ int imm = $inline$int(33);
return (long) imm;
}
- /// CHECK-START: long Main.ReturnLong34() constant_folding (before)
+ /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const34:f\d+>> FloatConstant 34
/// CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<Const34>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: long Main.ReturnLong34() constant_folding (after)
+ /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const34:j\d+>> LongConstant 34
/// CHECK-DAG: Return [<<Const34>>]
- /// CHECK-START: long Main.ReturnLong34() constant_folding (after)
+ /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static long ReturnLong34() {
- float imm = 34.0f;
+ float imm = $inline$float(34.0f);
return (long) imm;
}
- /// CHECK-START: long Main.ReturnLong0() constant_folding (before)
+ /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (before)
/// CHECK-DAG: <<ConstNaN:d\d+>> DoubleConstant nan
/// CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<ConstNaN>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: long Main.ReturnLong0() constant_folding (after)
+ /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const0:j\d+>> LongConstant 0
/// CHECK-DAG: Return [<<Const0>>]
- /// CHECK-START: long Main.ReturnLong0() constant_folding (after)
+ /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static long ReturnLong0() {
- double imm = -Double.NaN;
+ double imm = $inline$double(-Double.NaN);
return (long) imm;
}
- /// CHECK-START: float Main.ReturnFloat33() constant_folding (before)
+ /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: float Main.ReturnFloat33() constant_folding (after)
+ /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const33:f\d+>> FloatConstant 33
/// CHECK-DAG: Return [<<Const33>>]
- /// CHECK-START: float Main.ReturnFloat33() constant_folding (after)
+ /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static float ReturnFloat33() {
- int imm = 33;
+ int imm = $inline$int(33);
return (float) imm;
}
- /// CHECK-START: float Main.ReturnFloat34() constant_folding (before)
+ /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const34:j\d+>> LongConstant 34
/// CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const34>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: float Main.ReturnFloat34() constant_folding (after)
+ /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const34:f\d+>> FloatConstant 34
/// CHECK-DAG: Return [<<Const34>>]
- /// CHECK-START: float Main.ReturnFloat34() constant_folding (after)
+ /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static float ReturnFloat34() {
- long imm = 34L;
+ long imm = $inline$long(34L);
return (float) imm;
}
- /// CHECK-START: float Main.ReturnFloat99P25() constant_folding (before)
+ /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const:d\d+>> DoubleConstant 99.25
/// CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: float Main.ReturnFloat99P25() constant_folding (after)
+ /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const:f\d+>> FloatConstant 99.25
/// CHECK-DAG: Return [<<Const>>]
- /// CHECK-START: float Main.ReturnFloat99P25() constant_folding (after)
+ /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static float ReturnFloat99P25() {
- double imm = 99.25;
+ double imm = $inline$double(99.25);
return (float) imm;
}
- /// CHECK-START: double Main.ReturnDouble33() constant_folding (before)
+ /// CHECK-START: double Main.ReturnDouble33() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: double Main.ReturnDouble33() constant_folding (after)
+ /// CHECK-START: double Main.ReturnDouble33() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const33:d\d+>> DoubleConstant 33
/// CHECK-DAG: Return [<<Const33>>]
public static double ReturnDouble33() {
- int imm = 33;
+ int imm = $inline$int(33);
return (double) imm;
}
- /// CHECK-START: double Main.ReturnDouble34() constant_folding (before)
+ /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const34:j\d+>> LongConstant 34
/// CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const34>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: double Main.ReturnDouble34() constant_folding (after)
+ /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const34:d\d+>> DoubleConstant 34
/// CHECK-DAG: Return [<<Const34>>]
- /// CHECK-START: double Main.ReturnDouble34() constant_folding (after)
+ /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static double ReturnDouble34() {
- long imm = 34L;
+ long imm = $inline$long(34L);
return (double) imm;
}
- /// CHECK-START: double Main.ReturnDouble99P25() constant_folding (before)
+ /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (before)
/// CHECK-DAG: <<Const:f\d+>> FloatConstant 99.25
/// CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: double Main.ReturnDouble99P25() constant_folding (after)
+ /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (after)
/// CHECK-DAG: <<Const:d\d+>> DoubleConstant 99.25
/// CHECK-DAG: Return [<<Const>>]
- /// CHECK-START: double Main.ReturnDouble99P25() constant_folding (after)
+ /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (after)
/// CHECK-NOT: TypeConversion
public static double ReturnDouble99P25() {
- float imm = 99.25f;
+ float imm = $inline$float(99.25f);
return (double) imm;
}
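The checks above now target the constant_folding_after_inlining pass because every literal is routed through an $inline$ helper call: the constant only becomes visible to the folding pass once the inliner has collapsed that call. The helper definitions are outside this hunk; presumably they are plain identity methods along these lines (a sketch, not the exact code in the file):

    // Hypothetical shape of the $inline$ helpers referenced by the tests above.
    // Each one simply returns its argument, so after inlining the graph holds
    // the raw constant and constant_folding_after_inlining can fold the operation.
    public static int    $inline$int(int v)       { return v; }
    public static long   $inline$long(long v)     { return v; }
    public static float  $inline$float(float v)   { return v; }
    public static double $inline$double(double v) { return v; }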
diff --git a/test/450-checker-types/smali/SmaliTests.smali b/test/450-checker-types/smali/SmaliTests.smali
new file mode 100644
index 0000000..6a3122e
--- /dev/null
+++ b/test/450-checker-types/smali/SmaliTests.smali
@@ -0,0 +1,120 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSmaliTests;
+.super Ljava/lang/Object;
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ0_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
+## CHECK-DAG: <<IOf:z\d+>> InstanceOf
+## CHECK-DAG: Equal [<<IOf>>,<<Cst0>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ0_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK: CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ0_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT: CheckCast
+
+.method public static testInstanceOf_EQ0_NotInlined(Ljava/lang/Object;)V
+ .registers 3
+
+ const v0, 0x0
+ instance-of v1, p0, LSubclassC;
+ if-eq v1, v0, :return
+
+ check-cast p0, LSubclassC;
+ invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+
+ :return
+ return-void
+
+.end method
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ1_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+## CHECK-DAG: <<IOf:z\d+>> InstanceOf
+## CHECK-DAG: Equal [<<IOf>>,<<Cst1>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ1_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK: CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_EQ1_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT: CheckCast
+
+.method public static testInstanceOf_EQ1_NotInlined(Ljava/lang/Object;)V
+ .registers 3
+
+ const v0, 0x1
+ instance-of v1, p0, LSubclassC;
+ if-eq v1, v0, :invoke
+ return-void
+
+ :invoke
+ check-cast p0, LSubclassC;
+ invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+ return-void
+
+.end method
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE0_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
+## CHECK-DAG: <<IOf:z\d+>> InstanceOf
+## CHECK-DAG: NotEqual [<<IOf>>,<<Cst0>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE0_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK: CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE0_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT: CheckCast
+
+.method public static testInstanceOf_NE0_NotInlined(Ljava/lang/Object;)V
+ .registers 3
+
+ const v0, 0x0
+ instance-of v1, p0, LSubclassC;
+ if-ne v1, v0, :invoke
+ return-void
+
+ :invoke
+ check-cast p0, LSubclassC;
+ invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+ return-void
+
+.end method
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE1_NotInlined(java.lang.Object) builder (after)
+## CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+## CHECK-DAG: <<IOf:z\d+>> InstanceOf
+## CHECK-DAG: NotEqual [<<IOf>>,<<Cst1>>]
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE1_NotInlined(java.lang.Object) instruction_simplifier (before)
+## CHECK: CheckCast
+
+## CHECK-START: void SmaliTests.testInstanceOf_NE1_NotInlined(java.lang.Object) instruction_simplifier (after)
+## CHECK-NOT: CheckCast
+
+.method public static testInstanceOf_NE1_NotInlined(Ljava/lang/Object;)V
+ .registers 3
+
+ const v0, 0x1
+ instance-of v1, p0, LSubclassC;
+ if-ne v1, v0, :return
+
+ check-cast p0, LSubclassC;
+ invoke-virtual {p0}, LSubclassC;->$noinline$g()V
+
+ :return
+ return-void
+
+.end method
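The smali methods above re-create, at the bytecode level, the instance-of-compared-to-constant patterns removed from Main.java further down in this diff; writing them in smali keeps the explicit if-eq/if-ne against 0 or 1 stable regardless of how javac or Jack would compile the Java source. For orientation, a rough Java equivalent of testInstanceOf_EQ1_NotInlined (matching the deleted Main code, not a verbatim translation):

    // The instance-of result is compared against the constant 1 before the cast;
    // the instruction_simplifier is expected to remove the guarded CheckCast.
    static void testInstanceOf_EQ1_NotInlined(Object o) {
      if ((o instanceof SubclassC) == true) {
        ((SubclassC) o).$noinline$g();
      }
    }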
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 027a9d9..08b6cec 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -205,58 +205,6 @@
public static boolean $inline$InstanceofSubclassB(Object o) { return o instanceof SubclassB; }
public static boolean $inline$InstanceofSubclassC(Object o) { return o instanceof SubclassC; }
- /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) builder (after)
- /// CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<IOf1:z\d+>> InstanceOf
- /// CHECK-DAG: NotEqual [<<IOf1>>,<<Cst1>>]
- /// CHECK-DAG: <<IOf2:z\d+>> InstanceOf
- /// CHECK-DAG: Equal [<<IOf2>>,<<Cst0>>]
-
- /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (before)
- /// CHECK: CheckCast
- /// CHECK: CheckCast
- /// CHECK-NOT: CheckCast
-
- /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (after)
- /// CHECK-NOT: CheckCast
- public void testInstanceOf_NotInlined(Object o) {
- if ((o instanceof SubclassC) == true) {
- ((SubclassC)o).$noinline$g();
- }
- if ((o instanceof SubclassB) != false) {
- ((SubclassB)o).$noinline$g();
- }
- }
-
- /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) builder (after)
- /// CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<IOf1:z\d+>> InstanceOf
- /// CHECK-DAG: Equal [<<IOf1>>,<<Cst1>>]
- /// CHECK-DAG: <<IOf2:z\d+>> InstanceOf
- /// CHECK-DAG: NotEqual [<<IOf2>>,<<Cst0>>]
-
- /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (before)
- /// CHECK: CheckCast
- /// CHECK: CheckCast
- /// CHECK-NOT: CheckCast
-
- /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (after)
- /// CHECK-NOT: CheckCast
- public void testNotInstanceOf_NotInlined(Object o) {
- if ((o instanceof SubclassC) != true) {
- // Empty branch to flip the condition.
- } else {
- ((SubclassC)o).$noinline$g();
- }
- if ((o instanceof SubclassB) == false) {
- // Empty branch to flip the condition.
- } else {
- ((SubclassB)o).$noinline$g();
- }
- }
-
/// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) inliner (after)
/// CHECK-DAG: <<IOf:z\d+>> InstanceOf
/// CHECK-DAG: If [<<IOf>>]
diff --git a/test/454-get-vreg/build b/test/454-get-vreg/build
deleted file mode 100644
index 08987b5..0000000
--- a/test/454-get-vreg/build
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
diff --git a/test/458-checker-instruction-simplification/smali/SmaliTests.smali b/test/458-checker-instruction-simplification/smali/SmaliTests.smali
new file mode 100644
index 0000000..ede599b
--- /dev/null
+++ b/test/458-checker-instruction-simplification/smali/SmaliTests.smali
@@ -0,0 +1,193 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSmaliTests;
+.super Ljava/lang/Object;
+
+## CHECK-START: int SmaliTests.EqualTrueRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const1>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualTrueRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static EqualTrueRhs(Z)I
+ .registers 3
+
+ const v0, 0x1
+ const v1, 0x5
+ if-eq p0, v0, :return
+ const v1, 0x3
+ :return
+ return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.EqualTrueLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Const1>>,<<Arg>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualTrueLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static EqualTrueLhs(Z)I
+ .registers 3
+
+ const v0, 0x1
+ const v1, 0x5
+ if-eq v0, p0, :return
+ const v1, 0x3
+ :return
+ return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.EqualFalseRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const0>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualFalseRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static EqualFalseRhs(Z)I
+ .registers 3
+
+ const v0, 0x0
+ const v1, 0x3
+ if-eq p0, v0, :return
+ const v1, 0x5
+ :return
+ return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.EqualFalseLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Const0>>,<<Arg>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.EqualFalseLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static EqualFalseLhs(Z)I
+ .registers 3
+
+ const v0, 0x0
+ const v1, 0x3
+ if-eq v0, p0, :return
+ const v1, 0x5
+ :return
+ return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualTrueRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const1>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualTrueRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static NotEqualTrueRhs(Z)I
+ .registers 3
+
+ const v0, 0x1
+ const v1, 0x3
+ if-ne p0, v0, :return
+ const v1, 0x5
+ :return
+ return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualTrueLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Const1>>,<<Arg>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualTrueLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static NotEqualTrueLhs(Z)I
+ .registers 3
+
+ const v0, 0x1
+ const v1, 0x3
+ if-ne v0, p0, :return
+ const v1, 0x5
+ :return
+ return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualFalseRhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const0>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualFalseRhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static NotEqualFalseRhs(Z)I
+ .registers 3
+
+ const v0, 0x0
+ const v1, 0x5
+ if-ne p0, v0, :return
+ const v1, 0x3
+ :return
+ return v1
+
+.end method
+
+## CHECK-START: int SmaliTests.NotEqualFalseLhs(boolean) instruction_simplifier (before)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Const0>>,<<Arg>>]
+## CHECK-DAG: If [<<Cond>>]
+
+## CHECK-START: int SmaliTests.NotEqualFalseLhs(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: If [<<Arg>>]
+
+.method public static NotEqualFalseLhs(Z)I
+ .registers 3
+
+ const v0, 0x0
+ const v1, 0x5
+ if-ne v0, p0, :return
+ const v1, 0x3
+ :return
+ return v1
+
+.end method
+
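These methods take over the job of the EqualTrueRhs/EqualTrueLhs/…/NotEqualFalseLhs helpers deleted from Main.java later in this diff; keeping them in smali pins the exact Equal/NotEqual-against-constant shape that the simplifier must reduce to a plain If on the parameter. For example, EqualTrueRhs(Z)I encodes the same ternary as the removed Main.EqualTrueRhs:

    // EqualTrueRhs: returns 5 when the argument is true, 3 otherwise.
    static int EqualTrueRhs(boolean arg) {
      return (arg != true) ? 3 : 5;
    }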
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 8d6bb65..8640148 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+import java.lang.reflect.Method;
+
public class Main {
public static void assertBooleanEquals(boolean expected, boolean result) {
@@ -826,17 +828,16 @@
/// CHECK-START: long Main.NotNot1(long) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
- /// CHECK-DAG: <<ConstF1:j\d+>> LongConstant -1
- /// CHECK-DAG: <<Xor1:j\d+>> Xor [<<Arg>>,<<ConstF1>>]
- /// CHECK-DAG: <<Xor2:j\d+>> Xor [<<Xor1>>,<<ConstF1>>]
- /// CHECK-DAG: Return [<<Xor2>>]
+ /// CHECK-DAG: <<Not1:j\d+>> Not [<<Arg>>]
+ /// CHECK-DAG: <<Not2:j\d+>> Not [<<Not1>>]
+ /// CHECK-DAG: Return [<<Not2>>]
/// CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
/// CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
- /// CHECK-NOT: Xor
+ /// CHECK-NOT: Not
public static long NotNot1(long arg) {
return ~~arg;
@@ -844,10 +845,9 @@
/// CHECK-START: int Main.NotNot2(int) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
- /// CHECK-DAG: <<ConstF1:i\d+>> IntConstant -1
- /// CHECK-DAG: <<Xor1:i\d+>> Xor [<<Arg>>,<<ConstF1>>]
- /// CHECK-DAG: <<Xor2:i\d+>> Xor [<<Xor1>>,<<ConstF1>>]
- /// CHECK-DAG: <<Add:i\d+>> Add [<<Xor2>>,<<Xor1>>]
+ /// CHECK-DAG: <<Not1:i\d+>> Not [<<Arg>>]
+ /// CHECK-DAG: <<Not2:i\d+>> Not [<<Not1>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Not2>>,<<Not1>>]
/// CHECK-DAG: Return [<<Add>>]
/// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
@@ -857,7 +857,8 @@
/// CHECK-DAG: Return [<<Add>>]
/// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
- /// CHECK-NOT: Xor
+ /// CHECK: Not
+ /// CHECK-NOT: Not
public static int NotNot2(int arg) {
int temp = ~arg;
@@ -965,174 +966,6 @@
return res;
}
- /// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const1>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const3>>,<<Const5>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int EqualTrueRhs(boolean arg) {
- return (arg != true) ? 3 : 5;
- }
-
- /// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Const1>>,<<Arg>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const3>>,<<Const5>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int EqualTrueLhs(boolean arg) {
- return (true != arg) ? 3 : 5;
- }
-
- /// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const0>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const5>>,<<Const3>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int EqualFalseRhs(boolean arg) {
- return (arg != false) ? 3 : 5;
- }
-
- /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const0>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const5>>,<<Const3>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int EqualFalseLhs(boolean arg) {
- return (false != arg) ? 3 : 5;
- }
-
- /// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const1>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const5>>,<<Const3>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int NotEqualTrueRhs(boolean arg) {
- return (arg == true) ? 3 : 5;
- }
-
- /// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Const1>>,<<Arg>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const5>>,<<Const3>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int NotEqualTrueLhs(boolean arg) {
- return (true == arg) ? 3 : 5;
- }
-
- /// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const0>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const3>>,<<Const5>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int NotEqualFalseRhs(boolean arg) {
- return (arg == false) ? 3 : 5;
- }
-
- /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (before)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const0>>]
- /// CHECK-DAG: If [<<Cond>>]
-
- /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: If [<<Arg>>]
-
- /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier_before_codegen (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
- /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Select:i\d+>> Select [<<Const3>>,<<Const5>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Select>>]
-
- public static int NotEqualFalseLhs(boolean arg) {
- return (false == arg) ? 3 : 5;
- }
-
/// CHECK-START: boolean Main.EqualBoolVsIntConst(boolean) instruction_simplifier_after_bce (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
@@ -1307,17 +1140,16 @@
return arg * 31;
}
- /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (before)
+ /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier_after_bce (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
+ /// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Field>>,<<Const1>>]
- /// CHECK-DAG: If [<<NE>>]
+ /// CHECK-DAG: <<Select:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
+ /// CHECK-DAG: Return [<<Select>>]
- /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (after)
- /// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
- /// CHECK-DAG: If [<<Field>>]
-
- /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier_before_codegen (after)
+ /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier_after_bce (after)
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1325,20 +1157,19 @@
/// CHECK-DAG: Return [<<Select>>]
public static int booleanFieldNotEqualOne() {
- return (booleanField == true) ? 13 : 54;
+ return (booleanField == $inline$true()) ? 13 : 54;
}
- /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (before)
+ /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier_after_bce (before)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
+ /// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
- /// CHECK-DAG: <<EQ:z\d+>> Equal [<<Field>>,<<Const0>>]
- /// CHECK-DAG: If [<<EQ>>]
+ /// CHECK-DAG: <<NE:z\d+>> Equal [<<Field>>,<<Const0>>]
+ /// CHECK-DAG: <<Select:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
+ /// CHECK-DAG: Return [<<Select>>]
- /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (after)
- /// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
- /// CHECK-DAG: If [<<Field>>]
-
- /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier_before_codegen (after)
+ /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier_after_bce (after)
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1346,7 +1177,7 @@
/// CHECK-DAG: Return [<<Select>>]
public static int booleanFieldEqualZero() {
- return (booleanField != false) ? 13 : 54;
+ return (booleanField != $inline$false()) ? 13 : 54;
}
/// CHECK-START: int Main.intConditionNotEqualOne(int) instruction_simplifier_after_bce (before)
@@ -1374,7 +1205,7 @@
// LessThanOrEqual instructions.
public static int intConditionNotEqualOne(int i) {
- return ((i > 42) == true) ? 13 : 54;
+ return ((i > 42) == $inline$true()) ? 13 : 54;
}
/// CHECK-START: int Main.intConditionEqualZero(int) instruction_simplifier_after_bce (before)
@@ -1402,7 +1233,7 @@
// LessThanOrEqual instructions.
public static int intConditionEqualZero(int i) {
- return ((i > 42) != false) ? 13 : 54;
+ return ((i > 42) != $inline$false()) ? 13 : 54;
}
// Test that conditions on float/double are not flipped.
@@ -1770,6 +1601,16 @@
return (short) (value & 0x17fff);
}
+ public static int runSmaliTest(String name, boolean input) {
+ try {
+ Class<?> c = Class.forName("SmaliTests");
+ Method m = c.getMethod(name, new Class[] { boolean.class });
+ return (Integer) m.invoke(null, input);
+ } catch (Exception ex) {
+ throw new Error(ex);
+ }
+ }
+
public static void main(String[] args) {
int arg = 123456;
@@ -1804,14 +1645,6 @@
assertIntEquals(SubNeg1(arg, arg + 1), -(arg + arg + 1));
assertIntEquals(SubNeg2(arg, arg + 1), -(arg + arg + 1));
assertLongEquals(SubNeg3(arg, arg + 1), -(2 * arg + 1));
- assertIntEquals(EqualTrueRhs(true), 5);
- assertIntEquals(EqualTrueLhs(true), 5);
- assertIntEquals(EqualFalseRhs(true), 3);
- assertIntEquals(EqualFalseLhs(true), 3);
- assertIntEquals(NotEqualTrueRhs(true), 3);
- assertIntEquals(NotEqualTrueLhs(true), 3);
- assertIntEquals(NotEqualFalseRhs(true), 5);
- assertIntEquals(NotEqualFalseLhs(true), 5);
assertBooleanEquals(EqualBoolVsIntConst(true), true);
assertBooleanEquals(EqualBoolVsIntConst(true), true);
assertBooleanEquals(NotEqualBoolVsIntConst(false), false);
@@ -1906,7 +1739,20 @@
assertIntEquals(intAnd0x17fffToShort(0x88888888), 0x0888);
assertIntEquals(intAnd0x17fffToShort(Integer.MIN_VALUE), 0);
assertIntEquals(intAnd0x17fffToShort(Integer.MAX_VALUE), Short.MAX_VALUE);
+
+ for (String condition : new String[] { "Equal", "NotEqual" }) {
+ for (String constant : new String[] { "True", "False" }) {
+ for (String side : new String[] { "Rhs", "Lhs" }) {
+ String name = condition + constant + side;
+ assertIntEquals(runSmaliTest(name, true), 5);
+ assertIntEquals(runSmaliTest(name, false), 3);
+ }
+ }
+ }
}
+ private static boolean $inline$true() { return true; }
+ private static boolean $inline$false() { return false; }
+
public static boolean booleanField;
}
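The reflective loop added to main() exercises the SmaliTests versions in place of the deleted direct asserts: for every combination of Equal/NotEqual, True/False and Rhs/Lhs it invokes the matching SmaliTests method through runSmaliTest and expects 5 for a true argument and 3 for a false one. Unrolled, the first iteration is equivalent to:

    // name = "EqualTrueRhs": a true argument selects 5, a false one selects 3.
    assertIntEquals(runSmaliTest("EqualTrueRhs", true), 5);
    assertIntEquals(runSmaliTest("EqualTrueRhs", false), 3);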
diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java
index 682f126..f0fe1b1 100644
--- a/test/463-checker-boolean-simplifier/src/Main.java
+++ b/test/463-checker-boolean-simplifier/src/Main.java
@@ -42,7 +42,7 @@
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: If [<<Param>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const1>>,<<Const0>>]
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,<<Const1>>]
/// CHECK-DAG: Return [<<Phi>>]
/// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (before)
@@ -185,11 +185,7 @@
/// CHECK-NOT: BooleanNot
public static int NegatedCondition(boolean x) {
- if (x != false) {
- return 42;
- } else {
- return 43;
- }
+ return (x != false) ? 42 : 43;
}
/// CHECK-START: int Main.SimpleTrueBlock(boolean, int) select_generator (after)
@@ -253,13 +249,7 @@
/// CHECK-DAG: Return [<<Select123>>]
public static int ThreeBlocks(boolean x, boolean y) {
- if (x) {
- return 1;
- } else if (y) {
- return 2;
- } else {
- return 3;
- }
+ return x ? 1 : (y ? 2 : 3);
}
/// CHECK-START: int Main.MultiplePhis() select_generator (before)
@@ -292,8 +282,10 @@
while (y++ < 10) {
if (y > 1) {
x = 13;
+ continue;
} else {
x = 42;
+ continue;
}
}
return x;
diff --git a/test/530-checker-loops/src/Main.java b/test/530-checker-loops/src/Main.java
index d5111b0..2e5fd25 100644
--- a/test/530-checker-loops/src/Main.java
+++ b/test/530-checker-loops/src/Main.java
@@ -633,6 +633,10 @@
}
}
+ //
+ // Verifier.
+ //
+
public static void main(String[] args) {
int[] empty = { };
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
diff --git a/test/537-checker-inline-and-unverified/src/Main.java b/test/537-checker-inline-and-unverified/src/Main.java
index bdc14b0..b9d5fc9 100644
--- a/test/537-checker-inline-and-unverified/src/Main.java
+++ b/test/537-checker-inline-and-unverified/src/Main.java
@@ -45,12 +45,14 @@
}
public static boolean $opt$noinline$testNoInline() {
+ boolean result = true;
try {
- return null instanceof InaccessibleClass;
+ result = (null instanceof InaccessibleClass);
+ throw new Error("Unreachable");
} catch (IllegalAccessError e) {
// expected
}
- return false;
+ return result;
}
public static boolean $opt$inline$testInline() {
diff --git a/test/555-checker-regression-x86const/build b/test/555-checker-regression-x86const/build
index 09dcc36..92ddfc9 100644
--- a/test/555-checker-regression-x86const/build
+++ b/test/555-checker-regression-x86const/build
@@ -27,14 +27,12 @@
mv classes/UnresolvedClass.class classes-ex
if [ ${USE_JACK} = "true" ]; then
- # Create .jack files from classes generated with javac.
- ${JILL} classes --output classes.jack
- ${JILL} classes-ex --output classes-ex.jack
+ jar cf classes.jill.jar -C classes .
+ jar cf classes-ex.jill.jar -C classes-ex .
- # Create DEX files from .jack files.
- ${JACK} --import classes.jack --output-dex .
+ ${JACK} --import classes.jill.jar --output-dex .
zip $TEST_NAME.jar classes.dex
- ${JACK} --import classes-ex.jack --output-dex .
+ ${JACK} --import classes-ex.jill.jar --output-dex .
zip ${TEST_NAME}-ex.jar classes.dex
else
if [ ${NEED_DEX} = "true" ]; then
diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java
index dc4cb76..acfcecd 100644
--- a/test/565-checker-condition-liveness/src/Main.java
+++ b/test/565-checker-condition-liveness/src/Main.java
@@ -28,10 +28,7 @@
/// CHECK-EVAL: <<UseInput>> == <<LivSel>> + 1
public static int p(float arg) {
- if (arg > 5.0f) {
- return 0;
- }
- return -1;
+ return (arg > 5.0f) ? 0 : -1;
}
/// CHECK-START: void Main.main(java.lang.String[]) liveness (after)
diff --git a/test/565-checker-doublenegbitwise/src/Main.java b/test/565-checker-doublenegbitwise/src/Main.java
index 2d70e11..e426b75 100644
--- a/test/565-checker-doublenegbitwise/src/Main.java
+++ b/test/565-checker-doublenegbitwise/src/Main.java
@@ -35,14 +35,11 @@
* Test transformation of Not/Not/And into Or/Not.
*/
- // Note: before the instruction_simplifier pass, Xor's are used instead of
- // Not's (the simplification happens during the same pass).
/// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (before)
/// CHECK: <<P1:i\d+>> ParameterValue
/// CHECK: <<P2:i\d+>> ParameterValue
- /// CHECK: <<CstM1:i\d+>> IntConstant -1
- /// CHECK: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
- /// CHECK: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
+ /// CHECK: <<Not1:i\d+>> Not [<<P1>>]
+ /// CHECK: <<Not2:i\d+>> Not [<<P2>>]
/// CHECK: <<And:i\d+>> And [<<Not1>>,<<Not2>>]
/// CHECK: Return [<<And>>]
@@ -106,14 +103,11 @@
* Test transformation of Not/Not/Or into And/Not.
*/
- // See note above.
- // The second Xor has its arguments reversed for no obvious reason.
/// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (before)
/// CHECK: <<P1:j\d+>> ParameterValue
/// CHECK: <<P2:j\d+>> ParameterValue
- /// CHECK: <<CstM1:j\d+>> LongConstant -1
- /// CHECK: <<Not1:j\d+>> Xor [<<P1>>,<<CstM1>>]
- /// CHECK: <<Not2:j\d+>> Xor [<<CstM1>>,<<P2>>]
+ /// CHECK: <<Not1:j\d+>> Not [<<P1>>]
+ /// CHECK: <<Not2:j\d+>> Not [<<P2>>]
/// CHECK: <<Or:j\d+>> Or [<<Not1>>,<<Not2>>]
/// CHECK: Return [<<Or>>]
@@ -183,12 +177,11 @@
/// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (before)
/// CHECK: <<P1:i\d+>> ParameterValue
/// CHECK: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
+ /// CHECK: <<Cst1:i\d+>> IntConstant 1
/// CHECK: <<AddP1:i\d+>> Add [<<P1>>,<<Cst1>>]
- /// CHECK: <<Not1:i\d+>> Xor [<<AddP1>>,<<CstM1>>]
+ /// CHECK: <<Not1:i\d+>> Not [<<AddP1>>]
/// CHECK: <<AddP2:i\d+>> Add [<<P2>>,<<Cst1>>]
- /// CHECK: <<Not2:i\d+>> Xor [<<AddP2>>,<<CstM1>>]
+ /// CHECK: <<Not2:i\d+>> Not [<<AddP2>>]
/// CHECK: <<Or:i\d+>> Or [<<Not1>>,<<Not2>>]
/// CHECK: Return [<<Or>>]
@@ -226,9 +219,8 @@
/// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (before)
/// CHECK: <<P1:i\d+>> ParameterValue
/// CHECK: <<P2:i\d+>> ParameterValue
- /// CHECK: <<CstM1:i\d+>> IntConstant -1
- /// CHECK: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
- /// CHECK: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
+ /// CHECK: <<Not1:i\d+>> Not [<<P1>>]
+ /// CHECK: <<Not2:i\d+>> Not [<<P2>>]
/// CHECK: <<Xor:i\d+>> Xor [<<Not1>>,<<Not2>>]
/// CHECK: Return [<<Xor>>]
@@ -285,11 +277,10 @@
/// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (before)
/// CHECK: <<P1:i\d+>> ParameterValue
/// CHECK: <<P2:i\d+>> ParameterValue
- /// CHECK: <<CstM1:i\d+>> IntConstant -1
/// CHECK: <<One:i\d+>> IntConstant 1
- /// CHECK: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
+ /// CHECK: <<Not2:i\d+>> Not [<<P2>>]
/// CHECK: <<And2:i\d+>> And [<<Not2>>,<<One>>]
- /// CHECK: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
+ /// CHECK: <<Not1:i\d+>> Not [<<P1>>]
/// CHECK: <<And1:i\d+>> And [<<Not1>>,<<Not2>>]
/// CHECK: <<Add:i\d+>> Add [<<And2>>,<<And1>>]
/// CHECK: Return [<<Add>>]
diff --git a/test/577-checker-fp2int/src/Main.java b/test/577-checker-fp2int/src/Main.java
index e3f1230..ace956d 100644
--- a/test/577-checker-fp2int/src/Main.java
+++ b/test/577-checker-fp2int/src/Main.java
@@ -21,7 +21,8 @@
/// CHECK-DAG: Return [<<Result>>]
//
/// CHECK-START: int Main.f2int(float) instruction_simplifier (after)
- /// CHECK-DAG: <<Raw:i\d+>> InvokeStaticOrDirect [<<Arg:f\d+>>] intrinsic:FloatFloatToRawIntBits
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: <<Raw:i\d+>> InvokeStaticOrDirect [<<Arg:f\d+>>{{(,[ij]\d+)?}}] intrinsic:FloatFloatToRawIntBits
/// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Arg>>]
/// CHECK-DAG: <<Result:i\d+>> Select [<<Raw>>,{{i\d+}},<<Cond>>]
/// CHECK-DAG: Return [<<Result>>]
@@ -34,7 +35,8 @@
/// CHECK-DAG: Return [<<Result>>]
//
/// CHECK-START: long Main.d2long(double) instruction_simplifier (after)
- /// CHECK-DAG: <<Raw:j\d+>> InvokeStaticOrDirect [<<Arg:d\d+>>] intrinsic:DoubleDoubleToRawLongBits
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: <<Raw:j\d+>> InvokeStaticOrDirect [<<Arg:d\d+>>{{(,[ij]\d+)?}}] intrinsic:DoubleDoubleToRawLongBits
/// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Arg>>]
/// CHECK-DAG: <<Result:j\d+>> Select [<<Raw>>,{{j\d+}},<<Cond>>]
/// CHECK-DAG: Return [<<Result>>]
diff --git a/test/578-bce-visit/expected.txt b/test/578-bce-visit/expected.txt
new file mode 100644
index 0000000..28fca2c
--- /dev/null
+++ b/test/578-bce-visit/expected.txt
@@ -0,0 +1,2 @@
+exception caught
+FUZZ result = 1001 16
diff --git a/test/578-bce-visit/info.txt b/test/578-bce-visit/info.txt
new file mode 100644
index 0000000..2462e1b
--- /dev/null
+++ b/test/578-bce-visit/info.txt
@@ -0,0 +1 @@
+Fuzz test that exposed a bug in the way bounds check elimination visits blocks.
diff --git a/test/578-bce-visit/src/Main.java b/test/578-bce-visit/src/Main.java
new file mode 100644
index 0000000..b0e920e
--- /dev/null
+++ b/test/578-bce-visit/src/Main.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Automatically generated fuzz test that exposed a bug in the way bounds
+ * check elimination visits basic blocks. If, after dynamic bce, the same
+ * block would be visited again, then static length based bce would incorrectly
+ * feed information back to itself and remove a necessary bounds check.
+ */
+public class Main {
+
+ private static int[][][] mA = new int[10][10][10];
+
+ private static int mX = 17;
+
+ private static int doit() {
+ int l0 = (((++mA[7][2][8]) <= mA[0][1][3]) ? (++mA[9][0][5]) : ((( -mA[0][7][0]) * ((mX == mX) ? 180 : mX)) + (mA[7][8][8]++)));
+ mA[1][0][4] -= mX;
+ int l1 = (((l0 >= ( ~mA[6][7][5])) && ((921 <= l0) && (mA[3][9][6] > l0))) ? mX : (l0--));
+ int l2 = ( -384);
+ for (int i0 = 7 - 1; i0 >= 1; i0--) {
+ mA[6][0][0] -= ((((l0++) == ( -mX)) ? (((mA[3][i0][1] > 503) || (mX <= i0)) ? (--l0) : (l0--)) : mX) - ( ~(mX--)));
+ int l3 = 24;
+ int l4 = ((l2--) & mX);
+ for (int i1 = i0-2 - 1; i1 >= 3; i1--) {
+ for (int i2 = 2; i2 < i0; i2++) {
+ mA[i0][4][l3] >>= 1;
+ }
+ }
+ }
+ return 1;
+ }
+
+ public static void main(String[] args) {
+ int k = 1;
+ for (int i0 = 0; i0 < 10; i0++)
+ for (int i1 = 0; i1 < 10; i1++)
+ for (int i2 = 0; i2 < 10; i2++)
+ mA[i0][i1][i2] = k++;
+ try {
+ k = doit();
+ } catch (Exception e) {
+ System.out.println("exception caught");
+ }
+ System.out.println("FUZZ result = " + k + " " + mX);
+ }
+}
diff --git a/test/578-polymorphic-inlining/expected.txt b/test/578-polymorphic-inlining/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/578-polymorphic-inlining/expected.txt
diff --git a/test/578-polymorphic-inlining/info.txt b/test/578-polymorphic-inlining/info.txt
new file mode 100644
index 0000000..77ec49b
--- /dev/null
+++ b/test/578-polymorphic-inlining/info.txt
@@ -0,0 +1,2 @@
+Regression test for polymorphic inlining that used to wrongly propagate
+the try/catch information of new blocks.
diff --git a/test/578-polymorphic-inlining/src/Main.java b/test/578-polymorphic-inlining/src/Main.java
new file mode 100644
index 0000000..22d33d0
--- /dev/null
+++ b/test/578-polymorphic-inlining/src/Main.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ for (int i = 0; i < 20000; ++i) {
+ $noinline$testInTryCatch(new Main(), i);
+ $noinline$testInTryCatch(new SubMain(), i);
+ }
+ }
+
+ public static void $noinline$testInTryCatch(Main m, int i) {
+ final int value;
+ try {
+ throw new Exception();
+ } catch (Exception e) {
+ // The polymorphic inlining of 'willInlineVoid' used to generate an
+ // incorrect graph, by setting the inlined blocks as catch blocks.
+ m.willInlineVoid(i);
+ return;
+ }
+ }
+
+ public void willInlineVoid(int i) {
+ if (i == 0) {
+ $noinline$foo();
+ } else {
+ $noinline$foo();
+ $noinline$foo();
+ }
+ }
+
+ public static void $noinline$foo() {
+ if (doThrow) throw new Error("");
+ }
+
+ public static boolean doThrow;
+}
+
+class SubMain extends Main {
+ public void willInlineVoid(int i) {
+ }
+}
diff --git a/test/579-inline-infinite/expected.txt b/test/579-inline-infinite/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/579-inline-infinite/expected.txt
diff --git a/test/579-inline-infinite/info.txt b/test/579-inline-infinite/info.txt
new file mode 100644
index 0000000..6fb917c
--- /dev/null
+++ b/test/579-inline-infinite/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing.
+Inlining of a method with an infinite loop used to cause a crash.
diff --git a/test/579-inline-infinite/src/Main.java b/test/579-inline-infinite/src/Main.java
new file mode 100644
index 0000000..f214ed4
--- /dev/null
+++ b/test/579-inline-infinite/src/Main.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Infinite implements Runnable {
+ public int field;
+
+ private final void $noinline$infinite() {
+ while(true) {
+ field++;
+ }
+ }
+
+ public void run() {
+ $noinline$infinite();
+ }
+}
+
+public class Main {
+ public static void main(String[] args) {
+ Thread thr = new Thread(new Infinite());
+ thr.setDaemon(true);
+ thr.start();
+ // This is a compiler test, so just finish.
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 167ad85..e05d4f5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -222,8 +222,14 @@
# Disable 097-duplicate-method while investigating (broken by latest Jack release, b/27358065)
+# Disable 137-cfi (b/27391690).
+# Disable 536-checker-needs-access-check and 537-checker-inline-and-unverified (b/27425061)
+# Disable 577-profile-foreign-dex (b/27454772).
TEST_ART_BROKEN_ALL_TARGET_TESTS := \
- 097-duplicate-method
+ 097-duplicate-method \
+ 536-checker-needs-access-check \
+ 537-checker-inline-and-unverified \
+ 577-profile-foreign-dex \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -545,6 +551,7 @@
$(TEST_ART_BROKEN_INTERPRETER_RUN_TESTS)
# Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
+# 145: Test sometimes times out in read barrier configuration (b/27467554).
# 484: Baker's fast path based read barrier compiler instrumentation generates code containing
# more parallel moves on x86, thus some Checker assertions may fail.
# 527: On ARM64, the read barrier instrumentation does not support the HArm64IntermediateAddress
@@ -552,6 +559,7 @@
# 537: Expects an array copy to be intrinsified on x86-64, but calling-on-slowpath intrinsics are
# not yet handled in the read barrier configuration.
TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
+ 145-alloc-tracking-stress \
484-checker-register-hints \
527-checker-array-access-split \
537-checker-arraycopy
diff --git a/test/run-test b/test/run-test
index f1875d7..d0f93b9 100755
--- a/test/run-test
+++ b/test/run-test
@@ -677,11 +677,7 @@
# Tests named '<number>-checker-*' will also have their CFGs verified with
# Checker when compiled with Optimizing on host.
if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
- # Jack does not necessarily generate the same DEX output than dx. Because these tests depend
- # on a particular DEX output, keep building them with dx for now (b/19467889).
- USE_JACK="false"
-
- if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" ]; then
+ if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$USE_JACK" = "true" ]; then
# Optimizing has read barrier support for certain architectures
# only. On other architectures, compiling is disabled when read
# barriers are enabled, meaning that we do not produce a CFG file
diff --git a/tools/Android.mk b/tools/Android.mk
index 9a96f7a..bc2fd8c 100644
--- a/tools/Android.mk
+++ b/tools/Android.mk
@@ -19,21 +19,14 @@
# Copy the art shell script to the host's bin directory
include $(CLEAR_VARS)
LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_MODULE := art
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/art $(ACP)
- @echo "Copy: $(PRIVATE_MODULE) ($@)"
- $(copy-file-to-new-target)
- $(hide) chmod 755 $@
+LOCAL_SRC_FILES := art
+include $(BUILD_PREBUILT)
# Copy the art shell script to the target's bin directory
include $(CLEAR_VARS)
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_MODULE := art
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/art $(ACP)
- @echo "Copy: $(PRIVATE_MODULE) ($@)"
- $(copy-file-to-new-target)
- $(hide) chmod 755 $@
+LOCAL_SRC_FILES := art
+include $(BUILD_PREBUILT)
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 6869b04..cfbafde 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -35,16 +35,10 @@
# --- ahat script ----------------
include $(CLEAR_VARS)
LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_MODULE := ahat
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/ahat $(ACP)
- @echo "Copy: $(PRIVATE_MODULE) ($@)"
- $(copy-file-to-new-target)
- $(hide) chmod 755 $@
-
-ahat: $(LOCAL_BUILT_MODULE)
+LOCAL_SRC_FILES := ahat
+include $(BUILD_PREBUILT)
# --- ahat-tests.jar --------------
include $(CLEAR_VARS)
diff --git a/tools/checker/common/logger.py b/tools/checker/common/logger.py
index 28bb458..f13eaf6 100644
--- a/tools/checker/common/logger.py
+++ b/tools/checker/common/logger.py
@@ -13,6 +13,7 @@
# limitations under the License.
from __future__ import print_function
+import collections
import sys
class Logger(object):
@@ -21,7 +22,7 @@
NoOutput, Error, Info = range(3)
class Color(object):
- Default, Blue, Gray, Purple, Red = range(5)
+ Default, Blue, Gray, Purple, Red, Green = range(6)
@staticmethod
def terminalCode(color, out=sys.stdout):
@@ -35,6 +36,8 @@
return '\033[95m'
elif color == Logger.Color.Red:
return '\033[91m'
+ elif color == Logger.Color.Green:
+ return '\033[32m'
else:
return '\033[0m'
@@ -52,19 +55,34 @@
out.flush()
@staticmethod
- def fail(msg, file=None, line=-1):
- location = ""
- if file:
- location += file + ":"
- if line > 0:
- location += str(line) + ":"
- if location:
- location += " "
-
- Logger.log(location, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr)
+ def fail(msg, file=None, line=-1, lineText=None, variables=None):
Logger.log("error: ", Logger.Level.Error, color=Logger.Color.Red, newLine=False, out=sys.stderr)
Logger.log(msg, Logger.Level.Error, out=sys.stderr)
- sys.exit(msg)
+
+ if lineText:
+ loc = ""
+ if file:
+ loc += file + ":"
+ if line > 0:
+ loc += str(line) + ":"
+ if loc:
+ loc += " "
+ Logger.log(loc, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr)
+ Logger.log(lineText, Logger.Level.Error, out=sys.stderr)
+
+ if variables:
+ longestName = 0
+ for var in variables:
+ longestName = max(longestName, len(var))
+
+ for var in collections.OrderedDict(sorted(variables.items())):
+ padding = ' ' * (longestName - len(var))
+ Logger.log(var, Logger.Level.Error, color=Logger.Color.Green, newLine=False, out=sys.stderr)
+ Logger.log(padding, Logger.Level.Error, newLine=False, out=sys.stderr)
+ Logger.log(" = ", Logger.Level.Error, newLine=False, out=sys.stderr)
+ Logger.log(variables[var], Logger.Level.Error, out=sys.stderr)
+
+ sys.exit(1)
@staticmethod
def startTest(name):
@@ -76,6 +94,6 @@
Logger.log("PASS", color=Logger.Color.Blue)
@staticmethod
- def testFailed(msg, file=None, line=-1):
+ def testFailed(msg, assertion, variables):
Logger.log("FAIL", color=Logger.Color.Red)
- Logger.fail(msg, file, line)
+ Logger.fail(msg, assertion.fileName, assertion.lineNo, assertion.originalText, variables)
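For context on the logger.py change above: with the extra lineText and variables arguments, a failed Checker assertion now prints the offending assertion text plus every Checker variable bound so far, with the names colored green and the values aligned on the column after the longest name. A minimal sketch of that alignment logic, using a plain dict and a hypothetical dump_variables helper rather than the tool's Logger class, might look like this:

```python
# Minimal sketch of the variable dump added to Logger.fail above. The helper
# name dump_variables and the plain-dict input are illustrative assumptions,
# not part of the checker tool itself.
import sys

GREEN = '\033[32m'
RESET = '\033[0m'

def dump_variables(variables, out=sys.stderr):
    # Align all values on the column after the longest variable name.
    longest = max((len(name) for name in variables), default=0)
    for name in sorted(variables):
        padding = ' ' * (longest - len(name))
        out.write("{}{}{}{} = {}\n".format(GREEN, name, RESET, padding, variables[name]))

dump_variables({"Raw": "l42", "Arg": "d7", "Result": "l43"})
```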
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
index 3ded074..6ff19d5 100644
--- a/tools/checker/match/file.py
+++ b/tools/checker/match/file.py
@@ -23,9 +23,10 @@
MatchInfo = namedtuple("MatchInfo", ["scope", "variables"])
class MatchFailedException(Exception):
- def __init__(self, assertion, lineNo):
+ def __init__(self, assertion, lineNo, variables):
self.assertion = assertion
self.lineNo = lineNo
+ self.variables = variables
def splitIntoGroups(assertions):
""" Breaks up a list of assertions, grouping instructions which should be
@@ -58,7 +59,7 @@
newVariables = MatchLines(assertion, c1Pass.body[i], variables)
if newVariables is not None:
return MatchInfo(MatchScope(i, i), newVariables)
- raise MatchFailedException(assertion, scope.start)
+ raise MatchFailedException(assertion, scope.start, variables)
def matchDagGroup(assertions, c1Pass, scope, variables):
""" Attempts to find matching `c1Pass` lines for a group of DAG assertions.
@@ -92,12 +93,12 @@
for assertion in assertions:
assert assertion.variant == TestAssertion.Variant.Not
if MatchLines(assertion, line, variables) is not None:
- raise MatchFailedException(assertion, i)
+ raise MatchFailedException(assertion, i, variables)
def testEvalGroup(assertions, scope, variables):
for assertion in assertions:
if not EvaluateLine(assertion, variables):
- raise MatchFailedException(assertion, scope.start)
+ raise MatchFailedException(assertion, scope.start, variables)
def MatchTestCase(testCase, c1Pass):
""" Runs a test case against a C1visualizer graph dump.
@@ -181,8 +182,8 @@
except MatchFailedException as e:
lineNo = c1Pass.startLineNo + e.lineNo
if e.assertion.variant == TestAssertion.Variant.Not:
- Logger.testFailed("NOT assertion matched line {}".format(lineNo),
- e.assertion.fileName, e.assertion.lineNo)
+ msg = "NOT assertion matched line {}"
else:
- Logger.testFailed("Assertion could not be matched starting from line {}".format(lineNo),
- e.assertion.fileName, e.assertion.lineNo)
+ msg = "Assertion could not be matched starting from line {}"
+ msg = msg.format(lineNo)
+ Logger.testFailed(msg, e.assertion, e.variables)
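The file.py changes above all serve one purpose: MatchFailedException now carries the variable bindings accumulated up to the failure point, so the top-level handler can forward them to Logger.testFailed together with the failing assertion. A stripped-down sketch of that pattern, with hypothetical names rather than the tool's real MatchLines/MatchTestCase API, could look like:

```python
# Simplified sketch of the pattern used above: the matcher raises an exception
# that carries the bindings accumulated so far, so the reporting code can show
# them without re-running the match. Names here are illustrative only.
class MatchFailed(Exception):
    def __init__(self, assertion, line_no, variables):
        super(MatchFailed, self).__init__(assertion)
        self.assertion = assertion
        self.line_no = line_no
        self.variables = variables

def match_all(assertions, lines):
    variables = {}
    for i, (assertion, line) in enumerate(zip(assertions, lines)):
        if assertion not in line:              # stand-in for MatchLines()
            raise MatchFailed(assertion, i, dict(variables))
        variables[assertion] = line

try:
    match_all(["Add", "Return"], ["v1 Add v2", "Goto"])
except MatchFailed as e:
    print("failed on line", e.line_no, "with", e.variables)
```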
diff --git a/tools/checker/match/line.py b/tools/checker/match/line.py
index 08f001f..ed48a53 100644
--- a/tools/checker/match/line.py
+++ b/tools/checker/match/line.py
@@ -35,15 +35,13 @@
if name in variables:
return variables[name]
else:
- Logger.testFailed("Missing definition of variable \"{}\"".format(name),
- pos.fileName, pos.lineNo)
+ Logger.testFailed("Missing definition of variable \"{}\"".format(name), pos, variables)
def setVariable(name, value, variables, pos):
if name not in variables:
return variables.copyWith(name, value)
else:
- Logger.testFailed("Multiple definitions of variable \"{}\"".format(name),
- pos.fileName, pos.lineNo)
+ Logger.testFailed("Multiple definitions of variable \"{}\"".format(name), pos, variables)
def matchWords(checkerWord, stringWord, variables, pos):
""" Attempts to match a list of TestExpressions against a string.
diff --git a/tools/dexfuzz/Android.mk b/tools/dexfuzz/Android.mk
index 1580bc3..473f6de 100644
--- a/tools/dexfuzz/Android.mk
+++ b/tools/dexfuzz/Android.mk
@@ -27,14 +27,10 @@
# --- dexfuzz script ----------------
include $(CLEAR_VARS)
LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_MODULE := dexfuzz
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/dexfuzz $(ACP)
- @echo "Copy: $(PRIVATE_MODULE) ($@)"
- $(copy-file-to-new-target)
- $(hide) chmod 755 $@
+LOCAL_SRC_FILES := dexfuzz
+include $(BUILD_PREBUILT)
# --- dexfuzz script with core image dependencies ----------------
fuzzer: $(LOCAL_BUILT_MODULE) $(HOST_CORE_IMG_OUTS)