MIPS64: Support short and long branches
Change-Id: I618c960bd211048166d9fde78d4106bd3ca42b3a
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index ba2525e..107d5bb 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -19,15 +19,73 @@
#include "base/bit_utils.h"
#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "memory_region.h"
#include "thread.h"
namespace art {
namespace mips64 {
+void Mips64Assembler::FinalizeCode() {
+ for (auto& exception_block : exception_blocks_) {
+ EmitExceptionPoll(&exception_block);
+ }
+ PromoteBranches();
+}
+
+void Mips64Assembler::FinalizeInstructions(const MemoryRegion& region) {
+ EmitBranches();
+ Assembler::FinalizeInstructions(region);
+ PatchCFI();
+}
+
+void Mips64Assembler::PatchCFI() {
+ if (cfi().NumberOfDelayedAdvancePCs() == 0u) {
+ return;
+ }
+
+ typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
+ const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
+ const std::vector<uint8_t>& old_stream = data.first;
+ const std::vector<DelayedAdvancePC>& advances = data.second;
+
+ // Refill our data buffer with patched opcodes.
+ cfi().ReserveCFIStream(old_stream.size() + advances.size() + 16);
+ size_t stream_pos = 0;
+ for (const DelayedAdvancePC& advance : advances) {
+ DCHECK_GE(advance.stream_pos, stream_pos);
+ // Copy old data up to the point where the advance was issued.
+ cfi().AppendRawData(old_stream, stream_pos, advance.stream_pos);
+ stream_pos = advance.stream_pos;
+ // Insert the advance command with its final offset.
+ size_t final_pc = GetAdjustedPosition(advance.pc);
+ cfi().AdvancePC(final_pc);
+ }
+ // Copy the final segment if any.
+ cfi().AppendRawData(old_stream, stream_pos, old_stream.size());
+}
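+// Note: the delayed advance-PC entries recorded by the CFI writer hold pre-finalization
+// PCs. PatchCFI() runs only after PromoteBranches() may have moved code around, so
+// GetAdjustedPosition() is used above to map each recorded PC to its final location
+// before the advance is re-emitted.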
+
+void Mips64Assembler::EmitBranches() {
+ CHECK(!overwriting_);
+ // Switch from appending instructions at the end of the buffer to overwriting
+ // existing instructions (branch placeholders) in the buffer.
+ overwriting_ = true;
+ for (auto& branch : branches_) {
+ EmitBranch(&branch);
+ }
+ overwriting_ = false;
+}
+
void Mips64Assembler::Emit(uint32_t value) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<uint32_t>(value);
+ if (overwriting_) {
+ // Branches to labels are emitted into their placeholders here.
+ buffer_.Store<uint32_t>(overwrite_location_, value);
+ overwrite_location_ += sizeof(uint32_t);
+ } else {
+ // Other instructions are simply appended at the end here.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<uint32_t>(value);
+ }
}
void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
@@ -82,15 +140,16 @@
void Mips64Assembler::EmitI21(int opcode, GpuRegister rs, uint32_t imm21) {
CHECK_NE(rs, kNoGpuRegister);
+ CHECK(IsUint<21>(imm21)) << imm21;
uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
static_cast<uint32_t>(rs) << kRsShift |
- (imm21 & 0x1FFFFF);
+ imm21;
Emit(encoding);
}
-void Mips64Assembler::EmitJ(int opcode, uint32_t addr26) {
- uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
- (addr26 & 0x3FFFFFF);
+void Mips64Assembler::EmitI26(int opcode, uint32_t imm26) {
+ CHECK(IsUint<26>(imm26)) << imm26;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
Emit(encoding);
}
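+// Encoding sanity check (illustrative, not part of this change): with kOpcodeShift == 26,
+// EmitI26(0x32, 0x100) for bc produces (0x32 << 26) | 0x100 == 0xC8000100.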
@@ -428,26 +487,6 @@
EmitI(0xb, rs, rt, imm16);
}
-void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
- EmitI(0x4, rs, rt, imm16);
- Nop();
-}
-
-void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
- EmitI(0x5, rs, rt, imm16);
- Nop();
-}
-
-void Mips64Assembler::J(uint32_t addr26) {
- EmitJ(0x2, addr26);
- Nop();
-}
-
-void Mips64Assembler::Jal(uint32_t addr26) {
- EmitJ(0x3, addr26);
- Nop();
-}
-
void Mips64Assembler::Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, rd, 0, 0x35);
}
@@ -474,7 +513,6 @@
void Mips64Assembler::Jalr(GpuRegister rd, GpuRegister rs) {
EmitR(0, rs, static_cast<GpuRegister>(0), rd, 0, 0x09);
- Nop();
}
void Mips64Assembler::Jalr(GpuRegister rs) {
@@ -489,6 +527,15 @@
EmitI(0x3B, rs, static_cast<GpuRegister>(0x1E), imm16);
}
+void Mips64Assembler::Addiupc(GpuRegister rs, uint32_t imm19) {
+ CHECK(IsUint<19>(imm19)) << imm19;
+ EmitI21(0x3B, rs, imm19);
+}
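+// Note (assumption based on the MIPS R6 PCREL encoding): addiupc shares major opcode
+// 0x3B with auipc above; its 2-bit sub-opcode (0b00) occupies the top bits of the
+// 21-bit field, which is why the 19-bit immediate can be passed straight to EmitI21().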
+
+void Mips64Assembler::Bc(uint32_t imm26) {
+ EmitI26(0x32, imm26);
+}
+
void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
}
@@ -549,14 +596,14 @@
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x8, (rs < rt) ? rs : rt, (rs < rt) ? rt : rs, imm16);
+ EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16);
}
void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x18, (rs < rt) ? rs : rt, (rs < rt) ? rt : rs, imm16);
+ EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16);
}
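+// Note on the std::min/std::max ordering above: per the R6 POP10/POP30 encodings,
+// beqc/bnec require rs < rt; with the operands the other way around the same bit
+// pattern decodes as a different instruction (bovc/bnvc).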
void Mips64Assembler::Beqzc(GpuRegister rs, uint32_t imm21) {
@@ -569,6 +616,65 @@
EmitI21(0x3E, rs, imm21);
}
+void Mips64Assembler::EmitBcondc(BranchCondition cond,
+ GpuRegister rs,
+ GpuRegister rt,
+ uint32_t imm16_21) {
+ switch (cond) {
+ case kCondLT:
+ Bltc(rs, rt, imm16_21);
+ break;
+ case kCondGE:
+ Bgec(rs, rt, imm16_21);
+ break;
+ case kCondLE:
+ Bgec(rt, rs, imm16_21);
+ break;
+ case kCondGT:
+ Bltc(rt, rs, imm16_21);
+ break;
+ case kCondLTZ:
+ CHECK_EQ(rt, ZERO);
+ Bltzc(rs, imm16_21);
+ break;
+ case kCondGEZ:
+ CHECK_EQ(rt, ZERO);
+ Bgezc(rs, imm16_21);
+ break;
+ case kCondLEZ:
+ CHECK_EQ(rt, ZERO);
+ Blezc(rs, imm16_21);
+ break;
+ case kCondGTZ:
+ CHECK_EQ(rt, ZERO);
+ Bgtzc(rs, imm16_21);
+ break;
+ case kCondEQ:
+ Beqc(rs, rt, imm16_21);
+ break;
+ case kCondNE:
+ Bnec(rs, rt, imm16_21);
+ break;
+ case kCondEQZ:
+ CHECK_EQ(rt, ZERO);
+ Beqzc(rs, imm16_21);
+ break;
+ case kCondNEZ:
+ CHECK_EQ(rt, ZERO);
+ Bnezc(rs, imm16_21);
+ break;
+ case kCondLTU:
+ Bltuc(rs, rt, imm16_21);
+ break;
+ case kCondGEU:
+ Bgeuc(rs, rt, imm16_21);
+ break;
+ case kUncond:
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ UNREACHABLE();
+ }
+}
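+// Note: kCondLE and kCondGT above are synthesized by swapping the operands of bgec and
+// bltc respectively (rs <= rt iff rt >= rs), as R6 provides no two-register blec/bgtc.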
+
void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
}
@@ -925,15 +1031,6 @@
}
}
-void Mips64Assembler::Addiu32(GpuRegister rt, GpuRegister rs, int32_t value, GpuRegister rtmp) {
- if (IsInt<16>(value)) {
- Addiu(rt, rs, value);
- } else {
- LoadConst32(rtmp, value);
- Addu(rt, rs, rtmp);
- }
-}
-
void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
if (IsInt<16>(value)) {
Daddiu(rt, rs, value);
@@ -943,177 +1040,621 @@
}
}
-//
-// MIPS64R6 branches
-//
-//
-// Unconditional (pc + 32-bit signed offset):
-//
-// auipc at, ofs_high
-// jic at, ofs_low
-// // no delay/forbidden slot
-//
-//
-// Conditional (pc + 32-bit signed offset):
-//
-// b<cond>c reg, +2 // skip next 2 instructions
-// auipc at, ofs_high
-// jic at, ofs_low
-// // no delay/forbidden slot
-//
-//
-// Unconditional (pc + 32-bit signed offset) and link:
-//
-// auipc reg, ofs_high
-// daddiu reg, ofs_low
-// jialc reg, 0
-// // no delay/forbidden slot
-//
-//
-// TODO: use shorter instruction sequences whenever possible.
-//
+void Mips64Assembler::Branch::InitShortOrLong(Mips64Assembler::Branch::OffsetBits offset_size,
+ Mips64Assembler::Branch::Type short_type,
+ Mips64Assembler::Branch::Type long_type) {
+ type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
+}
-void Mips64Assembler::Bind(Label* label) {
+void Mips64Assembler::Branch::InitializeType(bool is_call) {
+ OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
+ if (is_call) {
+ InitShortOrLong(offset_size, kCall, kLongCall);
+ } else if (condition_ == kUncond) {
+ InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
+ } else {
+ if (condition_ == kCondEQZ || condition_ == kCondNEZ) {
+ // Special case for beqzc/bnezc with a longer offset than in other b<cond>c instructions.
+ type_ = (offset_size <= kOffset23) ? kCondBranch : kLongCondBranch;
+ } else {
+ InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
+ }
+ }
+ old_type_ = type_;
+}
+
+bool Mips64Assembler::Branch::IsNop(BranchCondition condition, GpuRegister lhs, GpuRegister rhs) {
+ switch (condition) {
+ case kCondLT:
+ case kCondGT:
+ case kCondNE:
+ case kCondLTU:
+ return lhs == rhs;
+ default:
+ return false;
+ }
+}
+
+bool Mips64Assembler::Branch::IsUncond(BranchCondition condition,
+ GpuRegister lhs,
+ GpuRegister rhs) {
+ switch (condition) {
+ case kUncond:
+ return true;
+ case kCondGE:
+ case kCondLE:
+ case kCondEQ:
+ case kCondGEU:
+ return lhs == rhs;
+ default:
+ return false;
+ }
+}
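+// Illustrative examples (registers chosen arbitrarily): Bcond(label, kCondNE, T0, T0)
+// can never be taken, so Bcond() emits nothing for it; Bcond(label, kCondEQ, T0, T0)
+// is always taken, so the Branch constructor below turns it into an unconditional
+// branch.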
+
+Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(ZERO),
+ rhs_reg_(ZERO),
+ condition_(kUncond) {
+ InitializeType(false);
+}
+
+Mips64Assembler::Branch::Branch(uint32_t location,
+ uint32_t target,
+ Mips64Assembler::BranchCondition condition,
+ GpuRegister lhs_reg,
+ GpuRegister rhs_reg)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(lhs_reg),
+ rhs_reg_(rhs_reg),
+ condition_(condition) {
+ CHECK_NE(condition, kUncond);
+ switch (condition) {
+ case kCondEQ:
+ case kCondNE:
+ case kCondLT:
+ case kCondGE:
+ case kCondLE:
+ case kCondGT:
+ case kCondLTU:
+ case kCondGEU:
+ CHECK_NE(lhs_reg, ZERO);
+ CHECK_NE(rhs_reg, ZERO);
+ break;
+ case kCondLTZ:
+ case kCondGEZ:
+ case kCondLEZ:
+ case kCondGTZ:
+ case kCondEQZ:
+ case kCondNEZ:
+ CHECK_NE(lhs_reg, ZERO);
+ CHECK_EQ(rhs_reg, ZERO);
+ break;
+ case kUncond:
+ UNREACHABLE();
+ }
+ CHECK(!IsNop(condition, lhs_reg, rhs_reg));
+ if (IsUncond(condition, lhs_reg, rhs_reg)) {
+ // Branch condition is always true, make the branch unconditional.
+ condition_ = kUncond;
+ }
+ InitializeType(false);
+}
+
+Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, GpuRegister indirect_reg)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(indirect_reg),
+ rhs_reg_(ZERO),
+ condition_(kUncond) {
+ CHECK_NE(indirect_reg, ZERO);
+ CHECK_NE(indirect_reg, AT);
+ InitializeType(true);
+}
+
+Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
+ Mips64Assembler::BranchCondition cond) {
+ switch (cond) {
+ case kCondLT:
+ return kCondGE;
+ case kCondGE:
+ return kCondLT;
+ case kCondLE:
+ return kCondGT;
+ case kCondGT:
+ return kCondLE;
+ case kCondLTZ:
+ return kCondGEZ;
+ case kCondGEZ:
+ return kCondLTZ;
+ case kCondLEZ:
+ return kCondGTZ;
+ case kCondGTZ:
+ return kCondLEZ;
+ case kCondEQ:
+ return kCondNE;
+ case kCondNE:
+ return kCondEQ;
+ case kCondEQZ:
+ return kCondNEZ;
+ case kCondNEZ:
+ return kCondEQZ;
+ case kCondLTU:
+ return kCondGEU;
+ case kCondGEU:
+ return kCondLTU;
+ case kUncond:
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ }
+ UNREACHABLE();
+}
+
+Mips64Assembler::Branch::Type Mips64Assembler::Branch::GetType() const {
+ return type_;
+}
+
+Mips64Assembler::BranchCondition Mips64Assembler::Branch::GetCondition() const {
+ return condition_;
+}
+
+GpuRegister Mips64Assembler::Branch::GetLeftRegister() const {
+ return lhs_reg_;
+}
+
+GpuRegister Mips64Assembler::Branch::GetRightRegister() const {
+ return rhs_reg_;
+}
+
+uint32_t Mips64Assembler::Branch::GetTarget() const {
+ return target_;
+}
+
+uint32_t Mips64Assembler::Branch::GetLocation() const {
+ return location_;
+}
+
+uint32_t Mips64Assembler::Branch::GetOldLocation() const {
+ return old_location_;
+}
+
+uint32_t Mips64Assembler::Branch::GetLength() const {
+ return branch_info_[type_].length;
+}
+
+uint32_t Mips64Assembler::Branch::GetOldLength() const {
+ return branch_info_[old_type_].length;
+}
+
+uint32_t Mips64Assembler::Branch::GetSize() const {
+ return GetLength() * sizeof(uint32_t);
+}
+
+uint32_t Mips64Assembler::Branch::GetOldSize() const {
+ return GetOldLength() * sizeof(uint32_t);
+}
+
+uint32_t Mips64Assembler::Branch::GetEndLocation() const {
+ return GetLocation() + GetSize();
+}
+
+uint32_t Mips64Assembler::Branch::GetOldEndLocation() const {
+ return GetOldLocation() + GetOldSize();
+}
+
+bool Mips64Assembler::Branch::IsLong() const {
+ switch (type_) {
+ // Short branches.
+ case kUncondBranch:
+ case kCondBranch:
+ case kCall:
+ return false;
+ // Long branches.
+ case kLongUncondBranch:
+ case kLongCondBranch:
+ case kLongCall:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+bool Mips64Assembler::Branch::IsResolved() const {
+ return target_ != kUnresolved;
+}
+
+Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSize() const {
+ OffsetBits offset_size =
+ (type_ == kCondBranch && (condition_ == kCondEQZ || condition_ == kCondNEZ))
+ ? kOffset23
+ : branch_info_[type_].offset_size;
+ return offset_size;
+}
+
+Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSizeNeeded(uint32_t location,
+ uint32_t target) {
+ // For unresolved targets assume the shortest encoding
+ // (later it will be made longer if needed).
+ if (target == kUnresolved)
+ return kOffset16;
+ int64_t distance = static_cast<int64_t>(target) - location;
+ // To simplify calculations in composite branches consisting of multiple instructions,
+ // bump up the distance by a value larger than the max byte size of a composite branch.
+ distance += (distance >= 0) ? kMaxBranchSize : -kMaxBranchSize;
+ if (IsInt<kOffset16>(distance))
+ return kOffset16;
+ else if (IsInt<kOffset18>(distance))
+ return kOffset18;
+ else if (IsInt<kOffset21>(distance))
+ return kOffset21;
+ else if (IsInt<kOffset23>(distance))
+ return kOffset23;
+ else if (IsInt<kOffset28>(distance))
+ return kOffset28;
+ return kOffset32;
+}
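+// Worked example (illustrative): for location == 0x1000 and target == 0x1100 the raw
+// distance is 0x100 bytes; even after the conservative kMaxBranchSize bump it still
+// fits in a signed 16-bit value, so kOffset16 is returned and a short branch suffices.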
+
+void Mips64Assembler::Branch::Resolve(uint32_t target) {
+ target_ = target;
+}
+
+void Mips64Assembler::Branch::Relocate(uint32_t expand_location, uint32_t delta) {
+ if (location_ > expand_location) {
+ location_ += delta;
+ }
+ if (!IsResolved()) {
+ return; // Don't know the target yet.
+ }
+ if (target_ > expand_location) {
+ target_ += delta;
+ }
+}
+
+void Mips64Assembler::Branch::PromoteToLong() {
+ switch (type_) {
+ // Short branches.
+ case kUncondBranch:
+ type_ = kLongUncondBranch;
+ break;
+ case kCondBranch:
+ type_ = kLongCondBranch;
+ break;
+ case kCall:
+ type_ = kLongCall;
+ break;
+ default:
+ // Note: 'type_' is already long.
+ break;
+ }
+ CHECK(IsLong());
+}
+
+uint32_t Mips64Assembler::Branch::PromoteIfNeeded(uint32_t max_short_distance) {
+ // If the branch is still unresolved or already long, nothing to do.
+ if (IsLong() || !IsResolved()) {
+ return 0;
+ }
+ // Promote the short branch to long if the offset size is too small
+ // to hold the distance between location_ and target_.
+ if (GetOffsetSizeNeeded(location_, target_) > GetOffsetSize()) {
+ PromoteToLong();
+ uint32_t old_size = GetOldSize();
+ uint32_t new_size = GetSize();
+ CHECK_GT(new_size, old_size);
+ return new_size - old_size;
+ }
+ // The following logic is for debugging/testing purposes.
+ // Promote some short branches to long when it's not really required.
+ if (UNLIKELY(max_short_distance != std::numeric_limits<uint32_t>::max())) {
+ int64_t distance = static_cast<int64_t>(target_) - location_;
+ distance = (distance >= 0) ? distance : -distance;
+ if (distance >= max_short_distance) {
+ PromoteToLong();
+ uint32_t old_size = GetOldSize();
+ uint32_t new_size = GetSize();
+ CHECK_GT(new_size, old_size);
+ return new_size - old_size;
+ }
+ }
+ return 0;
+}
+
+uint32_t Mips64Assembler::Branch::GetOffsetLocation() const {
+ return location_ + branch_info_[type_].instr_offset * sizeof(uint32_t);
+}
+
+uint32_t Mips64Assembler::Branch::GetOffset() const {
+ CHECK(IsResolved());
+ uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
+ // Calculate the byte distance between instructions and also account for
+ // different PC-relative origins.
+ uint32_t offset = target_ - GetOffsetLocation() - branch_info_[type_].pc_org * sizeof(uint32_t);
+ // Prepare the offset for encoding into the instruction(s).
+ offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
+ return offset;
+}
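+// Worked example (illustrative): a kUncondBranch at location 0x100 targeting 0x200 has
+// instr_offset 0, pc_org 1 and offset_shift 2, so GetOffset() yields
+// (0x200 - 0x100 - 4) >> 2 == 0x3F; the hardware then computes PC + 4 + (0x3F << 2),
+// i.e. 0x104 + 0xFC == 0x200.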
+
+Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) {
+ CHECK_LT(branch_id, branches_.size());
+ return &branches_[branch_id];
+}
+
+const Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) const {
+ CHECK_LT(branch_id, branches_.size());
+ return &branches_[branch_id];
+}
+
+void Mips64Assembler::Bind(Mips64Label* label) {
CHECK(!label->IsBound());
- int32_t bound_pc = buffer_.Size();
+ uint32_t bound_pc = buffer_.Size();
- // Walk the list of the branches (auipc + jic pairs) referring to and preceding this label.
- // Embed the previously unknown pc-relative addresses in them.
+ // Walk the list of branches referring to and preceding this label.
+ // Store the previously unknown target addresses in them.
while (label->IsLinked()) {
- int32_t position = label->Position();
- // Extract the branch (instruction pair)
- uint32_t auipc = buffer_.Load<uint32_t>(position);
- uint32_t jic = buffer_.Load<uint32_t>(position + 4); // actually, jic or daddiu
+ uint32_t branch_id = label->Position();
+ Branch* branch = GetBranch(branch_id);
+ branch->Resolve(bound_pc);
- // Extract the location of the previous pair in the list (walking the list backwards;
- // the previous pair location was stored in the immediate operands of the instructions)
- int32_t prev = (auipc << 16) | (jic & 0xFFFF);
-
- // Get the pc-relative address
- uint32_t offset = bound_pc - position;
- offset += (offset & 0x8000) << 1; // account for sign extension in jic/daddiu
-
- // Embed it in the two instructions
- auipc = (auipc & 0xFFFF0000) | (offset >> 16);
- jic = (jic & 0xFFFF0000) | (offset & 0xFFFF);
-
- // Save the adjusted instructions
- buffer_.Store<uint32_t>(position, auipc);
- buffer_.Store<uint32_t>(position + 4, jic);
+ uint32_t branch_location = branch->GetLocation();
+ // Extract the location of the previous branch in the list (walking the list backwards;
+ // the previous branch ID was stored in the space reserved for this branch).
+ uint32_t prev = buffer_.Load<uint32_t>(branch_location);
// On to the previous branch in the list...
label->position_ = prev;
}
- // Now make the label object contain its own location
- // (it will be used by the branches referring to and following this label)
+ // Now make the label object contain its own location (relative to the end of the preceding
+ // branch, if any; it will be used by the branches referring to and following this label).
+ label->prev_branch_id_plus_one_ = branches_.size();
+ if (label->prev_branch_id_plus_one_) {
+ uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
+ const Branch* branch = GetBranch(branch_id);
+ bound_pc -= branch->GetEndLocation();
+ }
label->BindTo(bound_pc);
}
-void Mips64Assembler::B(Label* label) {
- if (label->IsBound()) {
- // Branch backwards (to a preceding label), distance is known
- uint32_t offset = label->Position() - buffer_.Size();
- CHECK_LE(static_cast<int32_t>(offset), 0);
- offset += (offset & 0x8000) << 1; // account for sign extension in jic
- Auipc(AT, offset >> 16);
- Jic(AT, offset);
- } else {
- // Branch forward (to a following label), distance is unknown
- int32_t position = buffer_.Size();
- // The first branch forward will have 0 in its pc-relative address (copied from label's
- // position). It will be the terminator of the list of forward-reaching branches.
- uint32_t prev = label->position_;
- Auipc(AT, prev >> 16);
- Jic(AT, prev);
- // Now make the link object point to the location of this branch
- // (this forms a linked list of branches preceding this label)
- label->LinkTo(position);
+uint32_t Mips64Assembler::GetLabelLocation(Mips64Label* label) const {
+ CHECK(label->IsBound());
+ uint32_t target = label->Position();
+ if (label->prev_branch_id_plus_one_) {
+ // Get label location based on the branch preceding it.
+ uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
+ const Branch* branch = GetBranch(branch_id);
+ target += branch->GetEndLocation();
+ }
+ return target;
+}
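+// Note: as of this change a bound label's Position() is stored relative to the end of
+// the last branch created before the bind (see Bind() above). That keeps it valid when
+// earlier branches are later promoted and grow; GetLabelLocation() adds the branch's
+// current end location back to recover the absolute target.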
+
+uint32_t Mips64Assembler::GetAdjustedPosition(uint32_t old_position) {
+ // We can reconstruct the adjustment by going through all the branches from the beginning
+ // up to the old_position. Since we expect GetAdjustedPosition() to be called in a loop
+ // with increasing old_position, we can use the data from the last call to continue
+ // where we left off, making the whole loop O(m+n), where m is the number
+ // of positions to adjust and n is the number of branches.
+ if (old_position < last_old_position_) {
+ last_position_adjustment_ = 0;
+ last_old_position_ = 0;
+ last_branch_id_ = 0;
+ }
+ while (last_branch_id_ != branches_.size()) {
+ const Branch* branch = GetBranch(last_branch_id_);
+ if (branch->GetLocation() >= old_position + last_position_adjustment_) {
+ break;
+ }
+ last_position_adjustment_ += branch->GetSize() - branch->GetOldSize();
+ ++last_branch_id_;
+ }
+ last_old_position_ = old_position;
+ return old_position + last_position_adjustment_;
+}
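+// Illustrative example: if exactly one branch located before old_position grew from one
+// instruction to three during promotion, every later position is adjusted by +8 bytes
+// (GetSize() - GetOldSize() == 2 * sizeof(uint32_t)).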
+
+void Mips64Assembler::FinalizeLabeledBranch(Mips64Label* label) {
+ uint32_t length = branches_.back().GetLength();
+ if (!label->IsBound()) {
+ // Branch forward (to a following label), distance is unknown.
+ // The first branch forward will contain 0, serving as the terminator of
+ // the list of forward-reaching branches.
+ Emit(label->position_);
+ length--;
+ // Now make the label object point to this branch
+ // (this forms a linked list of branches preceding this label).
+ uint32_t branch_id = branches_.size() - 1;
+ label->LinkTo(branch_id);
+ }
+ // Reserve space for the branch.
+ while (length--) {
+ Nop();
}
}
-void Mips64Assembler::Jalr(Label* label, GpuRegister indirect_reg) {
- if (label->IsBound()) {
- // Branch backwards (to a preceding label), distance is known
- uint32_t offset = label->Position() - buffer_.Size();
- CHECK_LE(static_cast<int32_t>(offset), 0);
- offset += (offset & 0x8000) << 1; // account for sign extension in daddiu
- Auipc(indirect_reg, offset >> 16);
- Daddiu(indirect_reg, indirect_reg, offset);
- Jialc(indirect_reg, 0);
- } else {
- // Branch forward (to a following label), distance is unknown
- int32_t position = buffer_.Size();
- // The first branch forward will have 0 in its pc-relative address (copied from label's
- // position). It will be the terminator of the list of forward-reaching branches.
- uint32_t prev = label->position_;
- Auipc(indirect_reg, prev >> 16);
- Daddiu(indirect_reg, indirect_reg, prev);
- Jialc(indirect_reg, 0);
- // Now make the link object point to the location of this branch
- // (this forms a linked list of branches preceding this label)
- label->LinkTo(position);
+void Mips64Assembler::Buncond(Mips64Label* label) {
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(buffer_.Size(), target);
+ FinalizeLabeledBranch(label);
+}
+
+void Mips64Assembler::Bcond(Mips64Label* label,
+ BranchCondition condition,
+ GpuRegister lhs,
+ GpuRegister rhs) {
+ // If lhs == rhs, this can be a NOP.
+ if (Branch::IsNop(condition, lhs, rhs)) {
+ return;
+ }
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(buffer_.Size(), target, condition, lhs, rhs);
+ FinalizeLabeledBranch(label);
+}
+
+void Mips64Assembler::Call(Mips64Label* label, GpuRegister indirect_reg) {
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(buffer_.Size(), target, indirect_reg);
+ FinalizeLabeledBranch(label);
+}
+
+void Mips64Assembler::PromoteBranches() {
+ // Promote short branches to long as necessary.
+ bool changed;
+ do {
+ changed = false;
+ for (auto& branch : branches_) {
+ CHECK(branch.IsResolved());
+ uint32_t delta = branch.PromoteIfNeeded();
+ // If this branch has been promoted and needs to expand in size,
+ // relocate all branches by the expansion size.
+ if (delta) {
+ changed = true;
+ uint32_t expand_location = branch.GetLocation();
+ for (auto& branch2 : branches_) {
+ branch2.Relocate(expand_location, delta);
+ }
+ }
+ }
+ } while (changed);
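+ // One pass is not enough: expanding a branch moves the code after it, which can
+ // push the target of another, still-short branch out of range, hence the loop
+ // above repeats until it reaches a fixed point.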
+
+ // Account for branch expansion by resizing the code buffer
+ // and moving the code in it to its final location.
+ size_t branch_count = branches_.size();
+ if (branch_count > 0) {
+ // Resize.
+ Branch& last_branch = branches_[branch_count - 1];
+ uint32_t size_delta = last_branch.GetEndLocation() - last_branch.GetOldEndLocation();
+ uint32_t old_size = buffer_.Size();
+ buffer_.Resize(old_size + size_delta);
+ // Move the code residing between branch placeholders.
+ uint32_t end = old_size;
+ for (size_t i = branch_count; i > 0; ) {
+ Branch& branch = branches_[--i];
+ uint32_t size = end - branch.GetOldEndLocation();
+ buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
+ end = branch.GetOldLocation();
+ }
}
}
-void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Label* label) {
- Bgec(rs, rt, 2);
- B(label);
+// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
+const Mips64Assembler::Branch::BranchInfo Mips64Assembler::Branch::branch_info_[] = {
+ // Short branches.
+ { 1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 }, // kUncondBranch
+ { 2, 0, 1, Mips64Assembler::Branch::kOffset18, 2 }, // kCondBranch
+ // Exception: kOffset23 for beqzc/bnezc
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kCall
+ // Long branches.
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongUncondBranch
+ { 3, 1, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCondBranch
+ { 3, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCall
+};
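+// Legend for the five BranchInfo fields above (see their uses in GetLength(),
+// GetOffsetLocation(), GetOffset() and GetOffsetSize()): length in instructions, 0-based
+// index of the instruction holding the offset (or its high 16 bits), PC-relative origin
+// in instructions past that one, offset width in bits, and the shift applied to the
+// offset when encoding it.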
+
+// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
+void Mips64Assembler::EmitBranch(Mips64Assembler::Branch* branch) {
+ CHECK(overwriting_);
+ overwrite_location_ = branch->GetLocation();
+ uint32_t offset = branch->GetOffset();
+ BranchCondition condition = branch->GetCondition();
+ GpuRegister lhs = branch->GetLeftRegister();
+ GpuRegister rhs = branch->GetRightRegister();
+ switch (branch->GetType()) {
+ // Short branches.
+ case Branch::kUncondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Bc(offset);
+ break;
+ case Branch::kCondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ EmitBcondc(condition, lhs, rhs, offset);
+ Nop(); // TODO: improve by filling the forbidden slot.
+ break;
+ case Branch::kCall:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Addiupc(lhs, offset);
+ Jialc(lhs, 0);
+ break;
+
+ // Long branches.
+ case Branch::kLongUncondBranch:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Jic(AT, Low16Bits(offset));
+ break;
+ case Branch::kLongCondBranch:
+ EmitBcondc(Branch::OppositeCondition(condition), lhs, rhs, 2);
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Jic(AT, Low16Bits(offset));
+ break;
+ case Branch::kLongCall:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in daddiu.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(lhs, High16Bits(offset));
+ Daddiu(lhs, lhs, Low16Bits(offset));
+ Jialc(lhs, 0);
+ break;
+ }
+ CHECK_EQ(overwrite_location_, branch->GetEndLocation());
+ CHECK_LT(branch->GetSize(), static_cast<uint32_t>(Branch::kMaxBranchSize));
}
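+// For reference, the instruction sequences emitted above, per branch type:
+//   kUncondBranch:     bc offset
+//   kCondBranch:       b<cond>c offset
+//                      nop
+//   kCall:             addiupc reg, offset
+//                      jialc reg, 0
+//   kLongUncondBranch: auipc at, offset_high
+//                      jic at, offset_low
+//   kLongCondBranch:   b<!cond>c skip  // over the next two instructions
+//                      auipc at, offset_high
+//                      jic at, offset_low
+//   kLongCall:         auipc reg, offset_high
+//                      daddiu reg, reg, offset_low
+//                      jialc reg, 0
+// The sign-extension fixup, worked through (illustrative): for offset == 0x18000,
+// "offset += (offset & 0x8000) << 1" gives 0x28000; auipc adds 0x2 << 16 to the PC and
+// jic then adds the sign-extended 0x8000 (i.e. -0x8000): 0x20000 - 0x8000 == 0x18000.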
-void Mips64Assembler::Bltzc(GpuRegister rt, Label* label) {
- Bgezc(rt, 2);
- B(label);
+void Mips64Assembler::Bc(Mips64Label* label) {
+ Buncond(label);
}
-void Mips64Assembler::Bgtzc(GpuRegister rt, Label* label) {
- Blezc(rt, 2);
- B(label);
+void Mips64Assembler::Jialc(Mips64Label* label, GpuRegister indirect_reg) {
+ Call(label, indirect_reg);
}
-void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Label* label) {
- Bltc(rs, rt, 2);
- B(label);
+void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondLT, rs, rt);
}
-void Mips64Assembler::Bgezc(GpuRegister rt, Label* label) {
- Bltzc(rt, 2);
- B(label);
+void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondLTZ, rt);
}
-void Mips64Assembler::Blezc(GpuRegister rt, Label* label) {
- Bgtzc(rt, 2);
- B(label);
+void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondGTZ, rt);
}
-void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Label* label) {
- Bgeuc(rs, rt, 2);
- B(label);
+void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondGE, rs, rt);
}
-void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Label* label) {
- Bltuc(rs, rt, 2);
- B(label);
+void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondGEZ, rt);
}
-void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Label* label) {
- Bnec(rs, rt, 2);
- B(label);
+void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondLEZ, rt);
}
-void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Label* label) {
- Beqc(rs, rt, 2);
- B(label);
+void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondLTU, rs, rt);
}
-void Mips64Assembler::Beqzc(GpuRegister rs, Label* label) {
- Bnezc(rs, 2);
- B(label);
+void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondGEU, rs, rt);
}
-void Mips64Assembler::Bnezc(GpuRegister rs, Label* label) {
- Beqzc(rs, 2);
- B(label);
+void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondEQ, rs, rt);
+}
+
+void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
+ Bcond(label, kCondNE, rs, rt);
+}
+
+void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label) {
+ Bcond(label, kCondEQZ, rs);
+}
+
+void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label) {
+ Bcond(label, kCondNEZ, rs);
}
void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
@@ -1256,6 +1797,7 @@
const std::vector<ManagedRegister>& callee_save_regs,
const ManagedRegisterEntrySpills& entry_spills) {
CHECK_ALIGNED(frame_size, kStackAlignment);
+ DCHECK(!overwriting_);
// Increase frame to required size.
IncreaseFrameSize(frame_size);
@@ -1298,6 +1840,7 @@
void Mips64Assembler::RemoveFrame(size_t frame_size,
const std::vector<ManagedRegister>& callee_save_regs) {
CHECK_ALIGNED(frame_size, kStackAlignment);
+ DCHECK(!overwriting_);
cfi_.RememberState();
// Pop callee saves and return address
@@ -1316,6 +1859,7 @@
// Then jump to the return address.
Jr(RA);
+ Nop();
// The CFI should be restored for any code that follows the exit block.
cfi_.RestoreState();
@@ -1324,12 +1868,14 @@
void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
CHECK_ALIGNED(adjust, kFramePointerSize);
+ DCHECK(!overwriting_);
Daddiu64(SP, SP, static_cast<int32_t>(-adjust));
cfi_.AdjustCFAOffset(adjust);
}
void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
CHECK_ALIGNED(adjust, kFramePointerSize);
+ DCHECK(!overwriting_);
Daddiu64(SP, SP, static_cast<int32_t>(adjust));
cfi_.AdjustCFAOffset(-adjust);
}
@@ -1379,17 +1925,7 @@
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister mscratch) {
- Mips64ManagedRegister scratch = mscratch.AsMips64();
- CHECK(scratch.IsGpuRegister()) << scratch;
- // TODO: it's unclear wether 32 or 64 bits need to be stored (Arm64 and x86/x64 disagree?).
- // Is this function even referenced anywhere else in the code?
- LoadConst32(scratch.AsGpuRegister(), imm);
- StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, dest.Int32Value());
-}
-
-void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<kMipsDoublewordSize> thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -1398,7 +1934,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
-void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<kMipsDoublewordSize> thr_offs) {
StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
@@ -1415,7 +1951,9 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
+void Mips64Assembler::LoadFromThread64(ManagedRegister mdest,
+ ThreadOffset<kMipsDoublewordSize> src,
+ size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -1449,18 +1987,20 @@
}
void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<8> offs) {
+ ThreadOffset<kMipsDoublewordSize> offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
}
-void Mips64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips";
+void Mips64Assembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "No sign extension necessary for MIPS64";
}
-void Mips64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips";
+void Mips64Assembler::ZeroExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "No zero extension necessary for MIPS64";
}
void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
@@ -1492,7 +2032,7 @@
}
void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<8> thr_offs,
+ ThreadOffset<kMipsDoublewordSize> thr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
@@ -1500,7 +2040,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}
-void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
+void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<kMipsDoublewordSize> thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -1561,9 +2101,12 @@
}
}
-void Mips64Assembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ FrameOffset src_base ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
@@ -1584,15 +2127,18 @@
}
}
-void Mips64Assembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset
-/*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
-void Mips64Assembler::MemoryBarrier(ManagedRegister) {
+void Mips64Assembler::MemoryBarrier(ManagedRegister mreg ATTRIBUTE_UNUSED) {
// TODO: sync?
- UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+ UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
@@ -1604,7 +2150,7 @@
CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
CHECK(out_reg.IsGpuRegister()) << out_reg;
if (null_allowed) {
- Label null_arg;
+ Mips64Label null_arg;
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
@@ -1631,7 +2177,7 @@
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
if (null_allowed) {
- Label null_arg;
+ Mips64Label null_arg;
LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), SP,
handle_scope_offset.Int32Value());
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
@@ -1653,7 +2199,7 @@
Mips64ManagedRegister in_reg = min_reg.AsMips64();
CHECK(out_reg.IsGpuRegister()) << out_reg;
CHECK(in_reg.IsGpuRegister()) << in_reg;
- Label null_arg;
+ Mips64Label null_arg;
if (!out_reg.Equals(in_reg)) {
LoadConst32(out_reg.AsGpuRegister(), 0);
}
@@ -1663,11 +2209,13 @@
Bind(&null_arg);
}
-void Mips64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+void Mips64Assembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
// TODO: not validating references
}
-void Mips64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+void Mips64Assembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
// TODO: not validating references
}
@@ -1679,6 +2227,7 @@
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
base.AsGpuRegister(), offset.Int32Value());
Jalr(scratch.AsGpuRegister());
+ Nop();
// TODO: place reference map on call
}
@@ -1691,11 +2240,13 @@
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
scratch.AsGpuRegister(), offset.Int32Value());
Jalr(scratch.AsGpuRegister());
+ Nop();
// TODO: place reference map on call
}
-void Mips64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*mscratch*/) {
- UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+void Mips64Assembler::CallFromThread64(ThreadOffset<kMipsDoublewordSize> offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
@@ -1703,37 +2254,39 @@
}
void Mips64Assembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister /*mscratch*/) {
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
}
void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
- Mips64ExceptionSlowPath* slow = new Mips64ExceptionSlowPath(scratch, stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
- S1, Thread::ExceptionOffset<8>().Int32Value());
- Bnezc(scratch.AsGpuRegister(), slow->Entry());
+ exception_blocks_.emplace_back(scratch, stack_adjust);
+ LoadFromOffset(kLoadDoubleword,
+ scratch.AsGpuRegister(),
+ S1,
+ Thread::ExceptionOffset<kMipsDoublewordSize>().Int32Value());
+ Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
}
-void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
- Mips64Assembler* sp_asm = down_cast<Mips64Assembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
+void Mips64Assembler::EmitExceptionPoll(Mips64ExceptionSlowPath* exception) {
+ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
}
- // Pass exception object as argument
- // Don't care about preserving A0 as this call won't return
- __ Move(A0, scratch_.AsGpuRegister());
+ // Pass exception object as argument.
+ // Don't care about preserving A0 as this call won't return.
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+ Move(A0, exception->scratch_.AsGpuRegister());
// Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadDoubleword, T9, S1,
- QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
- // TODO: check T9 usage
- __ Jr(T9);
+ LoadFromOffset(kLoadDoubleword,
+ T9,
+ S1,
+ QUICK_ENTRYPOINT_OFFSET(kMipsDoublewordSize, pDeliverException).Int32Value());
+ Jr(T9);
+ Nop();
+
// Call never returns
- __ Break();
-#undef __
+ Break();
}
} // namespace mips64