Diffstat (limited to 'compiler/utils')
-rw-r--r--  compiler/utils/arm/assembler_arm.h                3
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc          153
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h            39
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc       100
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h         20
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc  100
-rw-r--r--  compiler/utils/swap_space.h                       2
7 files changed, 360 insertions(+), 57 deletions(-)
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 0ed8a35338..0f24e81be2 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -652,6 +652,9 @@ class ArmAssembler : public Assembler {
virtual void blx(Register rm, Condition cond = AL) = 0;
virtual void bx(Register rm, Condition cond = AL) = 0;
+ // ADR instruction loading a register for branching to the label.
+ virtual void AdrCode(Register rt, Label* label) = 0;
+
// Memory barriers.
virtual void dmb(DmbOptions flavor) = 0;
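The new AdrCode() hook lets a code generator materialize the address of a code label, with the Thumb mode bit set, into a register. A minimal caller sketch, assuming the surrounding art::arm namespace; EmitDispatch() is hypothetical, and per the Thumb2 implementation below the label must be unbound at the call and bound within +4KiB afterwards:

    // Hedged sketch: EmitDispatch() is made up, the API calls are from this diff.
    void EmitDispatch(ArmAssembler* assembler, Register reg) {
      Label return_address;                      // Still unbound here.
      assembler->AdrCode(reg, &return_address);  // reg = &return_address | 1 (Thumb bit).
      // ... emit the instructions the loaded address should point past ...
      assembler->Bind(&return_address);          // Must stay within +4KiB of the ADR.
    }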
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 1e71d06b49..d7096b3c87 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -214,14 +214,14 @@ void Thumb2Assembler::EmitFixups(uint32_t adjusted_code_size) {
DCHECK_GE(dest_end, src_end);
for (auto i = fixups_.rbegin(), end = fixups_.rend(); i != end; ++i) {
Fixup* fixup = &*i;
+ size_t old_fixup_location = fixup->GetLocation();
if (fixup->GetOriginalSize() == fixup->GetSize()) {
// The size of this Fixup didn't change. To avoid moving the data
// in small chunks, emit the code to its original position.
- fixup->Emit(&buffer_, adjusted_code_size);
fixup->Finalize(dest_end - src_end);
+ fixup->Emit(old_fixup_location, &buffer_, adjusted_code_size);
} else {
// Move the data between the end of the fixup and src_end to its final location.
- size_t old_fixup_location = fixup->GetLocation();
size_t src_begin = old_fixup_location + fixup->GetOriginalSizeInBytes();
size_t data_size = src_end - src_begin;
size_t dest_begin = dest_end - data_size;
@@ -230,7 +230,7 @@ void Thumb2Assembler::EmitFixups(uint32_t adjusted_code_size) {
dest_end = dest_begin - fixup->GetSizeInBytes();
// Finalize the Fixup and emit the data to the new location.
fixup->Finalize(dest_end - src_end);
- fixup->Emit(&buffer_, adjusted_code_size);
+ fixup->Emit(fixup->GetLocation(), &buffer_, adjusted_code_size);
}
}
CHECK_EQ(src_end, dest_end);
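The reordering in this hunk is the point of the new Emit() signature: Finalize() shifts the fixup's recorded location to its final position, but for same-size fixups the bytes must still be written at the pre-move position. Capturing the location first keeps both, presumably so that offset computations that depend on the final location (such as the new kCodeAddr4KiB case, which reads location_ & 2) run against finalized state:

    // Hedged recap of the new ordering in the same-size branch; names mirror the diff.
    size_t old_fixup_location = fixup->GetLocation();  // Pre-move buffer offset.
    fixup->Finalize(dest_end - src_end);               // location_/target_ -> final values.
    fixup->Emit(old_fixup_location, &buffer_,          // Bytes land at the old spot;
                adjusted_code_size);                   // offsets use the final location.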
@@ -1895,6 +1895,9 @@ inline size_t Thumb2Assembler::Fixup::SizeInBytes(Size size) {
case kCbxz48Bit:
return 6u;
+ case kCodeAddr4KiB:
+ return 4u;
+
case kLiteral1KiB:
return 2u;
case kLiteral4KiB:
@@ -1973,6 +1976,15 @@ inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) con
diff -= 2; // Extra CMP Rn, #0, 16-bit.
break;
+ case kCodeAddr4KiB:
+ // The ADR instruction rounds down the PC+4 to a multiple of 4, so if the PC
+ // isn't a multiple of 4, we need to adjust.
+ DCHECK_ALIGNED(diff, 2);
+ diff += location_ & 2;
+ // Add the Thumb mode bit.
+ diff += 1;
+ break;
+
case kLiteral1KiB:
case kLiteral4KiB:
case kLongOrFPLiteral1KiB:
@@ -1987,8 +1999,8 @@ inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) con
diff = diff + (diff & 2);
DCHECK_GE(diff, 0);
break;
- case kLiteral1MiB:
case kLiteral64KiB:
+ case kLiteral1MiB:
case kLongOrFPLiteral64KiB:
case kLiteralAddr64KiB:
DCHECK_GE(diff, 4); // The target must be at least 4 bytes after the ADD rX, PC.
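A worked example of the kCodeAddr4KiB adjustment, assuming the prologue of GetOffset() computes diff relative to the raw PC+4 (the convention the other cases in this switch document):

    #include <cassert>
    #include <cstdint>

    // Hedged model of the kCodeAddr4KiB case above.
    int32_t CodeAddr4KiBOffset(uint32_t location, uint32_t target) {
      int32_t diff = static_cast<int32_t>(target - location - 4u);  // Relative to PC+4.
      diff += location & 2;  // ADR's base is AlignDown(PC + 4, 4), not PC + 4.
      diff += 1;             // Add the Thumb mode bit to the loaded address.
      return diff;
    }

    int main() {
      uint32_t location = 0x12, target = 0x40;    // location % 4 == 2.
      uint32_t adr_base = (location + 4u) & ~3u;  // 0x14: the base ADR really uses.
      assert(adr_base + CodeAddr4KiBOffset(location, target) == (target | 1u));
      return 0;
    }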
@@ -2041,6 +2053,10 @@ bool Thumb2Assembler::Fixup::IsCandidateForEmitEarly() const {
// We don't support conditional branches beyond +-1MiB.
return true;
+ case kCodeAddr4KiB:
+ // ADR uses the aligned PC and as such the offset cannot be calculated early.
+ return false;
+
case kLiteral1KiB:
case kLiteral4KiB:
case kLiteral64KiB:
@@ -2087,6 +2103,10 @@ uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size)
// We don't support conditional branches beyond +-1MiB.
break;
+ case kCodeAddr4KiB:
+ // We don't support code address ADR beyond +4KiB.
+ break;
+
case kLiteral1KiB:
DCHECK(!IsHighRegister(rn_));
if (IsUint<10>(GetOffset(current_code_size))) {
@@ -2159,13 +2179,15 @@ uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size)
return current_code_size - old_code_size;
}
-void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) const {
+void Thumb2Assembler::Fixup::Emit(uint32_t emit_location,
+ AssemblerBuffer* buffer,
+ uint32_t code_size) const {
switch (GetSize()) {
case kBranch16Bit: {
DCHECK(type_ == kUnconditional || type_ == kConditional);
DCHECK_EQ(type_ == kConditional, cond_ != AL);
int16_t encoding = BEncoding16(GetOffset(code_size), cond_);
- buffer->Store<int16_t>(location_, encoding);
+ buffer->Store<int16_t>(emit_location, encoding);
break;
}
case kBranch32Bit: {
@@ -2180,15 +2202,15 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c
DCHECK_NE(encoding & B12, 0);
encoding ^= B14 | B12;
}
- buffer->Store<int16_t>(location_, encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
break;
}
case kCbxz16Bit: {
DCHECK(type_ == kCompareAndBranchXZero);
int16_t encoding = CbxzEncoding16(rn_, GetOffset(code_size), cond_);
- buffer->Store<int16_t>(location_, encoding);
+ buffer->Store<int16_t>(emit_location, encoding);
break;
}
case kCbxz32Bit: {
@@ -2196,8 +2218,8 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c
DCHECK(cond_ == EQ || cond_ == NE);
int16_t cmp_encoding = CmpRnImm8Encoding16(rn_, 0);
int16_t b_encoding = BEncoding16(GetOffset(code_size), cond_);
- buffer->Store<int16_t>(location_, cmp_encoding);
- buffer->Store<int16_t>(location_ + 2, b_encoding);
+ buffer->Store<int16_t>(emit_location, cmp_encoding);
+ buffer->Store<int16_t>(emit_location + 2, b_encoding);
break;
}
case kCbxz48Bit: {
@@ -2205,24 +2227,32 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c
DCHECK(cond_ == EQ || cond_ == NE);
int16_t cmp_encoding = CmpRnImm8Encoding16(rn_, 0);
int32_t b_encoding = BEncoding32(GetOffset(code_size), cond_);
- buffer->Store<int16_t>(location_, cmp_encoding);
- buffer->Store<int16_t>(location_ + 2u, b_encoding >> 16);
- buffer->Store<int16_t>(location_ + 4u, static_cast<int16_t>(b_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, cmp_encoding);
+ buffer->Store<int16_t>(emit_location + 2u, b_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 4u, static_cast<int16_t>(b_encoding & 0xffff));
+ break;
+ }
+
+ case kCodeAddr4KiB: {
+ DCHECK(type_ == kLoadCodeAddr);
+ int32_t encoding = AdrEncoding32(rn_, GetOffset(code_size));
+ buffer->Store<int16_t>(emit_location, encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
break;
}
case kLiteral1KiB: {
DCHECK(type_ == kLoadLiteralNarrow);
int16_t encoding = LdrLitEncoding16(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(location_, encoding);
+ buffer->Store<int16_t>(emit_location, encoding);
break;
}
case kLiteral4KiB: {
DCHECK(type_ == kLoadLiteralNarrow);
// GetOffset() uses PC+4 but load literal uses AlignDown(PC+4, 4). Adjust offset accordingly.
int32_t encoding = LdrLitEncoding32(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(location_, encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
break;
}
case kLiteral64KiB: {
@@ -2242,11 +2272,11 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c
int32_t mov_encoding = MovModImmEncoding32(rn_, offset & ~0xfff);
int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
int32_t ldr_encoding = LdrRtRnImm12Encoding(rn_, rn_, offset & 0xfff);
- buffer->Store<int16_t>(location_, mov_encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 4u, add_pc_encoding);
- buffer->Store<int16_t>(location_ + 6u, ldr_encoding >> 16);
- buffer->Store<int16_t>(location_ + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, mov_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 4u, add_pc_encoding);
+ buffer->Store<int16_t>(emit_location + 6u, ldr_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
break;
}
case kLiteralFar: {
@@ -2256,36 +2286,36 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c
int32_t movt_encoding = MovtEncoding32(rn_, offset & ~0xffff);
int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
int32_t ldr_encoding = LdrRtRnImm12Encoding(rn_, rn_, 0);
- buffer->Store<int16_t>(location_, movw_encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 4u, movt_encoding >> 16);
- buffer->Store<int16_t>(location_ + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 8u, add_pc_encoding);
- buffer->Store<int16_t>(location_ + 10u, ldr_encoding >> 16);
- buffer->Store<int16_t>(location_ + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, movw_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 4u, movt_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 8u, add_pc_encoding);
+ buffer->Store<int16_t>(emit_location + 10u, ldr_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
break;
}
case kLiteralAddr1KiB: {
DCHECK(type_ == kLoadLiteralAddr);
int16_t encoding = AdrEncoding16(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(location_, encoding);
+ buffer->Store<int16_t>(emit_location, encoding);
break;
}
case kLiteralAddr4KiB: {
DCHECK(type_ == kLoadLiteralAddr);
int32_t encoding = AdrEncoding32(rn_, GetOffset(code_size));
- buffer->Store<int16_t>(location_, encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
break;
}
case kLiteralAddr64KiB: {
DCHECK(type_ == kLoadLiteralAddr);
int32_t mov_encoding = MovwEncoding32(rn_, GetOffset(code_size));
int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
- buffer->Store<int16_t>(location_, mov_encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 4u, add_pc_encoding);
+ buffer->Store<int16_t>(emit_location, mov_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 4u, add_pc_encoding);
break;
}
case kLiteralAddrFar: {
@@ -2294,29 +2324,29 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c
int32_t movw_encoding = MovwEncoding32(rn_, offset & 0xffff);
int32_t movt_encoding = MovtEncoding32(rn_, offset & ~0xffff);
int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
- buffer->Store<int16_t>(location_, movw_encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 4u, movt_encoding >> 16);
- buffer->Store<int16_t>(location_ + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 8u, add_pc_encoding);
+ buffer->Store<int16_t>(emit_location, movw_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 4u, movt_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 8u, add_pc_encoding);
break;
}
case kLongOrFPLiteral1KiB: {
int32_t encoding = LoadWideOrFpEncoding(PC, GetOffset(code_size)); // DCHECKs type_.
- buffer->Store<int16_t>(location_, encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(encoding & 0xffff));
break;
}
case kLongOrFPLiteral64KiB: {
int32_t mov_encoding = MovwEncoding32(IP, GetOffset(code_size));
int16_t add_pc_encoding = AddRdnRmEncoding16(IP, PC);
int32_t ldr_encoding = LoadWideOrFpEncoding(IP, 0u); // DCHECKs type_.
- buffer->Store<int16_t>(location_, mov_encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 4u, add_pc_encoding);
- buffer->Store<int16_t>(location_ + 6u, ldr_encoding >> 16);
- buffer->Store<int16_t>(location_ + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, mov_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 4u, add_pc_encoding);
+ buffer->Store<int16_t>(emit_location + 6u, ldr_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
break;
}
case kLongOrFPLiteralFar: {
@@ -2325,13 +2355,13 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c
int32_t movt_encoding = MovtEncoding32(IP, offset & ~0xffff);
int16_t add_pc_encoding = AddRdnRmEncoding16(IP, PC);
int32_t ldr_encoding = LoadWideOrFpEncoding(IP, 0); // DCHECKs type_.
- buffer->Store<int16_t>(location_, movw_encoding >> 16);
- buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 4u, movt_encoding >> 16);
- buffer->Store<int16_t>(location_ + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
- buffer->Store<int16_t>(location_ + 8u, add_pc_encoding);
- buffer->Store<int16_t>(location_ + 10u, ldr_encoding >> 16);
- buffer->Store<int16_t>(location_ + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location, movw_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 4u, movt_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
+ buffer->Store<int16_t>(emit_location + 8u, add_pc_encoding);
+ buffer->Store<int16_t>(emit_location + 10u, ldr_encoding >> 16);
+ buffer->Store<int16_t>(emit_location + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
break;
}
}
@@ -3331,6 +3361,19 @@ void Thumb2Assembler::bx(Register rm, Condition cond) {
}
+void Thumb2Assembler::AdrCode(Register rt, Label* label) {
+ uint32_t pc = buffer_.Size();
+ FixupId branch_id = AddFixup(Fixup::LoadCodeAddress(pc, rt));
+ CHECK(!label->IsBound());
+ // ADR target must be an unbound label. Add it to a singly-linked list maintained within
+ // the code with the label serving as the head.
+ Emit16(static_cast<uint16_t>(label->position_));
+ label->LinkTo(branch_id);
+ Emit16(0);
+ DCHECK_EQ(buffer_.Size() - pc, GetFixup(branch_id)->GetSizeInBytes());
+}
+
+
void Thumb2Assembler::Push(Register rd, Condition cond) {
str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
}
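AdrCode() threads the new fixup through the standard unbound-label chain: the first emitted halfword stores the label's previous chain head, LinkTo() makes this fixup the new head, and the second halfword pads the entry to the fixed 4-byte kCodeAddr4KiB size. Both halfwords are placeholders that the final emission pass overwrites with the real ADR encoding; an illustrative trace (addresses hypothetical):

    // Two AdrCode() calls targeting the same unbound label:
    //   AdrCode(R0, &label) at pc 0x10: stores the old chain head at 0x10,
    //     then label links to fixup #1.
    //   AdrCode(R1, &label) at pc 0x30: stores the link to #1 at 0x30,
    //     then label links to fixup #2.
    //   Bind(&label) walks #2 -> #1 via the stored halfwords to set the targets,
    //   and EmitFixups() writes the final 32-bit ADR over each 4-byte placeholder.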
@@ -3405,7 +3448,7 @@ void Thumb2Assembler::Bind(Label* label) {
break;
}
}
- last_fixup.Emit(&buffer_, buffer_.Size());
+ last_fixup.Emit(last_fixup.GetLocation(), &buffer_, buffer_.Size());
fixups_.pop_back();
}
}
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 1c495aa7a7..5c36110cf6 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -268,6 +268,9 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void blx(Register rm, Condition cond = AL) OVERRIDE;
void bx(Register rm, Condition cond = AL) OVERRIDE;
+ // ADR instruction loading a register for branching to the label, including the Thumb mode bit.
+ void AdrCode(Register rt, Label* label) OVERRIDE;
+
virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
@@ -377,6 +380,10 @@ class Thumb2Assembler FINAL : public ArmAssembler {
force_32bit_ = true;
}
+ void Allow16Bit() {
+ force_32bit_ = false;
+ }
+
// Emit an ADR (or a sequence of instructions) to load the jump table address into base_reg. This
// will generate a fixup.
JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE;
@@ -422,6 +429,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
kUnconditionalLink, // BL.
kUnconditionalLinkX, // BLX.
kCompareAndBranchXZero, // cbz/cbnz.
+ kLoadCodeAddr, // Get address of a code label, used for Baker read barriers.
kLoadLiteralNarrow, // Load narrow integer literal.
kLoadLiteralWide, // Load wide integer literal.
kLoadLiteralAddr, // Load address of literal (used for jump table).
@@ -442,6 +450,10 @@ class Thumb2Assembler FINAL : public ArmAssembler {
kCbxz32Bit, // CMP rX, #0 + Bcc label; X < 8; 16-bit Bcc; +-8-bit offset.
kCbxz48Bit, // CMP rX, #0 + Bcc label; X < 8; 32-bit Bcc; up to +-1MiB offset.
+ // ADR variants.
+ kCodeAddr4KiB, // ADR rX, <label>; label must be after the ADR but within 4KiB range.
+ // Multi-instruction expansion is not supported.
+
// Load integer literal variants.
// LDR rX, label; X < 8; 16-bit variant up to 1KiB offset; 2 bytes.
kLiteral1KiB,
@@ -492,6 +504,12 @@ class Thumb2Assembler FINAL : public ArmAssembler {
cond, kCompareAndBranchXZero, kCbxz16Bit, location);
}
+ // Code address.
+ static Fixup LoadCodeAddress(uint32_t location, Register rt) {
+ return Fixup(rt, kNoRegister, kNoSRegister, kNoDRegister,
+ AL, kLoadCodeAddr, kCodeAddr4KiB, location);
+ }
+
// Load narrow literal.
static Fixup LoadNarrowLiteral(uint32_t location, Register rt, Size size) {
DCHECK(size == kLiteral1KiB || size == kLiteral4KiB || size == kLiteral64KiB ||
@@ -550,6 +568,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
switch (GetOriginalSize()) {
case kBranch32Bit:
case kCbxz48Bit:
+ case kCodeAddr4KiB:
case kLiteralFar:
case kLiteralAddrFar:
case kLongOrFPLiteralFar:
@@ -623,7 +642,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
// Emit the branch instruction into the assembler buffer. This does the
// encoding into the thumb instruction.
- void Emit(AssemblerBuffer* buffer, uint32_t code_size) const;
+ void Emit(uint32_t emit_location, AssemblerBuffer* buffer, uint32_t code_size) const;
private:
Fixup(Register rn, Register rt2, SRegister sd, DRegister dd,
@@ -903,6 +922,24 @@ class Thumb2Assembler FINAL : public ArmAssembler {
FixupId last_fixup_id_;
};
+class ScopedForce32Bit {
+ public:
+ explicit ScopedForce32Bit(Thumb2Assembler* assembler)
+ : assembler_(assembler), old_force_32bit_(assembler->IsForced32Bit()) {
+ assembler->Force32Bit();
+ }
+
+ ~ScopedForce32Bit() {
+ if (!old_force_32bit_) {
+ assembler_->Allow16Bit();
+ }
+ }
+
+ private:
+ Thumb2Assembler* const assembler_;
+ const bool old_force_32bit_;
+};
+
} // namespace arm
} // namespace art
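ScopedForce32Bit pairs the existing Force32Bit() with the new Allow16Bit() as an RAII guard: 32-bit encodings are forced for its scope, and 16-bit encodings are re-enabled on destruction only if they were allowed before, so the guard composes with an assembler that is already in forced-32-bit mode. A hedged usage sketch (the emitted instruction is an arbitrary example):

    void EmitWideOnlySequence(art::arm::Thumb2Assembler* assembler) {
      art::arm::ScopedForce32Bit force_32bit(assembler);
      // This add could normally pick a 16-bit encoding; inside the scope it is
      // emitted in the 32-bit form, which code expecting fixed-size sequences
      // (e.g. patchable thunk entry points) can rely on.
      assembler->add(art::arm::R0, art::arm::R0, art::arm::ShifterOperand(1));
    }  // Destructor restores the previous 16-bit/32-bit mode.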
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 57223b52a3..f4afb33034 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1356,6 +1356,106 @@ void Mips64Assembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegiste
EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12);
}
+void Mips64Assembler::Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x10);
+}
+
+void Mips64Assembler::Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x10);
+}
+
void Mips64Assembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b);
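All of the new wrappers share the MSA 3R instruction format and differ only in the (operation, data-format) pair passed to EmitMsa3R(); the minor opcode 0x10 selects the ADD_A/AVE_S/AVE_U/AVER_S/AVER_U group. A hedged sketch of the bit layout these call sites imply, assuming the standard MIPS MSA encoding (the real EmitMsa3R() lives elsewhere in this file):

    #include <cstdint>

    // 3R format: 011110 | operation(3) | df(2) | wt(5) | ws(5) | wd(5) | minor(6)
    uint32_t EncodeMsa3R(uint32_t operation, uint32_t df,
                         uint32_t wt, uint32_t ws, uint32_t wd, uint32_t minor) {
      const uint32_t kMsaMajorOpcode = 0x1e;  // Bits 31..26.
      return (kMsaMajorOpcode << 26) | (operation << 23) | (df << 21) |
             (wt << 16) | (ws << 11) | (wd << 6) | minor;
    }

    // Example: add_a.b w1, w2, w3 corresponds to EncodeMsa3R(0x0, 0x0, 3, 2, 1, 0x10),
    // matching the Add_aB() call above.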
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 666c6935a1..6ac336178b 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -682,6 +682,26 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index f2e3b1610c..084ce6fa08 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -2668,6 +2668,106 @@ TEST_F(AssemblerMIPS64Test, Mod_uD) {
"mod_u.d");
}
+TEST_F(AssemblerMIPS64Test, Add_aB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aB, "add_a.b ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Add_aH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aH, "add_a.h ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Add_aW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aW, "add_a.w ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Add_aD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aD, "add_a.d ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_sB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sB, "ave_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sH, "ave_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sW, "ave_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sD, "ave_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_uB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uB, "ave_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uH, "ave_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uW, "ave_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Ave_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uD, "ave_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_sB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sB, "aver_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sH, "aver_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sW, "aver_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sD, "aver_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_uB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uB, "aver_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uH, "aver_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uW, "aver_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Aver_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uD, "aver_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.d");
+}
+
TEST_F(AssemblerMIPS64Test, FaddW) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddW, "fadd.w ${reg1}, ${reg2}, ${reg3}"),
"fadd.w");
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index c286b820fe..0ff9fc69ed 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -78,7 +78,7 @@ class SwapSpace {
mutable FreeByStartSet::const_iterator free_by_start_entry;
};
struct FreeBySizeComparator {
- bool operator()(const FreeBySizeEntry& lhs, const FreeBySizeEntry& rhs) {
+ bool operator()(const FreeBySizeEntry& lhs, const FreeBySizeEntry& rhs) const {
if (lhs.size != rhs.size) {
return lhs.size < rhs.size;
} else {
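The added const qualifier matters because standard associative containers may invoke their comparator through a const object or reference; a non-const operator() then fails to compile in such contexts. A minimal reproduction, with a hypothetical Entry standing in for FreeBySizeEntry:

    #include <cstddef>
    #include <set>

    struct Entry { size_t size; int id; };

    struct Cmp {
      // Without 'const' here, the call through 'cmp' below does not compile.
      bool operator()(const Entry& lhs, const Entry& rhs) const {
        return lhs.size != rhs.size ? lhs.size < rhs.size : lhs.id < rhs.id;
      }
    };

    bool Less(const std::set<Entry, Cmp>& s, const Entry& a, const Entry& b) {
      const Cmp& cmp = s.key_comp();  // Comparator bound as const.
      return cmp(a, b);
    }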