summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r-- compiler/dex/verification_results.cc            |   5
-rw-r--r-- compiler/dex/verified_method.cc                 | 104
-rw-r--r-- compiler/dex/verified_method.h                  |  37
-rw-r--r-- compiler/optimizing/code_generator_mips64.cc    | 183
-rw-r--r-- compiler/optimizing/code_generator_mips64.h     |   8
-rw-r--r-- compiler/optimizing/intrinsics_mips64.cc        |  79
-rw-r--r-- compiler/utils/arm/assembler_arm_vixl.h         |   7
-rw-r--r-- compiler/utils/assembler_test.h                 | 111
-rw-r--r-- compiler/utils/assembler_thumb_test_expected.cc.inc | 8
-rw-r--r-- compiler/utils/mips64/assembler_mips64.cc       |  12
-rw-r--r-- compiler/utils/mips64/assembler_mips64.h        |   2
-rw-r--r-- compiler/utils/mips64/assembler_mips64_test.cc  |  16
-rw-r--r-- disassembler/disassembler_mips.cc               |   5
-rw-r--r-- tools/libcore_failures.txt                      |   3
14 files changed, 386 insertions, 194 deletions
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 9d39bf2c7a..00a7d44bac 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,9 +46,7 @@ VerificationResults::~VerificationResults() {
void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
DCHECK(method_verifier != nullptr);
MethodReference ref = method_verifier->GetMethodReference();
- bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
- std::unique_ptr<const VerifiedMethod> verified_method(
- VerifiedMethod::Create(method_verifier, compile));
+ std::unique_ptr<const VerifiedMethod> verified_method(VerifiedMethod::Create(method_verifier));
if (verified_method == nullptr) {
// We'll punt this later.
return;
@@ -84,7 +82,6 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
// TODO: Investigate why are we doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod();
if (!Runtime::Current()->UseJitCompilation()) {
- DCHECK_EQ(existing->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
DCHECK_EQ(existing->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
}
// Let the unique_ptr delete the new verified method since there was already an existing one
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index fdcafe8a15..188209bfb8 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -42,25 +42,12 @@ VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types, bool has_runtim
has_runtime_throw_(has_runtime_throw) {
}
-const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier,
- bool compile) {
+const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
std::unique_ptr<VerifiedMethod> verified_method(
new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(),
method_verifier->HasInstructionThatWillThrow()));
- if (compile) {
- // TODO: move this out when DEX-to-DEX supports devirtualization.
- if (method_verifier->HasVirtualOrInterfaceInvokes()) {
- verified_method->GenerateDevirtMap(method_verifier);
- }
-
- // Only need dequicken info for JIT so far.
- if (Runtime::Current()->UseJitCompilation() &&
- !verified_method->GenerateDequickenMap(method_verifier)) {
- return nullptr;
- }
- }
-
if (method_verifier->HasCheckCasts()) {
verified_method->GenerateSafeCastSet(method_verifier);
}
@@ -68,17 +55,6 @@ const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_ve
return verified_method.release();
}
-const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const {
- auto it = devirt_map_.find(dex_pc);
- return (it != devirt_map_.end()) ? &it->second : nullptr;
-}
-
-const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
- DCHECK(Runtime::Current()->UseJitCompilation());
- auto it = dequicken_map_.find(dex_pc);
- return (it != dequicken_map_.end()) ? &it->second : nullptr;
-}
-
bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc);
}
@@ -126,82 +102,6 @@ bool VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verif
return true;
}
-void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier) {
- // It is risky to rely on reg_types for sharpening in cases of soft
- // verification, we might end up sharpening to a wrong implementation. Just abort.
- if (method_verifier->HasFailures()) {
- return;
- }
-
- const DexFile::CodeItem* code_item = method_verifier->CodeItem();
- const uint16_t* insns = code_item->insns_;
- const Instruction* inst = Instruction::At(insns);
- const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_);
-
- for (; inst < end; inst = inst->Next()) {
- const bool is_virtual = inst->Opcode() == Instruction::INVOKE_VIRTUAL ||
- inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE;
- const bool is_interface = inst->Opcode() == Instruction::INVOKE_INTERFACE ||
- inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE;
-
- if (!is_interface && !is_virtual) {
- continue;
- }
- // Get reg type for register holding the reference to the object that will be dispatched upon.
- uint32_t dex_pc = inst->GetDexPc(insns);
- verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- const bool is_range = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE ||
- inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE;
- const verifier::RegType&
- reg_type(line->GetRegisterType(method_verifier,
- is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
-
- if (!reg_type.HasClass()) {
- // We will compute devirtualization information only when we know the Class of the reg type.
- continue;
- }
- mirror::Class* reg_class = reg_type.GetClass();
- if (reg_class->IsInterface()) {
- // We can't devirtualize when the known type of the register is an interface.
- continue;
- }
- if (reg_class->IsAbstract() && !reg_class->IsArrayClass()) {
- // We can't devirtualize abstract classes except on arrays of abstract classes.
- continue;
- }
- auto* cl = Runtime::Current()->GetClassLinker();
- PointerSize pointer_size = cl->GetImagePointerSize();
- ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
- is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size);
- if (abstract_method == nullptr) {
- // If the method is not found in the cache this means that it was never found
- // by ResolveMethodAndCheckAccess() called when verifying invoke_*.
- continue;
- }
- // Find the concrete method.
- ArtMethod* concrete_method = nullptr;
- if (is_interface) {
- concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface(
- abstract_method, pointer_size);
- }
- if (is_virtual) {
- concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(
- abstract_method, pointer_size);
- }
- if (concrete_method == nullptr || !concrete_method->IsInvokable()) {
- // In cases where concrete_method is not found, or is not invokable, continue to the next
- // invoke.
- continue;
- }
- if (reg_type.IsPreciseReference() || concrete_method->IsFinal() ||
- concrete_method->GetDeclaringClass()->IsFinal()) {
- // If we knew exactly the class being dispatched upon, or if the target method cannot be
- // overridden record the target to be used in the compiler driver.
- devirt_map_.Put(dex_pc, concrete_method->ToMethodReference());
- }
- }
-}
-
void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) {
/*
* Walks over the method code and adds any cast instructions in which
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index ce53417185..0530a8cc18 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -39,31 +39,17 @@ class VerifiedMethod {
// is better for performance (not just memory usage), especially for large sets.
typedef std::vector<uint32_t> SafeCastSet;
- // Devirtualization map type maps dex offset to concrete method reference.
- typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap;
-
// Devirtualization map type maps dex offset to field / method idx.
typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
- static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
+ static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier)
REQUIRES_SHARED(Locks::mutator_lock_);
~VerifiedMethod() = default;
- const DevirtualizationMap& GetDevirtMap() const {
- return devirt_map_;
- }
-
const SafeCastSet& GetSafeCastSet() const {
return safe_cast_set_;
}
- // Returns the devirtualization target method, or null if none.
- const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
-
- // Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
- // no entry for that dex pc.
- const DexFileReference* GetDequickenIndex(uint32_t dex_pc) const;
-
// Returns true if the cast can statically be verified to be redundant
// by using the check-cast elision peephole optimization in the verifier.
bool IsSafeCast(uint32_t pc) const;
@@ -82,26 +68,6 @@ class VerifiedMethod {
}
private:
- /*
- * Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
- * verification). For type-precise determination we have all the data we need, so we just need to
- * encode it in some clever fashion.
- * Stores the data in dex_gc_map_, returns true on success and false on failure.
- */
- bool GenerateGcMap(verifier::MethodVerifier* method_verifier);
-
- // Verify that the GC map associated with method_ is well formed.
- static void VerifyGcMap(verifier::MethodVerifier* method_verifier,
- const std::vector<uint8_t>& data);
-
- // Compute sizes for GC map data.
- static void ComputeGcMapSizes(verifier::MethodVerifier* method_verifier,
- size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc);
-
- // Generate devirtualizaion map into devirt_map_.
- void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Generate dequickening map into dequicken_map_. Returns false if there is an error.
bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -110,7 +76,6 @@ class VerifiedMethod {
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
REQUIRES_SHARED(Locks::mutator_lock_);
- DevirtualizationMap devirt_map_;
// Dequicken map is required for compiling quickened byte codes. The quicken maps from
// dex PC to dex method index or dex field index based on the instruction.
DequickenMap dequicken_map_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index cdbc712f86..ced0e90657 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1929,9 +1929,6 @@ void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
Primitive::Type type = instruction->InputAt(0)->GetType();
LocationSummary* locations = instruction->GetLocations();
- GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
- Mips64Label true_label;
-
switch (type) {
default:
// Integer case.
@@ -1940,29 +1937,11 @@ void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
case Primitive::kPrimLong:
GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
return;
-
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- // TODO: don't use branches.
- GenerateFpCompareAndBranch(instruction->GetCondition(),
- instruction->IsGtBias(),
- type,
- locations,
- &true_label);
- break;
+ GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
+ return;
}
-
- // Convert the branches into the result.
- Mips64Label done;
-
- // False case: result = 0.
- __ LoadConst32(dst, 0);
- __ Bc(&done);
-
- // True case: result = 1.
- __ Bind(&true_label);
- __ LoadConst32(dst, 1);
- __ Bind(&done);
}
void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
@@ -2377,19 +2356,40 @@ void InstructionCodeGeneratorMIPS64::GenerateIntLongCompare(IfCondition cond,
switch (cond) {
case kCondEQ:
case kCondNE:
- if (use_imm && IsUint<16>(rhs_imm)) {
- __ Xori(dst, lhs, rhs_imm);
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst64(rhs_reg, rhs_imm);
+ if (use_imm && IsInt<16>(-rhs_imm)) {
+ if (rhs_imm == 0) {
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, lhs, 1);
+ } else {
+ __ Sltu(dst, ZERO, lhs);
+ }
+ } else {
+ if (is64bit) {
+ __ Daddiu(dst, lhs, -rhs_imm);
+ } else {
+ __ Addiu(dst, lhs, -rhs_imm);
+ }
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
- __ Xor(dst, lhs, rhs_reg);
- }
- if (cond == kCondEQ) {
- __ Sltiu(dst, dst, 1);
} else {
- __ Sltu(dst, ZERO, dst);
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
break;
@@ -2576,6 +2576,121 @@ void InstructionCodeGeneratorMIPS64::GenerateIntLongCompareAndBranch(IfCondition
}
}
+void InstructionCodeGeneratorMIPS64::GenerateFpCompare(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* locations) {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ if (type == Primitive::kPrimFloat) {
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqS(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondNE:
+ __ CmpEqS(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Addiu(dst, dst, 1);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtS(FTMP, lhs, rhs);
+ } else {
+ __ CmpUltS(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeS(FTMP, lhs, rhs);
+ } else {
+ __ CmpUleS(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltS(FTMP, rhs, lhs);
+ } else {
+ __ CmpLtS(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleS(FTMP, rhs, lhs);
+ } else {
+ __ CmpLeS(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqD(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondNE:
+ __ CmpEqD(FTMP, lhs, rhs);
+ __ Mfc1(dst, FTMP);
+ __ Addiu(dst, dst, 1);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtD(FTMP, lhs, rhs);
+ } else {
+ __ CmpUltD(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeD(FTMP, lhs, rhs);
+ } else {
+ __ CmpUleD(FTMP, lhs, rhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltD(FTMP, rhs, lhs);
+ } else {
+ __ CmpLtD(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleD(FTMP, rhs, lhs);
+ } else {
+ __ CmpLeD(FTMP, rhs, lhs);
+ }
+ __ Mfc1(dst, FTMP);
+ __ Andi(dst, dst, 1);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+ UNREACHABLE();
+ }
+ }
+}
+
void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond,
bool gt_bias,
Primitive::Type type,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 5e049aedcf..d5811c20e3 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -250,6 +250,10 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
bool is64bit,
LocationSummary* locations,
Mips64Label* label);
+ void GenerateFpCompare(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* locations);
void GenerateFpCompareAndBranch(IfCondition cond,
bool gt_bias,
Primitive::Type type,
@@ -320,6 +324,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
block_labels_ = CommonInitializeLabels<Mips64Label>();
}
+ // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
+ // at aligned locations.
+ uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }
+
void Finalize(CodeAllocator* allocator) OVERRIDE;
// Code generation helpers.
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 5a998861eb..3022e975e8 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1846,6 +1846,84 @@ void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}
+// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
+void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnMainOnly,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->SetInAt(4, Location::RequiresRegister());
+
+ // We will call memcpy() to do the actual work. Allocate the temporary
+ // registers to use the correct input registers, and output register.
+ // memcpy() uses the normal MIPS calling conventions.
+ InvokeRuntimeCallingConvention calling_convention;
+
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimLong);
+ locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Check assumption that sizeof(Char) is 2 (used in scaling below).
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+ const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar);
+
+ GpuRegister srcObj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister srcBegin = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister srcEnd = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister dstObj = locations->InAt(3).AsRegister<GpuRegister>();
+ GpuRegister dstBegin = locations->InAt(4).AsRegister<GpuRegister>();
+
+ GpuRegister dstPtr = locations->GetTemp(0).AsRegister<GpuRegister>();
+ DCHECK_EQ(dstPtr, A0);
+ GpuRegister srcPtr = locations->GetTemp(1).AsRegister<GpuRegister>();
+ DCHECK_EQ(srcPtr, A1);
+ GpuRegister numChrs = locations->GetTemp(2).AsRegister<GpuRegister>();
+ DCHECK_EQ(numChrs, A2);
+
+ GpuRegister dstReturn = locations->GetTemp(3).AsRegister<GpuRegister>();
+ DCHECK_EQ(dstReturn, V0);
+
+ Mips64Label done;
+
+ // Location of data in char array buffer.
+ const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+ // Get offset of value field within a string object.
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+
+ __ Beqc(srcEnd, srcBegin, &done); // No characters to move.
+
+ // Calculate number of characters to be copied.
+ __ Dsubu(numChrs, srcEnd, srcBegin);
+
+ // Calculate destination address.
+ __ Daddiu(dstPtr, dstObj, data_offset);
+ __ Dlsa(dstPtr, dstBegin, dstPtr, char_shift);
+
+ // Calculate source address.
+ __ Daddiu(srcPtr, srcObj, value_offset);
+ __ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);
+
+ // Calculate number of bytes to copy from number of characters.
+ __ Dsll(numChrs, numChrs, char_shift);
+
+ codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+
+ __ Bind(&done);
+}
+
static void GenHighestOneBit(LocationSummary* locations,
Primitive::Type type,
Mips64Assembler* assembler) {
@@ -1925,7 +2003,6 @@ void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
}
UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 3cf6a2ea84..322f6c4d70 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -39,6 +39,13 @@ namespace arm {
class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
public:
+ // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
+ // fewer system calls than a larger default capacity.
+ static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;
+
+ ArmVIXLMacroAssembler()
+ : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}
+
// The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
// CMP+Bcc are generated by default.
// If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index b34e125866..5c4875951b 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -145,7 +145,8 @@ class AssemblerTest : public testing::Test {
const std::vector<Reg2*> reg2_registers,
std::string (AssemblerTest::*GetName1)(const Reg1&),
std::string (AssemblerTest::*GetName2)(const Reg2&),
- const std::string& fmt) {
+ const std::string& fmt,
+ int bias = 0) {
std::string str;
std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
@@ -153,7 +154,7 @@ class AssemblerTest : public testing::Test {
for (auto reg2 : reg2_registers) {
for (int64_t imm : imms) {
ImmType new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*reg1, *reg2, new_imm);
+ (assembler_.get()->*f)(*reg1, *reg2, new_imm + bias);
std::string base = fmt;
std::string reg1_string = (this->*GetName1)(*reg1);
@@ -171,7 +172,7 @@ class AssemblerTest : public testing::Test {
size_t imm_index = base.find(IMM_TOKEN);
if (imm_index != std::string::npos) {
std::ostringstream sreg;
- sreg << imm;
+ sreg << imm + bias;
std::string imm_string = sreg.str();
base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
}
@@ -188,6 +189,67 @@ class AssemblerTest : public testing::Test {
return str;
}
+ template <typename Reg1, typename Reg2, typename Reg3, typename ImmType>
+ std::string RepeatTemplatedRegistersImmBits(void (Ass::*f)(Reg1, Reg2, Reg3, ImmType),
+ int imm_bits,
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ const std::vector<Reg3*> reg3_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string (AssemblerTest::*GetName3)(const Reg3&),
+ std::string fmt,
+ int bias) {
+ std::string str;
+ std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
+
+ for (auto reg1 : reg1_registers) {
+ for (auto reg2 : reg2_registers) {
+ for (auto reg3 : reg3_registers) {
+ for (int64_t imm : imms) {
+ ImmType new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(*reg1, *reg2, *reg3, new_imm + bias);
+ std::string base = fmt;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+ base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+ }
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+ base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+ }
+
+ std::string reg3_string = (this->*GetName3)(*reg3);
+ size_t reg3_index;
+ while ((reg3_index = base.find(REG3_TOKEN)) != std::string::npos) {
+ base.replace(reg3_index, ConstexprStrLen(REG3_TOKEN), reg3_string);
+ }
+
+ size_t imm_index = base.find(IMM_TOKEN);
+ if (imm_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << imm + bias;
+ std::string imm_string = sreg.str();
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
template <typename ImmType, typename Reg1, typename Reg2>
std::string RepeatTemplatedImmBitsRegisters(void (Ass::*f)(ImmType, Reg1, Reg2),
const std::vector<Reg1*> reg1_registers,
@@ -245,14 +307,15 @@ class AssemblerTest : public testing::Test {
int imm_bits,
const std::vector<Reg*> registers,
std::string (AssemblerTest::*GetName)(const RegType&),
- const std::string& fmt) {
+ const std::string& fmt,
+ int bias) {
std::string str;
std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
for (auto reg : registers) {
for (int64_t imm : imms) {
ImmType new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*reg, new_imm);
+ (assembler_.get()->*f)(*reg, new_imm + bias);
std::string base = fmt;
std::string reg_string = (this->*GetName)(*reg);
@@ -264,7 +327,7 @@ class AssemblerTest : public testing::Test {
size_t imm_index = base.find(IMM_TOKEN);
if (imm_index != std::string::npos) {
std::ostringstream sreg;
- sreg << imm;
+ sreg << imm + bias;
std::string imm_string = sreg.str();
base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
}
@@ -281,36 +344,60 @@ class AssemblerTest : public testing::Test {
}
template <typename ImmType>
- std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType), int imm_bits, const std::string& fmt) {
+ std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType),
+ int imm_bits,
+ const std::string& fmt,
+ int bias = 0) {
return RepeatTemplatedRegistersImmBits<Reg, Reg, ImmType>(f,
imm_bits,
GetRegisters(),
GetRegisters(),
&AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
&AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
- fmt);
+ fmt,
+ bias);
+ }
+
+ template <typename ImmType>
+ std::string RepeatRRRIb(void (Ass::*f)(Reg, Reg, Reg, ImmType),
+ int imm_bits,
+ const std::string& fmt,
+ int bias = 0) {
+ return RepeatTemplatedRegistersImmBits<Reg, Reg, Reg, ImmType>(f,
+ imm_bits,
+ GetRegisters(),
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt,
+ bias);
}
template <typename ImmType>
- std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, const std::string& fmt) {
+ std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, std::string fmt, int bias = 0) {
return RepeatTemplatedRegisterImmBits<Reg, ImmType>(f,
imm_bits,
GetRegisters(),
&AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
- fmt);
+ fmt,
+ bias);
}
template <typename ImmType>
std::string RepeatFRIb(void (Ass::*f)(FPReg, Reg, ImmType),
int imm_bits,
- const std::string& fmt) {
+ const std::string& fmt,
+ int bias = 0) {
return RepeatTemplatedRegistersImmBits<FPReg, Reg, ImmType>(f,
imm_bits,
GetFPRegisters(),
GetRegisters(),
&AssemblerTest::GetFPRegName,
&AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
- fmt);
+ fmt,
+ bias);
}
std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), const std::string& fmt) {
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index b16d99aa11..ab4f9e944c 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5537,7 +5537,7 @@ const char* const VixlJniHelpersResults[] = {
" f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
" fa: f8d9 c084 ldr.w ip, [r9, #132] ; 0x84\n",
" fe: f1bc 0f00 cmp.w ip, #0\n",
- " 102: d16f bne.n 1e4 <VixlJniHelpers+0x1e4>\n",
+ " 102: d171 bne.n 1e8 <VixlJniHelpers+0x1e8>\n",
" 104: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
" 108: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
" 10c: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
@@ -5593,9 +5593,9 @@ const char* const VixlJniHelpersResults[] = {
" 1d4: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
" 1d8: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
" 1dc: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
- " 1e0: f000 b802 b.w 1e8 <VixlJniHelpers+0x1e8>\n",
- " 1e4: f000 b81a b.w 21c <VixlJniHelpers+0x21c>\n",
- " 1e8: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
+ " 1e0: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
+ " 1e4: f000 b802 b.w 1ec <VixlJniHelpers+0x1ec>\n",
+ " 1e8: f000 b818 b.w 21c <VixlJniHelpers+0x21c>\n",
" 1ec: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
" 1f0: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
" 1f4: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 04430b13f1..5906a71b38 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -319,6 +319,18 @@ void Mips64Assembler::Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size) {
EmitR(0x1f, rs, rt, static_cast<GpuRegister>(pos + size - 33), pos - 32, 0x6);
}
+void Mips64Assembler::Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
+ CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
+ int sa = saPlusOne - 1;
+ EmitR(0x0, rs, rt, rd, sa, 0x05);
+}
+
+void Mips64Assembler::Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
+ CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
+ int sa = saPlusOne - 1;
+ EmitR(0x0, rs, rt, rd, sa, 0x15);
+}
+
void Mips64Assembler::Wsbh(GpuRegister rd, GpuRegister rt) {
EmitRtd(0x1f, rt, rd, 2, 0x20);
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 08a55ed41f..7ef5ab0d39 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -440,6 +440,8 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Dshd(GpuRegister rd, GpuRegister rt); // MIPS64
void Dext(GpuRegister rs, GpuRegister rt, int pos, int size); // MIPS64
void Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size); // MIPS64
+ void Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne);
+ void Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne); // MIPS64
void Wsbh(GpuRegister rd, GpuRegister rt);
void Sc(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0); // MIPS64
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 9d0d0fcc60..564559f92c 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -1141,6 +1141,22 @@ TEST_F(AssemblerMIPS64Test, Dinsu) {
DriverStr(expected.str(), "Dinsu");
}
+TEST_F(AssemblerMIPS64Test, Lsa) {
+ DriverStr(RepeatRRRIb(&mips64::Mips64Assembler::Lsa,
+ 2,
+ "lsa ${reg1}, ${reg2}, ${reg3}, {imm}",
+ 1),
+ "lsa");
+}
+
+TEST_F(AssemblerMIPS64Test, Dlsa) {
+ DriverStr(RepeatRRRIb(&mips64::Mips64Assembler::Dlsa,
+ 2,
+ "dlsa ${reg1}, ${reg2}, ${reg3}, {imm}",
+ 1),
+ "dlsa");
+}
+
TEST_F(AssemblerMIPS64Test, Wsbh) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Wsbh, "wsbh ${reg1}, ${reg2}"), "wsbh");
}
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index c82600b109..1f6b87447f 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -139,6 +139,8 @@ static const MipsInstruction gMipsInstructions[] = {
{ kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x11, "clo", "DS" },
{ kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x12, "dclz", "DS" },
{ kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x13, "dclo", "DS" },
+ { kSpecial0Mask | 0x73f, 0x05, "lsa", "DSTj" },
+ { kSpecial0Mask | 0x73f, 0x15, "dlsa", "DSTj" },
// TODO: sdbbp
// SPECIAL2
@@ -490,6 +492,9 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
case 'i': // Sign-extended lower 16-bit immediate.
args << static_cast<int16_t>(instruction & 0xffff);
break;
+ case 'j': // sa value for lsa/dlsa.
+ args << (sa + 1);
+ break;
case 'L': // Jump label.
{
// TODO: is this right?
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 53fe8fedc5..dcef8c01c2 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -169,7 +169,8 @@
description: "Lack of IPv6 on some buildbot slaves",
result: EXEC_FAILED,
names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
- "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6"],
+ "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6",
+ "libcore.io.OsTest#test_recvfrom_EmptyPacket"],
bug: 25178637
},
{