author Vladimir Marko <vmarko@google.com> 2016-06-21 16:18:10 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2016-06-21 16:18:10 +0000
commit 4692c35c151951aa1fa901ca24bfa302a9beeacf (patch)
tree 8d28d7714dd7fa0ae1ff44888ae61f3c8786bfeb /compiler/optimizing
parent f6d4f6e0e61977777b7a9ca18b75bcd26e98e9f9 (diff)
parent 87f3fcbd0db352157fc59148e94647ef21b73bce (diff)
Merge "Replace String.charAt() with HIR."
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- compiler/optimizing/code_generator.cc | 7
-rw-r--r-- compiler/optimizing/code_generator.h | 5
-rw-r--r-- compiler/optimizing/code_generator_arm.cc | 17
-rw-r--r-- compiler/optimizing/code_generator_arm64.cc | 9
-rw-r--r-- compiler/optimizing/code_generator_mips.cc | 17
-rw-r--r-- compiler/optimizing/code_generator_mips64.cc | 17
-rw-r--r-- compiler/optimizing/code_generator_x86.cc | 16
-rw-r--r-- compiler/optimizing/code_generator_x86_64.cc | 16
-rw-r--r-- compiler/optimizing/graph_visualizer.cc | 10
-rw-r--r-- compiler/optimizing/instruction_simplifier.cc | 27
-rw-r--r-- compiler/optimizing/instruction_simplifier_arm64.cc | 13
-rw-r--r-- compiler/optimizing/instruction_simplifier_arm64.h | 2
-rw-r--r-- compiler/optimizing/intrinsics.h | 1
-rw-r--r-- compiler/optimizing/intrinsics_arm.cc | 49
-rw-r--r-- compiler/optimizing/intrinsics_arm64.cc | 50
-rw-r--r-- compiler/optimizing/intrinsics_mips.cc | 48
-rw-r--r-- compiler/optimizing/intrinsics_mips64.cc | 46
-rw-r--r-- compiler/optimizing/intrinsics_x86.cc | 42
-rw-r--r-- compiler/optimizing/intrinsics_x86_64.cc | 43
-rw-r--r-- compiler/optimizing/nodes.h | 52
-rw-r--r-- compiler/optimizing/prepare_for_register_allocation.cc | 16
-rw-r--r-- compiler/optimizing/stack_map_stream.cc | 7
-rw-r--r-- compiler/optimizing/stack_map_stream.h | 2
23 files changed, 164 insertions, 348 deletions
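
Summary of the merge: the per-backend VisitStringCharAt intrinsic code is deleted and the instruction simplifier instead expands String.charAt() into HArrayLength, HBoundsCheck and HArrayGet nodes (see SimplifyStringCharAt below), so DCE and BCE can treat strings like char arrays; out-of-range indexes now reach the new pThrowStringBounds entrypoint in the bounds-check slow paths. A minimal standalone C++ sketch of what the expanded HIR does at runtime — CharAt, value and count are illustration names only, not ART code:

#include <cstdint>
#include <stdexcept>

// Conceptual effect of the expansion: bounds check, then a 16-bit char load
// from the String's value storage (mirrors HBoundsCheck + HArrayGet).
uint16_t CharAt(const uint16_t* value, int32_t count, int32_t index) {
  if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(count)) {
    // HBoundsCheck slow path: now calls pThrowStringBounds instead of pThrowArrayBounds.
    throw std::out_of_range("string index out of range");
  }
  // HArrayGet with is_string_char_at = true reads at String::ValueOffset().
  return value[index];
}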
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6e851bf1ba..12aa15207c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -146,6 +146,13 @@ uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
: mirror::Array::LengthOffset().Uint32Value();
}
+uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
+ DCHECK(array_get->GetType() == Primitive::kPrimChar || !array_get->IsStringCharAt());
+ return array_get->IsStringCharAt()
+ ? mirror::String::ValueOffset().Uint32Value()
+ : mirror::Array::DataOffset(Primitive::ComponentSize(array_get->GetType())).Uint32Value();
+}
+
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
DCHECK_EQ((*block_order_)[current_block_index_], current);
return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 81c1a7fe16..9364be35ff 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -345,6 +345,11 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// accessing the String's `count` field in String intrinsics.
static uint32_t GetArrayLengthOffset(HArrayLength* array_length);
+ // Helper that returns the offset of the array's data.
+ // Note: Besides the normal arrays, we also use the HArrayGet for
+ // accessing the String's `value` field in String intrinsics.
+ static uint32_t GetArrayDataOffset(HArrayGet* array_get);
+
void EmitParallelMoves(Location from1,
Location to1,
Primitive::Type type1,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f5befa65c1..b9466ba212 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -176,8 +176,11 @@ class BoundsCheckSlowPathARM : public SlowPathCode {
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
+ uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? QUICK_ENTRY_POINT(pThrowStringBounds)
+ : QUICK_ENTRY_POINT(pThrowArrayBounds);
+ arm_codegen->InvokeRuntime(entry_point_offset, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -4289,11 +4292,11 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
Register obj = obj_loc.AsRegister<Register>();
Location index = locations->InAt(1);
Location out_loc = locations->Out();
+ uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -4307,7 +4310,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimByte: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -4321,7 +4323,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimShort: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -4335,7 +4336,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimChar: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -4349,7 +4349,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimInt: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -4366,7 +4365,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -4401,7 +4399,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimLong: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
@@ -4414,7 +4411,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimFloat: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
SRegister out = out_loc.AsFpuRegister<SRegister>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
@@ -4427,7 +4423,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimDouble: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
SRegister out = out_loc.AsFpuRegisterPairLow<SRegister>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f748898c13..4692a4a876 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -237,8 +237,11 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
codegen->EmitParallelMoves(
locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
- arm64_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
+ uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? QUICK_ENTRY_POINT(pThrowStringBounds)
+ : QUICK_ENTRY_POINT(pThrowArrayBounds);
+ arm64_codegen->InvokeRuntime(entry_point_offset, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -2054,8 +2057,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
Register obj = InputRegisterAt(instruction, 0);
LocationSummary* locations = instruction->GetLocations();
Location index = locations->InAt(1);
- uint32_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
Location out = locations->Out();
+ uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction);
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b2cebc0dbd..738180670f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -166,11 +166,15 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? QUICK_ENTRY_POINT(pThrowStringBounds)
+ : QUICK_ENTRY_POINT(pThrowArrayBounds);
+ mips_codegen->InvokeRuntime(entry_point_offset,
instruction_,
instruction_->GetDexPc(),
this,
IsDirectEntrypoint(kQuickThrowArrayBounds));
+ CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -1635,11 +1639,11 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
- Primitive::Type type = instruction->GetType();
+ uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
+ Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -1653,7 +1657,6 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimByte: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -1667,7 +1670,6 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimShort: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -1682,7 +1684,6 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimChar: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -1699,7 +1700,6 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -1714,7 +1714,6 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimLong: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
Register out = locations->Out().AsRegisterPairLow<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -1729,7 +1728,6 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimFloat: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
FRegister out = locations->Out().AsFpuRegister<FRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -1744,7 +1742,6 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimDouble: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
FRegister out = locations->Out().AsFpuRegister<FRegister>();
if (index.IsConstant()) {
size_t offset =
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 1a20f5c0fe..80d029a936 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -127,10 +127,14 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? QUICK_ENTRY_POINT(pThrowStringBounds)
+ : QUICK_ENTRY_POINT(pThrowArrayBounds);
+ mips64_codegen->InvokeRuntime(entry_point_offset,
instruction_,
instruction_->GetDexPc(),
this);
+ CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -1289,11 +1293,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
Location index = locations->InAt(1);
- Primitive::Type type = instruction->GetType();
+ uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
+ Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -1307,7 +1311,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimByte: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -1321,7 +1324,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimShort: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -1336,7 +1338,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimChar: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -1353,7 +1354,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
if (index.IsConstant()) {
@@ -1369,7 +1369,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimLong: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -1384,7 +1383,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimFloat: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -1399,7 +1397,6 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimDouble: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
if (index.IsConstant()) {
size_t offset =
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a816d5f0e8..52868f4b2e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -148,10 +148,14 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? QUICK_ENTRY_POINT(pThrowStringBounds)
+ : QUICK_ENTRY_POINT(pThrowArrayBounds);
+ x86_codegen->InvokeRuntime(entry_point_offset,
instruction_,
instruction_->GetDexPc(),
this);
+ CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -5060,11 +5064,11 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
Register obj = obj_loc.AsRegister<Register>();
Location index = locations->InAt(1);
Location out_loc = locations->Out();
+ uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
@@ -5076,7 +5080,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimByte: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
@@ -5088,7 +5091,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimShort: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
@@ -5100,7 +5102,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimChar: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
@@ -5112,7 +5113,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimInt: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
@@ -5127,7 +5127,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -5161,7 +5160,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimLong: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
DCHECK_NE(obj, out_loc.AsRegisterPairLow<Register>());
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
@@ -5179,7 +5177,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimFloat: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movss(out, Address(obj,
@@ -5191,7 +5188,6 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimDouble: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movsd(out, Address(obj,
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ccfb181e14..9a3e8d266b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -204,10 +204,14 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? QUICK_ENTRY_POINT(pThrowStringBounds)
+ : QUICK_ENTRY_POINT(pThrowArrayBounds);
+ x86_64_codegen->InvokeRuntime(entry_point_offset,
instruction_,
instruction_->GetDexPc(),
this);
+ CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -4555,11 +4559,11 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
CpuRegister obj = obj_loc.AsRegister<CpuRegister>();
Location index = locations->InAt(1);
Location out_loc = locations->Out();
+ uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
@@ -4571,7 +4575,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimByte: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
@@ -4583,7 +4586,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimShort: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
@@ -4595,7 +4597,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimChar: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
@@ -4607,7 +4608,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimInt: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
@@ -4622,7 +4622,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -4656,7 +4655,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimLong: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (index.IsConstant()) {
__ movq(out, Address(obj,
@@ -4668,7 +4666,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimFloat: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movss(out, Address(obj,
@@ -4680,7 +4677,6 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimDouble: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
if (index.IsConstant()) {
__ movsd(out, Address(obj,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 7d11948f4e..4af8d1985b 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -403,6 +403,16 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
<< array_length->IsStringLength() << std::noboolalpha;
}
+ void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+ StartAttributeStream("is_string_char_at") << std::boolalpha
+ << bounds_check->IsStringCharAt() << std::noboolalpha;
+ }
+
+ void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+ StartAttributeStream("is_string_char_at") << std::boolalpha
+ << array_get->IsStringCharAt() << std::noboolalpha;
+ }
+
void VisitArraySet(HArraySet* array_set) OVERRIDE {
StartAttributeStream("value_can_be_null") << std::boolalpha
<< array_set->GetValueCanBeNull() << std::noboolalpha;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index eb1d1560db..62d637081d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -101,6 +101,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void SimplifyCompare(HInvoke* invoke, bool is_signum, Primitive::Type type);
void SimplifyIsNaN(HInvoke* invoke);
void SimplifyFP2Int(HInvoke* invoke);
+ void SimplifyStringCharAt(HInvoke* invoke);
void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
@@ -1685,13 +1686,32 @@ void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) {
invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0
}
+void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) {
+ HInstruction* str = invoke->InputAt(0);
+ HInstruction* index = invoke->InputAt(1);
+ uint32_t dex_pc = invoke->GetDexPc();
+ ArenaAllocator* arena = GetGraph()->GetArena();
+ // We treat String as an array to allow DCE and BCE to seamlessly work on strings,
+ // so create the HArrayLength, HBoundsCheck and HArrayGet.
+ HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ invoke->GetBlock()->InsertInstructionBefore(length, invoke);
+ HBoundsCheck* bounds_check =
+ new (arena) HBoundsCheck(index, length, dex_pc, invoke->GetDexMethodIndex());
+ invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
+ HArrayGet* array_get =
+ new (arena) HArrayGet(str, index, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true);
+ invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
+ bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
+ GetGraph()->SetHasBoundsChecks(true);
+}
+
void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke) {
HInstruction* str = invoke->InputAt(0);
uint32_t dex_pc = invoke->GetDexPc();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength.
- HArrayLength* length = new (GetGraph()->GetArena()) HArrayLength(str, dex_pc);
- length->MarkAsStringLength();
+ HArrayLength* length =
+ new (GetGraph()->GetArena()) HArrayLength(str, dex_pc, /* is_string_length */ true);
HInstruction* replacement;
if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
// For String.isEmpty(), create the `HEqual` representing the `length == 0`.
@@ -1752,6 +1772,9 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
case Intrinsics::kDoubleDoubleToLongBits:
SimplifyFP2Int(instruction);
break;
+ case Intrinsics::kStringCharAt:
+ SimplifyStringCharAt(instruction);
+ break;
case Intrinsics::kStringIsEmpty:
case Intrinsics::kStringLength:
SimplifyStringIsEmptyOrLength(instruction);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index e4a711ec83..983d31d168 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -19,6 +19,7 @@
#include "common_arm64.h"
#include "instruction_simplifier_shared.h"
#include "mirror/array-inl.h"
+#include "mirror/string.h"
namespace art {
namespace arm64 {
@@ -30,7 +31,7 @@ using helpers::ShifterOperandSupportsExtension;
void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstruction* access,
HInstruction* array,
HInstruction* index,
- int access_size) {
+ size_t data_offset) {
if (kEmitCompilerReadBarrier) {
// The read barrier instrumentation does not support the
// HArm64IntermediateAddress instruction yet.
@@ -55,8 +56,7 @@ void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstructio
// Proceed to extract the base address computation.
ArenaAllocator* arena = GetGraph()->GetArena();
- HIntConstant* offset =
- GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
+ HIntConstant* offset = GetGraph()->GetIntConstant(data_offset);
HArm64IntermediateAddress* address =
new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
@@ -189,17 +189,20 @@ void InstructionSimplifierArm64Visitor::VisitAnd(HAnd* instruction) {
}
void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
+ size_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
TryExtractArrayAccessAddress(instruction,
instruction->GetArray(),
instruction->GetIndex(),
- Primitive::ComponentSize(instruction->GetType()));
+ data_offset);
}
void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
+ size_t access_size = Primitive::ComponentSize(instruction->GetComponentType());
+ size_t data_offset = mirror::Array::DataOffset(access_size).Uint32Value();
TryExtractArrayAccessAddress(instruction,
instruction->GetArray(),
instruction->GetIndex(),
- Primitive::ComponentSize(instruction->GetComponentType()));
+ data_offset);
}
void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index da269980e8..4735f85ab0 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -38,7 +38,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
void TryExtractArrayAccessAddress(HInstruction* access,
HInstruction* array,
HInstruction* index,
- int access_size);
+ size_t data_offset);
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
bool TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 83a512738b..3429a8fdbb 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -239,6 +239,7 @@ UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
UNREACHABLE_INTRINSIC(Arch, LongCompare) \
UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
UNREACHABLE_INTRINSIC(Arch, LongSignum) \
+UNREACHABLE_INTRINSIC(Arch, StringCharAt) \
UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
UNREACHABLE_INTRINSIC(Arch, StringLength) \
UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 7d1c2ebe0b..93950d58b5 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -935,55 +935,6 @@ void IntrinsicCodeGeneratorARM::VisitUnsafeCASObject(HInvoke* invoke) {
GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
}
-void IntrinsicLocationsBuilderARM::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-
- locations->AddTemp(Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorARM::VisitStringCharAt(HInvoke* invoke) {
- ArmAssembler* assembler = GetAssembler();
- LocationSummary* locations = invoke->GetLocations();
-
- // Location of reference to data array
- const MemberOffset value_offset = mirror::String::ValueOffset();
- // Location of count
- const MemberOffset count_offset = mirror::String::CountOffset();
-
- Register obj = locations->InAt(0).AsRegister<Register>(); // String object pointer.
- Register idx = locations->InAt(1).AsRegister<Register>(); // Index of character.
- Register out = locations->Out().AsRegister<Register>(); // Result character.
-
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- Register array_temp = locations->GetTemp(1).AsRegister<Register>();
-
- // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
- // the cost.
- // TODO: For simplicity, the index parameter is requested in a register, so different from Quick
- // we will not optimize the code for constants (which would save a register).
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
- codegen_->AddSlowPath(slow_path);
-
- __ ldr(temp, Address(obj, count_offset.Int32Value())); // temp = str.length.
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ cmp(idx, ShifterOperand(temp));
- __ b(slow_path->GetEntryLabel(), CS);
-
- __ add(array_temp, obj, ShifterOperand(value_offset.Int32Value())); // array_temp := str.value.
-
- // Load the value.
- __ ldrh(out, Address(array_temp, idx, LSL, 1)); // out := array_temp[idx].
-
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderARM::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c8d6ddc8f1..4da0843a76 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1122,56 +1122,6 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafeCASObject(HInvoke* invoke) {
GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
}
-void IntrinsicLocationsBuilderARM64::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // In case we need to go in the slow path, we can't have the output be the same
- // as the input: the current liveness analysis considers the input to be live
- // at the point of the call.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitStringCharAt(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
- LocationSummary* locations = invoke->GetLocations();
-
- // Location of reference to data array
- const MemberOffset value_offset = mirror::String::ValueOffset();
- // Location of count
- const MemberOffset count_offset = mirror::String::CountOffset();
-
- Register obj = WRegisterFrom(locations->InAt(0)); // String object pointer.
- Register idx = WRegisterFrom(locations->InAt(1)); // Index of character.
- Register out = WRegisterFrom(locations->Out()); // Result character.
-
- UseScratchRegisterScope temps(masm);
- Register temp = temps.AcquireW();
- Register array_temp = temps.AcquireW(); // We can trade this for worse scheduling.
-
- // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
- // the cost.
- // TODO: For simplicity, the index parameter is requested in a register, so different from Quick
- // we will not optimize the code for constants (which would save a register).
-
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
- codegen_->AddSlowPath(slow_path);
-
- __ Ldr(temp, HeapOperand(obj, count_offset)); // temp = str.length.
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ Cmp(idx, temp);
- __ B(hs, slow_path->GetEntryLabel());
-
- __ Add(array_temp, obj, Operand(value_offset.Int32Value())); // array_temp := str.value.
-
- // Load the value.
- __ Ldrh(out, MemOperand(array_temp.X(), idx, UXTW, 1)); // out := array_temp[idx].
-
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
invoke->InputAt(1)->CanBeNull()
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 140f56a870..d4f44d63e2 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1872,54 +1872,6 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
}
-// char java.lang.String.charAt(int index)
-void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // The inputs will be considered live at the last instruction and restored. This would overwrite
- // the output with kNoOutputOverlap.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = invoke->GetLocations();
- MipsAssembler* assembler = GetAssembler();
-
- // Location of reference to data array
- const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
- // Location of count
- const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-
- Register obj = locations->InAt(0).AsRegister<Register>();
- Register idx = locations->InAt(1).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
-
- // TODO: Maybe we can support range check elimination. Overall,
- // though, I think it's not worth the cost.
- // TODO: For simplicity, the index parameter is requested in a
- // register, so different from Quick we will not optimize the
- // code for constants (which would save a register).
-
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load the string size
- __ Lw(TMP, obj, count_offset);
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- // Revert to slow path if idx is too large, or negative
- __ Bgeu(idx, TMP, slow_path->GetEntryLabel());
-
- // out = obj[2*idx].
- __ Sll(TMP, idx, 1); // idx * 2
- __ Addu(TMP, TMP, obj); // Address of char at location idx
- __ Lhu(out, TMP, value_offset); // Load char at location idx
-
- __ Bind(slow_path->GetExitLabel());
-}
-
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 6c4e64e4b1..9243f4c93f 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1371,52 +1371,6 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
}
-// char java.lang.String.charAt(int index)
-void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = invoke->GetLocations();
- Mips64Assembler* assembler = GetAssembler();
-
- // Location of reference to data array
- const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
- // Location of count
- const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-
- GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
- GpuRegister idx = locations->InAt(1).AsRegister<GpuRegister>();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
- // TODO: Maybe we can support range check elimination. Overall,
- // though, I think it's not worth the cost.
- // TODO: For simplicity, the index parameter is requested in a
- // register, so different from Quick we will not optimize the
- // code for constants (which would save a register).
-
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load the string size
- __ Lw(TMP, obj, count_offset);
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- // Revert to slow path if idx is too large, or negative
- __ Bgeuc(idx, TMP, slow_path->GetEntryLabel());
-
- // out = obj[2*idx].
- __ Sll(TMP, idx, 1); // idx * 2
- __ Daddu(TMP, TMP, obj); // Address of char at location idx
- __ Lhu(out, TMP, value_offset); // Load char at location idx
-
- __ Bind(slow_path->GetExitLabel());
-}
-
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 05377f984b..4988398c92 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1030,48 +1030,6 @@ void IntrinsicCodeGeneratorX86::VisitMathNextAfter(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickNextAfter);
}
-void IntrinsicLocationsBuilderX86::VisitStringCharAt(HInvoke* invoke) {
- // The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
-}
-
-void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = invoke->GetLocations();
-
- // Location of reference to data array.
- const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
- // Location of count.
- const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-
- Register obj = locations->InAt(0).AsRegister<Register>();
- Register idx = locations->InAt(1).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
-
- // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
- // the cost.
- // TODO: For simplicity, the index parameter is requested in a register, so different from Quick
- // we will not optimize the code for constants (which would save a register).
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
- codegen_->AddSlowPath(slow_path);
-
- X86Assembler* assembler = GetAssembler();
-
- __ cmpl(idx, Address(obj, count_offset));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ j(kAboveEqual, slow_path->GetEntryLabel());
-
- // out = out[2*idx].
- __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));
-
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
// We need at least two of the positions or length to be an integer constant,
// or else we won't have enough free registers.
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 67c2f3a866..593c8f319b 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -891,49 +891,6 @@ void IntrinsicCodeGeneratorX86_64::VisitMathNextAfter(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickNextAfter);
}
-void IntrinsicLocationsBuilderX86_64::VisitStringCharAt(HInvoke* invoke) {
- // The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitStringCharAt(HInvoke* invoke) {
- LocationSummary* locations = invoke->GetLocations();
-
- // Location of reference to data array.
- const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
- // Location of count.
- const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister idx = locations->InAt(1).AsRegister<CpuRegister>();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-
- // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
- // the cost.
- // TODO: For simplicity, the index parameter is requested in a register, so different from Quick
- // we will not optimize the code for constants (which would save a register).
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
- codegen_->AddSlowPath(slow_path);
-
- X86_64Assembler* assembler = GetAssembler();
-
- __ cmpl(idx, Address(obj, count_offset));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ j(kAboveEqual, slow_path->GetEntryLabel());
-
- // out = out[2*idx].
- __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));
-
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
// Check to see if we have known failures that will cause us to have to bail out
// to the runtime, and just generate the runtime call directly.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 74a4acae3f..455f4e338d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -26,6 +26,7 @@
#include "base/arena_object.h"
#include "base/stl_util.h"
#include "dex/compiler_enums.h"
+#include "dex_file.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
#include "handle_scope.h"
@@ -1930,6 +1931,14 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
environment_ = environment;
}
+ void InsertRawEnvironment(HEnvironment* environment) {
+ DCHECK(environment_ != nullptr);
+ DCHECK_EQ(environment->GetHolder(), this);
+ DCHECK(environment->GetParent() == nullptr);
+ environment->parent_ = environment_;
+ environment_ = environment;
+ }
+
void RemoveEnvironment();
// Set the environment of this instruction, copying it from `environment`. While
@@ -5089,8 +5098,13 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
class HArrayGet FINAL : public HExpression<2> {
public:
- HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type, uint32_t dex_pc)
+ HArrayGet(HInstruction* array,
+ HInstruction* index,
+ Primitive::Type type,
+ uint32_t dex_pc,
+ bool is_string_char_at = false)
: HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
+ SetPackedFlag<kFlagIsStringCharAt>(is_string_char_at);
SetRawInputAt(0, array);
SetRawInputAt(1, index);
}
@@ -5124,12 +5138,24 @@ class HArrayGet FINAL : public HExpression<2> {
return result;
}
+ bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
+
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
DECLARE_INSTRUCTION(ArrayGet);
private:
+ // We treat a String as an array, creating the HArrayGet from String.charAt()
+ // intrinsic in the instruction simplifier. We can always determine whether
+ // a particular HArrayGet is actually a String.charAt() by looking at the type
+ // of the input but that requires holding the mutator lock, so we prefer to use
+ // a flag, so that code generators don't need to do the locking.
+ static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits;
+ static constexpr size_t kNumberOfArrayGetPackedBits = kFlagIsStringCharAt + 1;
+ static_assert(kNumberOfArrayGetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+ "Too many packed fields.");
+
DISALLOW_COPY_AND_ASSIGN(HArrayGet);
};
@@ -5235,8 +5261,9 @@ class HArraySet FINAL : public HTemplateInstruction<3> {
class HArrayLength FINAL : public HExpression<1> {
public:
- HArrayLength(HInstruction* array, uint32_t dex_pc)
+ HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false)
: HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
+ SetPackedFlag<kFlagIsStringLength>(is_string_length);
// Note that arrays do not change length, so the instruction does not
// depend on any write.
SetRawInputAt(0, array);
@@ -5250,7 +5277,6 @@ class HArrayLength FINAL : public HExpression<1> {
return obj == InputAt(0);
}
- void MarkAsStringLength() { SetPackedFlag<kFlagIsStringLength>(); }
bool IsStringLength() const { return GetPackedFlag<kFlagIsStringLength>(); }
DECLARE_INSTRUCTION(ArrayLength);
@@ -5273,8 +5299,12 @@ class HBoundsCheck FINAL : public HExpression<2> {
public:
// `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
// constructor.
- HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc)
- : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
+ HBoundsCheck(HInstruction* index,
+ HInstruction* length,
+ uint32_t dex_pc,
+ uint32_t string_char_at_method_index = DexFile::kDexNoIndex)
+ : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc),
+ string_char_at_method_index_(string_char_at_method_index) {
DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(index->GetType()));
SetRawInputAt(0, index);
SetRawInputAt(1, length);
@@ -5289,11 +5319,23 @@ class HBoundsCheck FINAL : public HExpression<2> {
bool CanThrow() const OVERRIDE { return true; }
+ bool IsStringCharAt() const { return GetStringCharAtMethodIndex() != DexFile::kDexNoIndex; }
+ uint32_t GetStringCharAtMethodIndex() const { return string_char_at_method_index_; }
+
HInstruction* GetIndex() const { return InputAt(0); }
DECLARE_INSTRUCTION(BoundsCheck);
private:
+ // We treat a String as an array, creating the HBoundsCheck from String.charAt()
+ // intrinsic in the instruction simplifier. We want to include the String.charAt()
+ // in the stack trace if we actually throw the StringIndexOutOfBoundsException,
+ // so we need to create an HEnvironment which will be translated to an InlineInfo
+ // indicating the extra stack frame. Since we add this HEnvironment quite late,
+ // in the PrepareForRegisterAllocation pass, we need to remember the method index
+ // from the invoke as we don't want to look again at the dex bytecode.
+ uint32_t string_char_at_method_index_; // DexFile::kDexNoIndex if regular array.
+
DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
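
The new is_string_char_at bit above rides in the instruction's packed-flag storage rather than in a separate member, which is why the code generators can query it without touching the mutator lock. A simplified, standalone sketch of that pattern — PackedBits, the flag slot 0 and IsStringCharAtExample are assumed names, not the real HInstruction layout:

#include <bitset>
#include <cstddef>

// Assumed stand-in for HInstruction's packed-flag storage.
class PackedBits {
 public:
  template <size_t kFlag> void SetPackedFlag(bool value = true) { bits_.set(kFlag, value); }
  template <size_t kFlag> bool GetPackedFlag() const { return bits_.test(kFlag); }
 private:
  std::bitset<32> bits_;
};

// Mirrors HArrayGet: the bit position follows the base class's bits (assumed 0 here).
constexpr size_t kFlagIsStringCharAt = 0;

bool IsStringCharAtExample(bool is_string_char_at) {
  PackedBits flags;
  flags.SetPackedFlag<kFlagIsStringCharAt>(is_string_char_at);  // set in the constructor
  return flags.GetPackedFlag<kFlagIsStringCharAt>();            // queried by code generators
}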
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index c941c0c086..696b8c6859 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -40,6 +40,22 @@ void PrepareForRegisterAllocation::VisitDivZeroCheck(HDivZeroCheck* check) {
void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
check->ReplaceWith(check->InputAt(0));
+ if (check->IsStringCharAt()) {
+ // Add a fake environment for String.charAt() inline info as we want
+ // the exception to appear as being thrown from there.
+ const DexFile& dex_file = check->GetEnvironment()->GetDexFile();
+ DCHECK_STREQ(PrettyMethod(check->GetStringCharAtMethodIndex(), dex_file).c_str(),
+ "char java.lang.String.charAt(int)");
+ ArenaAllocator* arena = GetGraph()->GetArena();
+ HEnvironment* environment = new (arena) HEnvironment(arena,
+ /* number_of_vregs */ 0u,
+ dex_file,
+ check->GetStringCharAtMethodIndex(),
+ /* dex_pc */ DexFile::kDexNoIndex,
+ kVirtual,
+ check);
+ check->InsertRawEnvironment(environment);
+ }
}
void PrepareForRegisterAllocation::VisitBoundType(HBoundType* bound_type) {
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 11a254ef63..fc8af6462a 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -228,7 +228,7 @@ size_t StackMapStream::ComputeDexRegisterMapsSize() const {
void StackMapStream::ComputeInlineInfoEncoding() {
uint32_t method_index_max = 0;
- uint32_t dex_pc_max = 0;
+ uint32_t dex_pc_max = DexFile::kDexNoIndex;
uint32_t invoke_type_max = 0;
uint32_t inline_info_index = 0;
@@ -236,7 +236,10 @@ void StackMapStream::ComputeInlineInfoEncoding() {
for (size_t j = 0; j < entry.inlining_depth; ++j) {
InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
method_index_max = std::max(method_index_max, inline_entry.method_index);
- dex_pc_max = std::max(dex_pc_max, inline_entry.dex_pc);
+ if (inline_entry.dex_pc != DexFile::kDexNoIndex &&
+ (dex_pc_max == DexFile::kDexNoIndex || dex_pc_max < inline_entry.dex_pc)) {
+ dex_pc_max = inline_entry.dex_pc;
+ }
invoke_type_max = std::max(invoke_type_max, static_cast<uint32_t>(inline_entry.invoke_type));
}
}
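
The dex_pc_max change above treats DexFile::kDexNoIndex as "no dex pc" (the sentinel used for the intrinsified charAt frame) rather than as a very large value that would dominate a plain std::max. A standalone sketch of that selection logic — UpdateDexPcMax is an assumed name, and kDexNoIndex is assumed to be 0xFFFFFFFF as in ART's DexFile:

#include <algorithm>
#include <cstdint>

constexpr uint32_t kDexNoIndex = 0xFFFFFFFFu;  // assumed sentinel for "no dex pc"

// Ignore sentinel entries when tracking the maximum dex pc for the encoding.
uint32_t UpdateDexPcMax(uint32_t dex_pc_max, uint32_t dex_pc) {
  if (dex_pc == kDexNoIndex) {
    return dex_pc_max;
  }
  return (dex_pc_max == kDexNoIndex) ? dex_pc : std::max(dex_pc_max, dex_pc);
}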
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 41f72f508b..53a9795d52 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -108,7 +108,7 @@ class StackMapStream : public ValueObject {
};
struct InlineInfoEntry {
- uint32_t dex_pc;
+ uint32_t dex_pc; // DexFile::kDexNoIndex for intrinsified native methods.
uint32_t method_index;
InvokeType invoke_type;
uint32_t num_dex_registers;