Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/code_generator_arm_vixl.cc | 254
 compiler/optimizing/code_generator_arm_vixl.h  |   6
 compiler/optimizing/escape.cc                  |  49
 compiler/optimizing/escape.h                   |  23
 compiler/optimizing/instruction_simplifier.cc  |  63
 compiler/optimizing/intrinsics_arm.cc          |   6
 compiler/optimizing/intrinsics_arm64.cc        |   6
 compiler/optimizing/intrinsics_arm_vixl.cc     |  11
 compiler/optimizing/intrinsics_mips.cc         |   6
 compiler/optimizing/intrinsics_mips64.cc       |   6
 compiler/optimizing/intrinsics_x86.cc          |   6
 compiler/optimizing/intrinsics_x86_64.cc       |   6
 compiler/optimizing/load_store_elimination.cc  |  29
 compiler/optimizing/nodes.h                    |  17
14 files changed, 364 insertions(+), 124 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index aa8a77e8fc..1ca439e8cf 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1322,11 +1322,10 @@ void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
       }
       break;
-    // TODO(VIXL): https://android-review.googlesource.com/#/c/252265/
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble:
       locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
       if (!cond->IsEmittedAtUseSite()) {
         locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
       }
@@ -1346,13 +1345,20 @@ void InstructionCodeGeneratorARMVIXL::HandleCondition(HCondition* cond) {
     return;
   }
 
+  Location right = cond->GetLocations()->InAt(1);
   vixl32::Register out = OutputRegister(cond);
   vixl32::Label true_label, false_label;
 
   switch (cond->InputAt(0)->GetType()) {
     default: {
       // Integer case.
-      __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
+      if (right.IsRegister()) {
+        __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
+      } else {
+        DCHECK(right.IsConstant());
+        __ Cmp(InputRegisterAt(cond, 0),
+               CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+      }
       AssemblerAccurateScope aas(GetVIXLAssembler(),
                                  kArmInstrMaxSizeInBytes * 3u,
                                  CodeBufferCheckScope::kMaximumSize);
@@ -2776,15 +2782,8 @@ void InstructionCodeGeneratorARMVIXL::VisitRem(HRem* rem) {
 
 void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
-  // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
   locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -3956,15 +3955,8 @@ void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldSet(
 }
 
 void LocationsBuilderARMVIXL::VisitNullCheck(HNullCheck* instruction) {
-  // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
-  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
   locations->SetInAt(0, Location::RequiresRegister());
-  if (instruction->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
 }
 
 void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4697,8 +4689,9 @@ void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instructi
 }
 
 void LocationsBuilderARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) {
-  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
-  // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/ and related.
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+  locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5200,14 +5193,27 @@ void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) {
   CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
 }
 
-static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
-  return kEmitCompilerReadBarrier &&
-      (kUseBakerReadBarrier ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck);
+// Temp is used for read barrier.
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
+  if (kEmitCompilerReadBarrier &&
+      (kUseBakerReadBarrier ||
+       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+       type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+    return 1;
+  }
+  return 0;
 }
 
+// Interface case has 3 temps, one for holding the number of interfaces, one for the current
+// interface pointer, one for loading the current interface.
+// The other checks have one temp for loading the object's class.
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+  if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
+    return 3;
+  }
+  return 1 + NumberOfInstanceOfTemps(type_check_kind);
+}
+
 void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
@@ -5238,11 +5244,7 @@ void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
   // The "out" register is used as a temporary, so it overlaps with the inputs.
   // Note that TypeCheckSlowPathARM uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-  // When read barriers are enabled, we need a temporary register for
-  // some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
@@ -5253,9 +5255,9 @@
   vixl32::Register cls = InputRegisterAt(instruction, 1);
   Location out_loc = locations->Out();
   vixl32::Register out = OutputRegister(instruction);
-  Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(0) :
-      Location::NoLocation();
+  const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+  DCHECK_LE(num_temps, 1u);
+  Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5276,7 +5278,8 @@
                                         out_loc,
                                         obj_loc,
                                         class_offset,
-                                        maybe_temp_loc);
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       __ Cmp(out, cls);
       // Classes must be equal for the instanceof to succeed.
       __ B(ne, &zero);
@@ -5291,13 +5294,18 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
                                         out_loc,
                                         obj_loc,
                                         class_offset,
-                                        maybe_temp_loc);
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       vixl32::Label loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ CompareAndBranchIfZero(out, &done, /* far_target */ false);
       __ Cmp(out, cls);
@@ -5315,14 +5323,19 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
                                         out_loc,
                                         obj_loc,
                                         class_offset,
-                                        maybe_temp_loc);
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       vixl32::Label loop, success;
       __ Bind(&loop);
       __ Cmp(out, cls);
       __ B(eq, &success);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ CompareAndBranchIfNonZero(out, &loop);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ B(&done);
@@ -5340,14 +5353,19 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
                                         out_loc,
                                         obj_loc,
                                         class_offset,
-                                        maybe_temp_loc);
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       vixl32::Label exact_check;
       __ Cmp(out, cls);
       __ B(eq, &exact_check);
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ out = out->component_type_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       component_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ CompareAndBranchIfZero(out, &done, /* far_target */ false);
       GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -5360,12 +5378,14 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
       // /* HeapReference<Class> */ out = obj->klass_
       GenerateReferenceLoadTwoRegisters(instruction,
                                         out_loc,
                                         obj_loc,
                                         class_offset,
-                                        maybe_temp_loc);
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
@@ -5449,13 +5469,7 @@ void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) {
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
-  // Note that TypeCheckSlowPathARM uses this "temp" register too.
-  locations->AddTemp(Location::RequiresRegister());
-  // When read barriers are enabled, we need an additional temporary
-  // register for some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
@@ -5466,20 +5480,31 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
   vixl32::Register cls = InputRegisterAt(instruction, 1);
   Location temp_loc = locations->GetTemp(0);
   vixl32::Register temp = RegisterFrom(temp_loc);
-  Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(1) :
-      Location::NoLocation();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
-
-  bool is_type_check_slow_path_fatal =
-      (type_check_kind == TypeCheckKind::kExactCheck ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
-      !instruction->CanThrowIntoCatchBlock();
+  const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+  DCHECK_LE(num_temps, 3u);
+  Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
+  Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation();
+  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+  const uint32_t object_array_data_offset =
+      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+
+  // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
+  // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
+  // read barriers is done for performance and code size reasons.
+  bool is_type_check_slow_path_fatal = false;
+  if (!kEmitCompilerReadBarrier) {
+    is_type_check_slow_path_fatal =
+        (type_check_kind == TypeCheckKind::kExactCheck ||
+         type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+         type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+         type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+        !instruction->CanThrowIntoCatchBlock();
+  }
   SlowPathCodeARMVIXL* type_check_slow_path =
       new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
                                                             is_type_check_slow_path_fatal);
@@ -5491,12 +5516,17 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
     __ CompareAndBranchIfZero(obj, &done, /* far_target */ false);
   }
 
-  // /* HeapReference<Class> */ temp = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck:
     case TypeCheckKind::kArrayCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       __ Cmp(temp, cls);
       // Jump to slow path for throwing the exception or doing a
       // more involved array check.
@@ -5505,12 +5535,24 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       vixl32::Label loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
       // If the class reference currently in `temp` is null, jump to the slow path to throw the
       // exception.
@@ -5523,6 +5565,14 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // Walk over the class hierarchy to find a match.
       vixl32::Label loop;
       __ Bind(&loop);
@@ -5530,7 +5580,11 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
       __ B(eq, &done);
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
       // If the class reference currently in `temp` is null, jump to the slow path to throw the
       // exception.
@@ -5541,13 +5595,25 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // Do an exact check.
       __ Cmp(temp, cls);
       __ B(eq, &done);
 
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ temp = temp->component_type_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       component_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
       // If the component type is null, jump to the slow path to throw the exception.
       __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
       // Otherwise, the object is indeed an array, jump to label `check_non_primitive_component_type`
@@ -5559,10 +5625,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
     }
 
     case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      // We always go into the type check slow path for the unresolved
-      // and interface check cases.
-      //
+      // We always go into the type check slow path for the unresolved check case.
       // We cannot directly call the CheckCast runtime entry point
       // without resorting to a type checking slow path here (i.e. by
       // calling InvokeRuntime directly), as it would require to
@@ -5570,8 +5633,45 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
       // instruction (following the runtime calling convention), which
       // might be cluttered by the potential first read barrier
       // emission at the beginning of this method.
+      __ B(type_check_slow_path->GetEntryLabel());
       break;
+
+    case TypeCheckKind::kInterfaceCheck: {
+      // Avoid read barriers to improve performance of the fast path. We cannot get false
+      // positives by doing this.
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
+      // /* HeapReference<Class> */ temp = temp->iftable_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        temp_loc,
+                                        iftable_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+      // Iftable is never null.
+      __ Ldr(RegisterFrom(maybe_temp2_loc), MemOperand(temp, array_length_offset));
+      // Loop through the iftable and check if any class matches.
+      vixl32::Label start_loop;
+      __ Bind(&start_loop);
+      __ CompareAndBranchIfZero(RegisterFrom(maybe_temp2_loc),
+                                type_check_slow_path->GetEntryLabel());
+      __ Ldr(RegisterFrom(maybe_temp3_loc), MemOperand(temp, object_array_data_offset));
+      GetAssembler()->MaybeUnpoisonHeapReference(RegisterFrom(maybe_temp3_loc));
+      // Go to next interface.
+      __ Add(temp, temp, Operand::From(2 * kHeapReferenceSize));
+      __ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2);
+      // Compare the classes and continue the loop if they do not match.
+      __ Cmp(cls, RegisterFrom(maybe_temp3_loc));
+      __ B(ne, &start_loop);
+      break;
+    }
   }
 
   __ Bind(&done);
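The interface check emitted above is a linear scan over the class's iftable, which stores (interface class, method array) pairs; that is why the cursor advances by two heap references and the counter drops by 2 per iteration. A rough C++ sketch of the loop the assembly implements (illustrative only; the field and helper names here are hypothetical, not ART APIs):

  bool ImplementsInterface(Class* klass, Class* iface) {
    IfTable* iftable = klass->iftable_;     // never null
    int32_t remaining = iftable->length_;   // two slots per (interface, methods) pair
    Class** slot = iftable->data_;
    while (remaining != 0) {
      if (*slot == iface) return true;      // exact pointer compare, no read barrier
      slot += 2;                            // skip over the method array slot
      remaining -= 2;
    }
    return false;                           // generated code branches to the slow path here
  }

When the scan falls off the end, the generated code jumps to the type check slow path rather than returning false, so read-barrier false negatives are still handled correctly.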
@@ -5862,7 +5962,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister(
     HInstruction* instruction ATTRIBUTE_UNUSED,
     Location out,
     uint32_t offset,
-    Location maybe_temp ATTRIBUTE_UNUSED) {
+    Location maybe_temp ATTRIBUTE_UNUSED,
+    ReadBarrierOption read_barrier_option ATTRIBUTE_UNUSED) {
   vixl32::Register out_reg = RegisterFrom(out);
   if (kEmitCompilerReadBarrier) {
     TODO_VIXL32(FATAL);
@@ -5879,7 +5980,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters(
     Location out,
     Location obj,
     uint32_t offset,
-    Location maybe_temp ATTRIBUTE_UNUSED) {
+    Location maybe_temp ATTRIBUTE_UNUSED,
+    ReadBarrierOption read_barrier_option ATTRIBUTE_UNUSED) {
   vixl32::Register out_reg = RegisterFrom(out);
   vixl32::Register obj_reg = RegisterFrom(obj);
   if (kEmitCompilerReadBarrier) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 89fef43e46..bd91127121 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -422,7 +422,8 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
   void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                         Location out,
                                         uint32_t offset,
-                                        Location maybe_temp);
+                                        Location maybe_temp,
+                                        ReadBarrierOption read_barrier_option);
   // Generate a heap reference load using two different registers
   // `out` and `obj`:
   //
@@ -437,7 +438,8 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
-                                        Location maybe_temp);
+                                        Location maybe_temp,
+                                        ReadBarrierOption read_barrier_option);
   // Generate a GC root reference load:
   //
diff --git a/compiler/optimizing/escape.cc b/compiler/optimizing/escape.cc
index c80e19ef15..9df5bf1017 100644
--- a/compiler/optimizing/escape.cc
+++ b/compiler/optimizing/escape.cc
@@ -23,16 +23,19 @@ namespace art {
 void CalculateEscape(HInstruction* reference,
                      bool (*no_escape)(HInstruction*, HInstruction*),
                      /*out*/ bool* is_singleton,
-                     /*out*/ bool* is_singleton_and_non_escaping) {
+                     /*out*/ bool* is_singleton_and_not_returned,
+                     /*out*/ bool* is_singleton_and_not_deopt_visible) {
   // For references not allocated in the method, don't assume anything.
   if (!reference->IsNewInstance() && !reference->IsNewArray()) {
     *is_singleton = false;
-    *is_singleton_and_non_escaping = false;
+    *is_singleton_and_not_returned = false;
+    *is_singleton_and_not_deopt_visible = false;
     return;
   }
   // Assume the best until proven otherwise.
   *is_singleton = true;
-  *is_singleton_and_non_escaping = true;
+  *is_singleton_and_not_returned = true;
+  *is_singleton_and_not_deopt_visible = true;
   // Visit all uses to determine if this reference can escape into the heap,
   // a method call, an alias, etc.
   for (const HUseListNode<HInstruction*>& use : reference->GetUses()) {
@@ -45,7 +48,8 @@ void CalculateEscape(HInstruction* reference,
       // for the uncommon cases. Similarly, null checks are eventually eliminated for explicit
       // allocations, but if we see one before it is simplified, assume an alias.
       *is_singleton = false;
-      *is_singleton_and_non_escaping = false;
+      *is_singleton_and_not_returned = false;
+      *is_singleton_and_not_deopt_visible = false;
       return;
     } else if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
                (user->IsInstanceFieldSet() && (reference == user->InputAt(1))) ||
@@ -56,7 +60,8 @@ void CalculateEscape(HInstruction* reference,
       // The reference is merged to HPhi/HSelect, passed to a callee, or stored to heap.
       // Hence, the reference is no longer the only name that can refer to its value.
       *is_singleton = false;
-      *is_singleton_and_non_escaping = false;
+      *is_singleton_and_not_returned = false;
+      *is_singleton_and_not_deopt_visible = false;
       return;
     } else if ((user->IsUnresolvedInstanceFieldGet() && (reference == user->InputAt(0))) ||
                (user->IsUnresolvedInstanceFieldSet() && (reference == user->InputAt(0)))) {
@@ -64,37 +69,35 @@ void CalculateEscape(HInstruction* reference,
       // Note that we could optimize this case and still perform some optimizations until
       // we hit the unresolved access, but the conservative assumption is the simplest.
       *is_singleton = false;
-      *is_singleton_and_non_escaping = false;
+      *is_singleton_and_not_returned = false;
+      *is_singleton_and_not_deopt_visible = false;
       return;
     } else if (user->IsReturn()) {
-      *is_singleton_and_non_escaping = false;
+      *is_singleton_and_not_returned = false;
     }
   }
 
-  // Need for further analysis?
-  if (!*is_singleton_and_non_escaping) {
-    return;
-  }
-
-  // Look at the environment uses and if it's for HDeoptimize, it's treated the
-  // same as a return which escapes at the end of executing the compiled code.
-  // Other environment uses are fine, as long as all client optimizations that
-  // rely on this informations are disabled for debuggable.
+  // Look at the environment uses if it's for HDeoptimize. Other environment uses are fine,
+  // as long as client optimizations that rely on this information are disabled for debuggable.
   for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
     HEnvironment* user = use.GetUser();
     if (user->GetHolder()->IsDeoptimize()) {
-      *is_singleton_and_non_escaping = false;
+      *is_singleton_and_not_deopt_visible = false;
       break;
     }
   }
 }
 
-bool IsNonEscapingSingleton(HInstruction* reference,
-                            bool (*no_escape)(HInstruction*, HInstruction*)) {
-  bool is_singleton = true;
-  bool is_singleton_and_non_escaping = true;
-  CalculateEscape(reference, no_escape, &is_singleton, &is_singleton_and_non_escaping);
-  return is_singleton_and_non_escaping;
+bool DoesNotEscape(HInstruction* reference, bool (*no_escape)(HInstruction*, HInstruction*)) {
+  bool is_singleton = false;
+  bool is_singleton_and_not_returned = false;
+  bool is_singleton_and_not_deopt_visible = false;  // not relevant for escape
+  CalculateEscape(reference,
+                  no_escape,
+                  &is_singleton,
+                  &is_singleton_and_not_returned,
+                  &is_singleton_and_not_deopt_visible);
+  return is_singleton_and_not_returned;
 }
 
 }  // namespace art
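The refactored CalculateEscape now reports two independent properties instead of one combined "non-escaping" bit. A minimal sketch of a caller, modeled on how load_store_elimination.cc below consumes the results:

  bool is_singleton = false;
  bool is_singleton_and_not_returned = false;
  bool is_singleton_and_not_deopt_visible = false;
  CalculateEscape(reference,
                  nullptr,  // no client-specific no_escape hook
                  &is_singleton,
                  &is_singleton_and_not_returned,
                  &is_singleton_and_not_deopt_visible);
  // LSE-style clients may remove the allocation only when both extra bits hold;
  // the simplifier's DoesNotEscape() deliberately ignores the deopt bit.
  bool removable = is_singleton_and_not_returned && is_singleton_and_not_deopt_visible;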
diff --git a/compiler/optimizing/escape.h b/compiler/optimizing/escape.h
index 6514843247..75e37b0551 100644
--- a/compiler/optimizing/escape.h
+++ b/compiler/optimizing/escape.h
@@ -31,9 +31,18 @@ class HInstruction;
 * allocation. The method assigns true to parameter 'is_singleton' if the reference
 * is the only name that can refer to its value during the lifetime of the method,
 * meaning that the reference is not aliased with something else, is not stored to
- * heap memory, and not passed to another method. The method assigns true to parameter
- * 'is_singleton_and_non_escaping' if the reference is a singleton and is not returned
- * to the caller or used as an environment local of an HDeoptimize instruction.
+ * heap memory, and not passed to another method. In addition, the method assigns
+ * true to parameter 'is_singleton_and_not_returned' if the reference is a singleton
+ * and not returned to the caller and to parameter 'is_singleton_and_not_deopt_visible'
+ * if the reference is a singleton and not used as an environment local of an
+ * HDeoptimize instruction (clients of the final value must run after BCE to ensure
+ * all such instructions have been introduced already).
+ *
+ * Note that being visible to an HDeoptimize instruction does not count for ordinary
+ * escape analysis, since switching between compiled code and interpreted code keeps
+ * non-escaping references restricted to the lifetime of the method and the thread
+ * executing it. This property only concerns optimizations that are interested in
+ * escape analysis with respect to the *compiled* code (such as LSE).
 *
 * When set, the no_escape function is applied to any use of the allocation instruction
 * prior to any built-in escape analysis. This allows clients to define better escape
@@ -45,14 +54,14 @@ class HInstruction;
 void CalculateEscape(HInstruction* reference,
                      bool (*no_escape)(HInstruction*, HInstruction*),
                      /*out*/ bool* is_singleton,
-                     /*out*/ bool* is_singleton_and_non_escaping);
+                     /*out*/ bool* is_singleton_and_not_returned,
+                     /*out*/ bool* is_singleton_and_not_deopt_visible);
 
 /*
- * Convenience method for testing singleton and non-escaping property at once.
+ * Convenience method for testing the singleton and not returned properties at once.
 * Callers should be aware that this method invokes the full analysis at each call.
 */
-bool IsNonEscapingSingleton(HInstruction* reference,
-                            bool (*no_escape)(HInstruction*, HInstruction*));
+bool DoesNotEscape(HInstruction* reference, bool (*no_escape)(HInstruction*, HInstruction*));
 
 }  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 85b461dcf6..658b80468e 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -16,6 +16,7 @@
 
 #include "instruction_simplifier.h"
 
+#include "escape.h"
 #include "intrinsics.h"
 #include "mirror/class-inl.h"
 #include "scoped_thread_state_change-inl.h"
@@ -107,6 +108,8 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
   void SimplifyStringCharAt(HInvoke* invoke);
   void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
   void SimplifyNPEOnArgN(HInvoke* invoke, size_t);
+  void SimplifyReturnThis(HInvoke* invoke);
+  void SimplifyAllocationIntrinsic(HInvoke* invoke);
   void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
 
   OptimizingCompilerStats* stats_;
@@ -1864,11 +1867,61 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke
 // is provably non-null, we can clear the flag.
 void InstructionSimplifierVisitor::SimplifyNPEOnArgN(HInvoke* invoke, size_t n) {
   HInstruction* arg = invoke->InputAt(n);
-  if (!arg->CanBeNull()) {
+  if (invoke->CanThrow() && !arg->CanBeNull()) {
     invoke->SetCanThrow(false);
   }
 }
 
+// Methods that return "this" can replace the returned value with the receiver.
+void InstructionSimplifierVisitor::SimplifyReturnThis(HInvoke* invoke) {
+  if (invoke->HasUses()) {
+    HInstruction* receiver = invoke->InputAt(0);
+    invoke->ReplaceWith(receiver);
+    RecordSimplification();
+  }
+}
+
+// Helper method for StringBuffer escape analysis.
+static bool NoEscapeForStringBufferReference(HInstruction* reference, HInstruction* user) {
+  if (user->IsInvokeStaticOrDirect()) {
+    // Any constructor on StringBuffer is okay.
+    return user->AsInvokeStaticOrDirect()->GetResolvedMethod()->IsConstructor() &&
+           user->InputAt(0) == reference;
+  } else if (user->IsInvokeVirtual()) {
+    switch (user->AsInvokeVirtual()->GetIntrinsic()) {
+      case Intrinsics::kStringBufferLength:
+      case Intrinsics::kStringBufferToString:
+        DCHECK_EQ(user->InputAt(0), reference);
+        return true;
+      case Intrinsics::kStringBufferAppend:
+        // Returns "this", so only okay if no further uses.
+        DCHECK_EQ(user->InputAt(0), reference);
+        DCHECK_NE(user->InputAt(1), reference);
+        return !user->HasUses();
+      default:
+        break;
+    }
+  }
+  return false;
+}
+
+// Certain allocation intrinsics are not removed by dead code elimination
+// because of potentially throwing an OOM exception or other side effects.
+// This method removes such intrinsics when special circumstances allow.
+void InstructionSimplifierVisitor::SimplifyAllocationIntrinsic(HInvoke* invoke) {
+  if (!invoke->HasUses()) {
+    // Instruction has no uses. If unsynchronized, we can remove right away, safely ignoring
+    // the potential OOM of course. Otherwise, we must ensure the receiver object of this
+    // call does not escape since only thread-local synchronization may be removed.
+    bool is_synchronized = invoke->GetIntrinsic() == Intrinsics::kStringBufferToString;
+    HInstruction* receiver = invoke->InputAt(0);
+    if (!is_synchronized || DoesNotEscape(receiver, NoEscapeForStringBufferReference)) {
+      invoke->GetBlock()->RemoveInstruction(invoke);
+      RecordSimplification();
+    }
+  }
+}
+
 void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
   uint32_t dex_pc = invoke->GetDexPc();
   HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
@@ -1926,6 +1979,14 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
     case Intrinsics::kStringStringIndexOfAfter:
       SimplifyNPEOnArgN(instruction, 1);  // 0th has own NullCheck
       break;
+    case Intrinsics::kStringBufferAppend:
+    case Intrinsics::kStringBuilderAppend:
+      SimplifyReturnThis(instruction);
+      break;
+    case Intrinsics::kStringBufferToString:
+    case Intrinsics::kStringBuilderToString:
+      SimplifyAllocationIntrinsic(instruction);
+      break;
     case Intrinsics::kUnsafeLoadFence:
       SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
       break;
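Together, SimplifyReturnThis and SimplifyAllocationIntrinsic clean up fluent append chains whose result is ultimately unused, e.g. the Java pattern new StringBuilder().append(x).toString() whose string is never read. A toy model of the two-step rewrite (not ART code; a self-contained sketch of the idea):

  #include <vector>

  struct Node {
    Node* receiver = nullptr;   // InputAt(0)
    std::vector<Node*> uses;    // instructions consuming this value
    bool returns_this = false;  // e.g. the append intrinsics
    bool removed = false;

    bool HasUses() const { return !uses.empty(); }

    void ReplaceWith(Node* other) {  // step 1: SimplifyReturnThis
      for (Node* user : uses) other->uses.push_back(user);
      uses.clear();
    }
  };

  void SimplifyInvoke(Node* invoke) {
    if (invoke->returns_this && invoke->HasUses()) {
      invoke->ReplaceWith(invoke->receiver);  // forward uses to the receiver
    }
    if (!invoke->HasUses()) {
      invoke->removed = true;  // step 2: SimplifyAllocationIntrinsic / DCE
    }
  }

In the real pass the second step additionally requires that a synchronized StringBuffer receiver provably does not escape (via DoesNotEscape above), since only thread-local synchronization may be elided.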
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 8234b2467d..8f64faeac0 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2613,6 +2613,12 @@ UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(ARM, StringBuilderToString);
 
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 17a97da6cc..d8a896e926 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2781,6 +2781,12 @@ UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderToString);
 
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index c8e3534164..9e724474d0 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -677,7 +677,10 @@ static void GenUnsafeGet(HInvoke* invoke,
       vixl32::Register trg_lo = LowRegisterFrom(trg_loc);
       vixl32::Register trg_hi = HighRegisterFrom(trg_loc);
       if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
-        __ Ldrexd(trg_lo, trg_hi, MemOperand(base, offset));
+        UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+        const vixl32::Register temp_reg = temps.Acquire();
+        __ Add(temp_reg, base, offset);
+        __ Ldrexd(trg_lo, trg_hi, MemOperand(temp_reg));
       } else {
         __ Ldrd(trg_lo, trg_hi, MemOperand(base, offset));
       }
@@ -2703,6 +2706,12 @@ UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderToString);
 
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddInt)
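The GenUnsafeGet change above works around an ARM encoding restriction rather than a logic bug: LDREXD accepts only a plain base register, with no register or immediate offset, so the effective address has to be materialized first. Restating the fixed sequence with the rationale spelled out in comments (same code as the hunk above):

  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  const vixl32::Register temp_reg = temps.Acquire();
  __ Add(temp_reg, base, offset);                   // LDREXD has no offset form,
  __ Ldrexd(trg_lo, trg_hi, MemOperand(temp_reg));  // so compute base + offset first

The exclusive load pair is only used on cores without single-copy atomic LDRD; the else branch keeps using Ldrd with the ordinary base-plus-offset addressing.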
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 7c81588cda..9b5d7a02dd 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2497,6 +2497,12 @@ UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh)
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderToString);
 
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 2d4f417b14..5a998861eb 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1949,6 +1949,12 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh)
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderToString);
 
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 06ab46f536..922c3bcac9 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3331,6 +3331,12 @@ UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(X86, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(X86, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(X86, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderToString);
 
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 2ea8670100..05d270a4e6 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -3000,6 +3000,12 @@ UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferAppend);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferLength);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferToString);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderLength);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderToString);
 
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddInt)
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index edecf17f33..2856c3ea11 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -37,8 +37,13 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
       : reference_(reference),
         position_(pos),
         is_singleton_(true),
-        is_singleton_and_non_escaping_(true) {
-    CalculateEscape(reference_, nullptr, &is_singleton_, &is_singleton_and_non_escaping_);
+        is_singleton_and_not_returned_(true),
+        is_singleton_and_not_deopt_visible_(true) {
+    CalculateEscape(reference_,
+                    nullptr,
+                    &is_singleton_,
+                    &is_singleton_and_not_returned_,
+                    &is_singleton_and_not_deopt_visible_);
   }
 
   HInstruction* GetReference() const {
@@ -59,19 +64,17 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
   // Returns true if reference_ is a singleton and not returned to the caller or
   // used as an environment local of an HDeoptimize instruction.
   // The allocation and stores into reference_ may be eliminated for such cases.
-  bool IsSingletonAndNonEscaping() const {
-    return is_singleton_and_non_escaping_;
+  bool IsSingletonAndRemovable() const {
+    return is_singleton_and_not_returned_ && is_singleton_and_not_deopt_visible_;
   }
 
  private:
   HInstruction* const reference_;
-  const size_t position_;     // position in HeapLocationCollector's ref_info_array_.
-  bool is_singleton_;         // can only be referred to by a single name in the method.
+  const size_t position_;  // position in HeapLocationCollector's ref_info_array_.
 
-  // reference_ is singleton and does not escape in the end either by
-  // returning to the caller, or being used as an environment local of an
-  // HDeoptimize instruction.
-  bool is_singleton_and_non_escaping_;
+  bool is_singleton_;                        // can only be referred to by a single name in the method,
+  bool is_singleton_and_not_returned_;       // and not returned to caller,
+  bool is_singleton_and_not_deopt_visible_;  // and not used as an environment local of HDeoptimize.
 
   DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
 };
@@ -623,7 +626,7 @@ class LSEVisitor : public HGraphVisitor {
     bool from_all_predecessors = true;
     ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
     HInstruction* singleton_ref = nullptr;
-    if (ref_info->IsSingletonAndNonEscaping()) {
+    if (ref_info->IsSingletonAndRemovable()) {
       // We do more analysis of liveness when merging heap values for such
       // cases since stores into such references may potentially be eliminated.
       singleton_ref = ref_info->GetReference();
@@ -796,7 +799,7 @@ class LSEVisitor : public HGraphVisitor {
     } else if (index != nullptr) {
       // For array element, don't eliminate stores since it can be easily aliased
       // with non-constant index.
-    } else if (ref_info->IsSingletonAndNonEscaping()) {
+    } else if (ref_info->IsSingletonAndRemovable()) {
       // Store into a field of a singleton that's not returned. The value cannot be
       // killed due to aliasing/invocation. It can be redundant since future loads can
       // directly get the value set by this instruction. The value can still be killed due to
@@ -970,7 +973,7 @@ class LSEVisitor : public HGraphVisitor {
       // new_instance isn't used for field accesses. No need to process it.
       return;
     }
-    if (ref_info->IsSingletonAndNonEscaping() &&
+    if (ref_info->IsSingletonAndRemovable() &&
         !new_instance->IsFinalizable() &&
         !new_instance->NeedsAccessCheck()) {
       singleton_new_instances_.push_back(new_instance);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 165dce3f0d..7ab04e15fc 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2072,6 +2072,8 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
 #undef INSTRUCTION_TYPE_CHECK
 
   // Returns whether the instruction can be moved within the graph.
+  // TODO: this method is used by LICM and GVN with possibly different
+  // meanings? split and rename?
   virtual bool CanBeMoved() const { return false; }
 
   // Returns whether the two instructions are of the same kind.
@@ -3789,7 +3791,7 @@ class HInvoke : public HInstruction {
 
   bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
 
-  bool CanBeMoved() const OVERRIDE { return IsIntrinsic(); }
+  bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
 
   bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
     return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
@@ -4181,6 +4183,19 @@ class HInvokeVirtual FINAL : public HInvoke {
                 kVirtual),
         vtable_index_(vtable_index) {}
 
+  bool CanBeNull() const OVERRIDE {
+    switch (GetIntrinsic()) {
+      case Intrinsics::kThreadCurrentThread:
+      case Intrinsics::kStringBufferAppend:
+      case Intrinsics::kStringBufferToString:
+      case Intrinsics::kStringBuilderAppend:
+      case Intrinsics::kStringBuilderToString:
+        return false;
+      default:
+        return HInvoke::CanBeNull();
+    }
+  }
+
   bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
     // TODO: Add implicit null checks in intrinsics.
     return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
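Declaring these intrinsics as never-null lets later passes drop null checks on their results. A sketch of the kind of client this enables (modeled loosely on the instruction simplifier; treat it as an assumed, simplified form rather than the exact ART pass):

  void VisitNullCheck(HNullCheck* null_check) {
    HInstruction* obj = null_check->InputAt(0);
    if (!obj->CanBeNull()) {  // now false for e.g. StringBuilder.toString()
      null_check->ReplaceWith(obj);
      null_check->GetBlock()->RemoveInstruction(null_check);
    }
  }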