Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/code_generator.cc        |  14
 compiler/optimizing/code_generator.h         |   3
 compiler/optimizing/code_generator_arm.cc    |   6
 compiler/optimizing/code_generator_arm64.cc  |   3
 compiler/optimizing/code_generator_mips.cc   |   6
 compiler/optimizing/code_generator_mips64.cc |   6
 compiler/optimizing/code_generator_x86.cc    |  25
 compiler/optimizing/code_generator_x86.h     |   1
 compiler/optimizing/code_generator_x86_64.cc |  18
 compiler/optimizing/code_generator_x86_64.h  |   1
 compiler/optimizing/intrinsics.h             |   2
 compiler/optimizing/intrinsics_arm.cc        | 150
 compiler/optimizing/intrinsics_arm64.cc      |  27
 compiler/optimizing/intrinsics_mips.cc       | 117
 compiler/optimizing/intrinsics_x86.cc        |  68
 compiler/optimizing/intrinsics_x86_64.cc     |  87
 compiler/optimizing/nodes.h                  | 156
 compiler/optimizing/nodes_arm.h              |   2
 compiler/optimizing/nodes_arm64.h            |   4
 compiler/optimizing/nodes_shared.h           |   4
 compiler/optimizing/nodes_x86.h              |   8
 21 files changed, 546 insertions(+), 162 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 51fbaea519..08670a0d82 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1305,4 +1305,18 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
locations->AddTemp(Location::RequiresRegister());
}
+uint32_t CodeGenerator::GetReferenceSlowFlagOffset() const {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+ DCHECK(klass->IsInitialized());
+ return klass->GetSlowPathFlagOffset().Uint32Value();
+}
+
+uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+ DCHECK(klass->IsInitialized());
+ return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
+}
+
} // namespace art
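Note: both getters resolve byte-flag offsets on java.lang.ref.Reference at compile time; the ScopedObjectAccess is required because reading the mirror class needs the mutator lock. A minimal sketch of the intended consumption, mirroring the x86 ReferenceGetReferent intrinsics later in this diff (codegen_ stands for any CodeGenerator subclass):
  uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
  uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
  // Non-zero flags force Reference.getReferent() onto the runtime slow path.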
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 6e75e3bb2e..82a54d2ed1 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -469,6 +469,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void GenerateNop() = 0;
+ uint32_t GetReferenceSlowFlagOffset() const;
+ uint32_t GetReferenceDisableFlagOffset() const;
+
protected:
// Method patch info used for recording locations of required linker patches and
// target methods. The target method can be used for various purposes, whether for
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e0106628c6..7ddd677fd0 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -59,7 +59,8 @@ static constexpr DRegister DTMP = D31;
static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
-#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
class NullCheckSlowPathARM : public SlowPathCode {
@@ -674,7 +675,8 @@ class ReadBarrierForRootSlowPathARM : public SlowPathCode {
};
#undef __
-#define __ down_cast<ArmAssembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<ArmAssembler*>(GetAssembler())-> // NOLINT
inline Condition ARMCondition(IfCondition cond) {
switch (cond) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 261c04f062..362957bb31 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -132,7 +132,8 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type retur
return ARM64ReturnLocation(return_type);
}
-#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
// Calculate memory accessing operand for save/restore live registers.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index fb50680c91..c3f425ac0d 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -141,7 +141,8 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type)
return MipsReturnLocation(type);
}
-#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
@@ -478,7 +479,8 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
}
#undef __
-#define __ down_cast<MipsAssembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e67d8d0dc5..bb6df500cd 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -102,7 +102,8 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type)
return Mips64ReturnLocation(type);
}
-#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
@@ -424,7 +425,8 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
}
#undef __
-#define __ down_cast<Mips64Assembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 50892a9d48..b95c806acf 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -47,7 +47,8 @@ static constexpr int kC2ConditionMask = 0x400;
static constexpr int kFakeReturnRegister = Register(8);
-#define __ down_cast<X86Assembler*>(codegen->GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x).Int32Value()
class NullCheckSlowPathX86 : public SlowPathCode {
@@ -691,7 +692,8 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
};
#undef __
-#define __ down_cast<X86Assembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT
inline Condition X86Condition(IfCondition cond) {
switch (cond) {
@@ -4308,16 +4310,18 @@ Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
// save one load. However, since this is just an intrinsic slow path we prefer this
// simple and more robust approach rather than trying to determine if that's the case.
SlowPathCode* slow_path = GetCurrentSlowPath();
- DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
- if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
- int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
- __ movl(temp, Address(ESP, stack_offset));
- return temp;
+ if (slow_path != nullptr) {
+ if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+ __ movl(temp, Address(ESP, stack_offset));
+ return temp;
+ }
}
return location.AsRegister<Register>();
}
-void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
@@ -4366,6 +4370,11 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
break;
}
}
+ return callee_method;
+}
+
+void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+ Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
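Note: splitting GenerateCalleeMethodStaticOrDirectCall out of GenerateStaticOrDirectCall lets an intrinsic materialize the callee ArtMethod in a register without emitting the call itself; the ReferenceGetReferent intrinsics below rely on exactly that. A sketch of the call-site pattern (invoke_direct and locations as in those intrinsics):
  Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
      invoke_direct, locations->GetTemp(0));
  Register temp = temp_loc.AsRegister<Register>();  // now holds the ArtMethod*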
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fe7d3ed85c..98dc8ca280 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -398,6 +398,7 @@ class CodeGeneratorX86 : public CodeGenerator {
MethodReference target_method) OVERRIDE;
// Generate a call to a static or direct method.
+ Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
// Generate a call to a virtual method.
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 56c5b06945..054891ba48 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -51,7 +51,8 @@ static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15
static constexpr int kC2ConditionMask = 0x400;
-#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x).Int32Value()
class NullCheckSlowPathX86_64 : public SlowPathCode {
@@ -710,7 +711,8 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
};
#undef __
-#define __ down_cast<X86_64Assembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
inline Condition X86_64IntegerCondition(IfCondition cond) {
switch (cond) {
@@ -762,10 +764,9 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStati
}
}
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
// All registers are assumed to be correctly set up.
-
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
@@ -815,6 +816,13 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
break;
}
}
+ return callee_method;
+}
+
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
+ // All registers are assumed to be correctly set up.
+ Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d9908bb961..7cf12459b0 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -394,6 +394,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
MethodReference target_method) OVERRIDE;
+ Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 214250f337..83a512738b 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -165,7 +165,7 @@ public: \
void Set##name() { SetBit(k##name); } \
bool Get##name() const { return IsBitSet(k##name); } \
private: \
-static constexpr size_t k##name = bit + kNumberOfGenericOptimizations
+static constexpr size_t k##name = (bit) + kNumberOfGenericOptimizations
class StringEqualsOptimizations : public IntrinsicOptimizations {
public:
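Note: the added parentheses are ordinary macro hygiene, since bit may be an expression and '+' binds tighter than '<<'. A self-contained illustration (hypothetical OFFSET macros, not ART code):
  #include <cstddef>
  #define UNSAFE_OFFSET(bit) (static_cast<size_t>(bit + 8))
  #define SAFE_OFFSET(bit)   (static_cast<size_t>((bit) + 8))
  static_assert(UNSAFE_OFFSET(1 << 2) == (1 << 10), "parsed as 1 << (2 + 8)");
  static_assert(SAFE_OFFSET(1 << 2) == 12,          "parsed as (1 << 2) + 8");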
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index de04175e38..29f7672b0a 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -987,31 +987,126 @@ void IntrinsicCodeGeneratorARM::VisitStringCharAt(HInvoke* invoke) {
void IntrinsicLocationsBuilderARM::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCall,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
kIntrinsified);
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetOut(Location::RegisterLocation(R0));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorARM::VisitStringCompareTo(HInvoke* invoke) {
ArmAssembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
+ Register str = locations->InAt(0).AsRegister<Register>();
+ Register arg = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ Register temp0 = locations->GetTemp(0).AsRegister<Register>();
+ Register temp1 = locations->GetTemp(1).AsRegister<Register>();
+ Register temp2 = locations->GetTemp(2).AsRegister<Register>();
+
+ Label loop;
+ Label find_char_diff;
+ Label end;
+
+ // Get offsets of count and value fields within a string object.
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- Register argument = locations->InAt(1).AsRegister<Register>();
- __ cmp(argument, ShifterOperand(0));
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
- codegen_->AddSlowPath(slow_path);
- __ b(slow_path->GetEntryLabel(), EQ);
+ // Take the slow path and throw if the argument can be null and actually is null.
+ SlowPathCode* slow_path = nullptr;
+ const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
+ if (can_slow_path) {
+ slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ CompareAndBranchIfZero(arg, slow_path->GetEntryLabel());
+ }
- __ LoadFromOffset(
- kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pStringCompareTo).Int32Value());
- __ blx(LR);
- __ Bind(slow_path->GetExitLabel());
+ // Reference equality check, return 0 if same reference.
+ __ subs(out, str, ShifterOperand(arg));
+ __ b(&end, EQ);
+ // Load lengths of this and argument strings.
+ __ ldr(temp2, Address(str, count_offset));
+ __ ldr(temp1, Address(arg, count_offset));
+ // out = length diff.
+ __ subs(out, temp2, ShifterOperand(temp1));
+ // temp0 = min(len(str), len(arg)).
+ __ it(Condition::LT, kItElse);
+ __ mov(temp0, ShifterOperand(temp2), Condition::LT);
+ __ mov(temp0, ShifterOperand(temp1), Condition::GE);
+ // Shorter string is empty?
+ __ CompareAndBranchIfZero(temp0, &end);
+
+ // Store offset of string value in preparation for comparison loop.
+ __ mov(temp1, ShifterOperand(value_offset));
+
+ // Assertions that must hold in order to compare multiple characters at a time.
+ CHECK_ALIGNED(value_offset, 8);
+ static_assert(IsAligned<8>(kObjectAlignment),
+ "String data must be 8-byte aligned for unrolled CompareTo loop.");
+
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+
+ // Unrolled loop comparing 4x16-bit chars per iteration (ok because of string data alignment).
+ __ Bind(&loop);
+ __ ldr(IP, Address(str, temp1));
+ __ ldr(temp2, Address(arg, temp1));
+ __ cmp(IP, ShifterOperand(temp2));
+ __ b(&find_char_diff, NE);
+ __ add(temp1, temp1, ShifterOperand(char_size * 2));
+ __ sub(temp0, temp0, ShifterOperand(2));
+
+ __ ldr(IP, Address(str, temp1));
+ __ ldr(temp2, Address(arg, temp1));
+ __ cmp(IP, ShifterOperand(temp2));
+ __ b(&find_char_diff, NE);
+ __ add(temp1, temp1, ShifterOperand(char_size * 2));
+ __ subs(temp0, temp0, ShifterOperand(2));
+
+ __ b(&loop, GT);
+ __ b(&end);
+
+ // Find the single 16-bit character difference.
+ __ Bind(&find_char_diff);
+ // Get the bit position of the first character that differs.
+ __ eor(temp1, temp2, ShifterOperand(IP));
+ __ rbit(temp1, temp1);
+ __ clz(temp1, temp1);
+
+ // temp0 = number of 16-bit characters remaining to compare.
+ // (it could be < 1 if a difference is found after the first SUB in the comparison loop, and
+ // after the end of the shorter string data).
+
+ // (temp1 >> 4) = character where difference occurs between the last two words compared, on the
+ // interval [0,1] (0 for low half-word different, 1 for high half-word different).
+
+ // If temp0 <= (temp1 >> 4), the difference occurs outside the remaining string data, so just
+ // return length diff (out).
+ __ cmp(temp0, ShifterOperand(temp1, LSR, 4));
+ __ b(&end, LE);
+ // Extract the characters and calculate the difference.
+ __ bic(temp1, temp1, ShifterOperand(0xf));
+ __ Lsr(temp2, temp2, temp1);
+ __ Lsr(IP, IP, temp1);
+ __ movt(temp2, 0);
+ __ movt(IP, 0);
+ __ sub(out, IP, ShifterOperand(temp2));
+
+ __ Bind(&end);
+
+ if (can_slow_path) {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
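Note: rbit followed by clz yields the index of the lowest set bit of the XOR, i.e. the first differing bit; bic rounds that index down to a half-word boundary before both words are shifted and their low characters subtracted. A host-side model of this tail computation (a sketch assuming GCC/Clang builtins, not ART code; IP holds this string's word, temp2 the argument's):
  #include <cstdint>
  int FindCharDiff(uint32_t lhs, uint32_t rhs) {  // lhs/rhs compared unequal
    int bit = __builtin_ctz(lhs ^ rhs);  // rbit + clz == count trailing zeros
    int shift = bit & ~0xf;              // bic ..., 0xf: half-word boundary
    uint16_t a = static_cast<uint16_t>(lhs >> shift);  // Lsr + movt ..., 0
    uint16_t b = static_cast<uint16_t>(rhs >> shift);
    return a - b;  // matches sub out, IP, temp2 above
  }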
void IntrinsicLocationsBuilderARM::VisitStringEquals(HInvoke* invoke) {
@@ -1055,17 +1150,22 @@ void IntrinsicCodeGeneratorARM::VisitStringEquals(HInvoke* invoke) {
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- // Check if input is null, return false if it is.
- __ CompareAndBranchIfZero(arg, &return_false);
+ StringEqualsOptimizations optimizations(invoke);
+ if (!optimizations.GetArgumentNotNull()) {
+ // Check if input is null, return false if it is.
+ __ CompareAndBranchIfZero(arg, &return_false);
+ }
- // Instanceof check for the argument by comparing class fields.
- // All string objects must have the same type since String cannot be subclassed.
- // Receiver must be a string object, so its class field is equal to all strings' class fields.
- // If the argument is a string object, its class field must be equal to receiver's class field.
- __ ldr(temp, Address(str, class_offset));
- __ ldr(temp1, Address(arg, class_offset));
- __ cmp(temp, ShifterOperand(temp1));
- __ b(&return_false, NE);
+ if (!optimizations.GetArgumentIsString()) {
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ ldr(temp, Address(str, class_offset));
+ __ ldr(temp1, Address(arg, class_offset));
+ __ cmp(temp, ShifterOperand(temp1));
+ __ b(&return_false, NE);
+ }
// Load lengths of this and argument strings.
__ ldr(temp, Address(str, count_offset));
@@ -1082,7 +1182,7 @@ void IntrinsicCodeGeneratorARM::VisitStringEquals(HInvoke* invoke) {
// Assertions that must hold in order to compare strings 2 characters at a time.
DCHECK_ALIGNED(value_offset, 4);
- static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
+ static_assert(IsAligned<4>(kObjectAlignment), "String data must be aligned for fast compare.");
__ LoadImmediate(temp1, value_offset);
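Note: StringEqualsOptimizations is the per-invoke bit set declared through the intrinsics.h macro fixed above; a pass that proves facts about the argument records them so codegen can elide the matching checks. A sketch of the producer side (assumption: the bits are set by an earlier pass such as the instruction simplifier):
  StringEqualsOptimizations optimizations(invoke);
  optimizations.SetArgumentNotNull();   // proven non-null: null check elided
  optimizations.SetArgumentIsString();  // proven a String: class check elided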
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 6cd1726eb3..d776fb4406 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1270,12 +1270,12 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
__ Eor(temp1, temp0, temp4);
__ Rbit(temp1, temp1);
__ Clz(temp1, temp1);
- __ Bic(temp1, temp1, 0xf);
// If the number of 16-bit chars remaining <= the index where the difference occurs (0-3), then
// the difference occurs outside the remaining string data, so just return length diff (out).
__ Cmp(temp2, Operand(temp1, LSR, 4));
__ B(le, &end);
// Extract the characters and calculate the difference.
+ __ Bic(temp1, temp1, 0xf);
__ Lsr(temp0, temp0, temp1);
__ Lsr(temp4, temp4, temp1);
__ And(temp4, temp4, 0xffff);
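Note: moving the Bic below the compare cannot change the comparison, because clearing the low four bits and then shifting right by four discards exactly those bits anyway; it merely keeps the Bic off the early-return path, where only the length difference is needed. The identity, as a check:
  static_assert(((0x37u & ~0xfu) >> 4) == (0x37u >> 4), "low bits shifted out");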
@@ -1327,21 +1327,26 @@ void IntrinsicCodeGeneratorARM64::VisitStringEquals(HInvoke* invoke) {
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- // Check if input is null, return false if it is.
- __ Cbz(arg, &return_false);
+ StringEqualsOptimizations optimizations(invoke);
+ if (!optimizations.GetArgumentNotNull()) {
+ // Check if input is null, return false if it is.
+ __ Cbz(arg, &return_false);
+ }
// Reference equality check, return true if same reference.
__ Cmp(str, arg);
__ B(&return_true, eq);
- // Instanceof check for the argument by comparing class fields.
- // All string objects must have the same type since String cannot be subclassed.
- // Receiver must be a string object, so its class field is equal to all strings' class fields.
- // If the argument is a string object, its class field must be equal to receiver's class field.
- __ Ldr(temp, MemOperand(str.X(), class_offset));
- __ Ldr(temp1, MemOperand(arg.X(), class_offset));
- __ Cmp(temp, temp1);
- __ B(&return_false, ne);
+ if (!optimizations.GetArgumentIsString()) {
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ Ldr(temp, MemOperand(str.X(), class_offset));
+ __ Ldr(temp1, MemOperand(arg.X(), class_offset));
+ __ Cmp(temp, temp1);
+ __ B(&return_false, ne);
+ }
// Load lengths of this and argument strings.
__ Ldr(temp, MemOperand(str.X(), count_offset));
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index fa250a3063..140f56a870 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2433,13 +2433,128 @@ void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler());
}
+// int java.lang.Math.round(float)
+void IntrinsicLocationsBuilderMIPS::VisitMathRoundFloat(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathRoundFloat(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ MipsAssembler* assembler = GetAssembler();
+ FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister half = locations->GetTemp(0).AsFpuRegister<FRegister>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ MipsLabel done;
+ MipsLabel finite;
+ MipsLabel add;
+
+ // if (in.isNaN) {
+ // return 0;
+ // }
+ //
+ // out = floor.w.s(in);
+ //
+ // /*
+ // * This "if" statement is only needed for the pre-R6 version of floor.w.s
+ // * which outputs Integer.MAX_VALUE for negative numbers with magnitudes
+ // * too large to fit in a 32-bit integer.
+ // *
+ // * Starting with MIPSR6, which always sets FCSR.NAN2008=1, negative
+ // * numbers which are too large to be represented in a 32-bit signed
+ // * integer will be processed by floor.w.s to output Integer.MIN_VALUE,
+ // * and will no longer be processed by this "if" statement.
+ // */
+ // if (out == Integer.MAX_VALUE) {
+ // TMP = (in < 0.0f) ? 1 : 0;
+ // /*
+ // * If TMP is 1, then adding it to out will wrap its value from
+ // * Integer.MAX_VALUE to Integer.MIN_VALUE.
+ // */
+ // return out += TMP;
+ // }
+ //
+ // /*
+ // * For negative values not handled by the previous "if" statement the
+ // * test here will correctly set the value of TMP.
+ // */
+ // TMP = ((in - out) >= 0.5f) ? 1 : 0;
+ // return out += TMP;
+
+ // Test for NaN.
+ if (IsR6()) {
+ __ CmpUnS(FTMP, in, in);
+ } else {
+ __ CunS(in, in);
+ }
+
+ // Return zero for NaN.
+ __ Move(out, ZERO);
+ if (IsR6()) {
+ __ Bc1nez(FTMP, &done);
+ } else {
+ __ Bc1t(&done);
+ }
+
+ // out = floor(in);
+ __ FloorWS(FTMP, in);
+ __ Mfc1(out, FTMP);
+
+ __ LoadConst32(TMP, 1);
+
+ // TMP = (out == java.lang.Integer.MAX_VALUE) ? 1 : 0;
+ __ LoadConst32(AT, std::numeric_limits<int32_t>::max());
+ __ Bne(AT, out, &finite);
+
+ __ Mtc1(ZERO, FTMP);
+ if (IsR6()) {
+ __ CmpLtS(FTMP, in, FTMP);
+ __ Mfc1(AT, FTMP);
+ } else {
+ __ ColtS(in, FTMP);
+ }
+
+ __ B(&add);
+
+ __ Bind(&finite);
+
+ // TMP = (0.5f <= (in - out)) ? 1 : 0;
+ __ Cvtsw(FTMP, FTMP); // Convert output of floor.w.s back to "float".
+ __ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
+ __ SubS(FTMP, in, FTMP);
+ __ Mtc1(AT, half);
+ if (IsR6()) {
+ __ CmpLeS(FTMP, half, FTMP);
+ __ Mfc1(AT, FTMP);
+ } else {
+ __ ColeS(half, FTMP);
+ }
+
+ __ Bind(&add);
+
+ if (IsR6()) {
+ __ Selnez(TMP, TMP, AT);
+ } else {
+ __ Movf(TMP, ZERO);
+ }
+
+ // Return out += TMP.
+ __ Addu(out, out, TMP);
+
+ __ Bind(&done);
+}
+
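Note: a host-side model of the sequence above (a sketch, not ART code; the cast stands in for floor.w.s, ignoring saturation apart from the MAX_VALUE case handled explicitly):
  #include <cmath>
  #include <cstdint>
  #include <limits>
  int32_t RoundFloat(float in) {
    if (std::isnan(in)) return 0;                        // NaN -> 0
    int32_t out = static_cast<int32_t>(std::floor(in));  // floor.w.s
    if (out == std::numeric_limits<int32_t>::max()) {
      // Pre-R6 floor.w.s also yields MAX_VALUE for too-large negatives; the
      // emitted addu then wraps it to MIN_VALUE. Modeled without overflow:
      return (in < 0.0f) ? std::numeric_limits<int32_t>::min() : out;
    }
    return out + (((in - static_cast<float>(out)) >= 0.5f) ? 1 : 0);
  }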
// Unimplemented intrinsics.
UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundFloat)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 99bc40e71e..05377f984b 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1319,11 +1319,11 @@ void IntrinsicCodeGeneratorX86::VisitStringEquals(HInvoke* invoke) {
__ j(kEqual, &return_false);
}
- // Instanceof check for the argument by comparing class fields.
- // All string objects must have the same type since String cannot be subclassed.
- // Receiver must be a string object, so its class field is equal to all strings' class fields.
- // If the argument is a string object, its class field must be equal to receiver's class field.
if (!optimizations.GetArgumentIsString()) {
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
__ movl(ecx, Address(str, class_offset));
__ cmpl(ecx, Address(arg, class_offset));
__ j(kNotEqual, &return_false);
@@ -2631,8 +2631,66 @@ void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke)
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
+void IntrinsicLocationsBuilderX86::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ LocationSummary* locations = invoke->GetLocations();
+ X86Assembler* assembler = GetAssembler();
+
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0));
+ DCHECK(temp_loc.Equals(locations->GetTemp(0)));
+ Register temp = temp_loc.AsRegister<Register>();
+
+ // Now get declaring class.
+ __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+ // Check static flags that prevent using the intrinsic.
+ if (slow_path_flag_offset == disable_flag_offset + 1) {
+ __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ } else {
+ __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ // Fast path.
+ __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(out);
+ __ Bind(slow_path->GetExitLabel());
+}
+
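Note: the cmpw fast case relies on the two byte flags being adjacent, so one 16-bit load reads both and the pair is zero only when both flags are zero. A host-side model (assumption: little-endian layout, as on x86):
  #include <cstdint>
  #include <cstring>
  bool MustTakeSlowPath(const uint8_t* klass, uint32_t disable_flag_offset) {
    uint16_t both;  // disable flag byte plus the adjacent slow-path flag byte
    std::memcpy(&both, klass + disable_flag_offset, sizeof(both));
    return both != 0;  // matches cmpw(Address(temp, disable_flag_offset), 0)
  }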
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86, SystemArrayCopy)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 06e9cc2b28..67c2f3a866 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1416,17 +1416,22 @@ void IntrinsicCodeGeneratorX86_64::VisitStringEquals(HInvoke* invoke) {
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- // Check if input is null, return false if it is.
- __ testl(arg, arg);
- __ j(kEqual, &return_false);
-
- // Instanceof check for the argument by comparing class fields.
- // All string objects must have the same type since String cannot be subclassed.
- // Receiver must be a string object, so its class field is equal to all strings' class fields.
- // If the argument is a string object, its class field must be equal to receiver's class field.
- __ movl(rcx, Address(str, class_offset));
- __ cmpl(rcx, Address(arg, class_offset));
- __ j(kNotEqual, &return_false);
+ StringEqualsOptimizations optimizations(invoke);
+ if (!optimizations.GetArgumentNotNull()) {
+ // Check if input is null, return false if it is.
+ __ testl(arg, arg);
+ __ j(kEqual, &return_false);
+ }
+
+ if (!optimizations.GetArgumentIsString()) {
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ movl(rcx, Address(str, class_offset));
+ __ cmpl(rcx, Address(arg, class_offset));
+ __ j(kNotEqual, &return_false);
+ }
// Reference equality check, return true if same reference.
__ cmpl(str, arg);
@@ -2719,7 +2724,65 @@ void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invok
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
+void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ LocationSummary* locations = invoke->GetLocations();
+ X86_64Assembler* assembler = GetAssembler();
+
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0));
+ DCHECK(temp_loc.Equals(locations->GetTemp(0)));
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+
+ // Now get declaring class.
+ __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+ // Check static flags that prevent using the intrinsic.
+ if (slow_path_flag_offset == disable_flag_offset + 1) {
+ __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ } else {
+ __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ // Fast path.
+ __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(out);
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 12ea059d3f..c08323a0c6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2290,7 +2290,7 @@ class HExpression : public HTemplateInstruction<N> {
// Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
// instruction that branches to the exit block.
-class HReturnVoid : public HTemplateInstruction<0> {
+class HReturnVoid FINAL : public HTemplateInstruction<0> {
public:
explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::None(), dex_pc) {}
@@ -2305,7 +2305,7 @@ class HReturnVoid : public HTemplateInstruction<0> {
// Represents dex's RETURN opcodes. A HReturn is a control flow
// instruction that branches to the exit block.
-class HReturn : public HTemplateInstruction<1> {
+class HReturn FINAL : public HTemplateInstruction<1> {
public:
explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::None(), dex_pc) {
@@ -2320,7 +2320,7 @@ class HReturn : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HReturn);
};
-class HPhi : public HInstruction {
+class HPhi FINAL : public HInstruction {
public:
HPhi(ArenaAllocator* arena,
uint32_t reg_number,
@@ -2424,7 +2424,7 @@ class HPhi : public HInstruction {
// The exit instruction is the only instruction of the exit block.
// Instructions aborting the method (HThrow and HReturn) must branch to the
// exit block.
-class HExit : public HTemplateInstruction<0> {
+class HExit FINAL : public HTemplateInstruction<0> {
public:
explicit HExit(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {}
@@ -2437,7 +2437,7 @@ class HExit : public HTemplateInstruction<0> {
};
// Jumps from one block to another.
-class HGoto : public HTemplateInstruction<0> {
+class HGoto FINAL : public HTemplateInstruction<0> {
public:
explicit HGoto(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {}
@@ -2477,7 +2477,7 @@ class HConstant : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HConstant);
};
-class HNullConstant : public HConstant {
+class HNullConstant FINAL : public HConstant {
public:
bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -2501,7 +2501,7 @@ class HNullConstant : public HConstant {
// Constants of the type int. Those can be from Dex instructions, or
// synthesized (for example with the if-eqz instruction).
-class HIntConstant : public HConstant {
+class HIntConstant FINAL : public HConstant {
public:
int32_t GetValue() const { return value_; }
@@ -2542,7 +2542,7 @@ class HIntConstant : public HConstant {
DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
-class HLongConstant : public HConstant {
+class HLongConstant FINAL : public HConstant {
public:
int64_t GetValue() const { return value_; }
@@ -2572,7 +2572,7 @@ class HLongConstant : public HConstant {
DISALLOW_COPY_AND_ASSIGN(HLongConstant);
};
-class HFloatConstant : public HConstant {
+class HFloatConstant FINAL : public HConstant {
public:
float GetValue() const { return value_; }
@@ -2625,7 +2625,7 @@ class HFloatConstant : public HConstant {
DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
};
-class HDoubleConstant : public HConstant {
+class HDoubleConstant FINAL : public HConstant {
public:
double GetValue() const { return value_; }
@@ -2678,7 +2678,7 @@ class HDoubleConstant : public HConstant {
// Conditional branch. A block ending with an HIf instruction must have
// two successors.
-class HIf : public HTemplateInstruction<1> {
+class HIf FINAL : public HTemplateInstruction<1> {
public:
explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::None(), dex_pc) {
@@ -2707,7 +2707,7 @@ class HIf : public HTemplateInstruction<1> {
// non-exceptional control flow.
// Normal-flow successor is stored at index zero, exception handlers under
// higher indices in no particular order.
-class HTryBoundary : public HTemplateInstruction<0> {
+class HTryBoundary FINAL : public HTemplateInstruction<0> {
public:
enum class BoundaryKind {
kEntry,
@@ -2765,7 +2765,7 @@ class HTryBoundary : public HTemplateInstruction<0> {
};
// Deoptimize to interpreter, upon checking a condition.
-class HDeoptimize : public HTemplateInstruction<1> {
+class HDeoptimize FINAL : public HTemplateInstruction<1> {
public:
// We set CanTriggerGC to prevent any intermediate address to be live
// at the point of the `HDeoptimize`.
@@ -2790,7 +2790,7 @@ class HDeoptimize : public HTemplateInstruction<1> {
// Represents the ArtMethod that was passed as a first argument to
// the method. It is used by instructions that depend on it, like
// instructions that work with the dex cache.
-class HCurrentMethod : public HExpression<0> {
+class HCurrentMethod FINAL : public HExpression<0> {
public:
explicit HCurrentMethod(Primitive::Type type, uint32_t dex_pc = kNoDexPc)
: HExpression(type, SideEffects::None(), dex_pc) {}
@@ -2803,7 +2803,7 @@ class HCurrentMethod : public HExpression<0> {
// Fetches an ArtMethod from the virtual table or the interface method table
// of a class.
-class HClassTableGet : public HExpression<1> {
+class HClassTableGet FINAL : public HExpression<1> {
public:
enum class TableKind {
kVTable,
@@ -2850,7 +2850,7 @@ class HClassTableGet : public HExpression<1> {
// PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
// have one successor for each entry in the switch table, and the final successor
// will be the block containing the next Dex opcode.
-class HPackedSwitch : public HTemplateInstruction<1> {
+class HPackedSwitch FINAL : public HTemplateInstruction<1> {
public:
HPackedSwitch(int32_t start_value,
uint32_t num_entries,
@@ -3095,7 +3095,7 @@ class HCondition : public HBinaryOperation {
};
// Instruction to check if two inputs are equal to each other.
-class HEqual : public HCondition {
+class HEqual FINAL : public HCondition {
public:
HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3139,7 +3139,7 @@ class HEqual : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HEqual);
};
-class HNotEqual : public HCondition {
+class HNotEqual FINAL : public HCondition {
public:
HNotEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3182,7 +3182,7 @@ class HNotEqual : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HNotEqual);
};
-class HLessThan : public HCondition {
+class HLessThan FINAL : public HCondition {
public:
HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3219,7 +3219,7 @@ class HLessThan : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HLessThan);
};
-class HLessThanOrEqual : public HCondition {
+class HLessThanOrEqual FINAL : public HCondition {
public:
HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3256,7 +3256,7 @@ class HLessThanOrEqual : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual);
};
-class HGreaterThan : public HCondition {
+class HGreaterThan FINAL : public HCondition {
public:
HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3293,7 +3293,7 @@ class HGreaterThan : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HGreaterThan);
};
-class HGreaterThanOrEqual : public HCondition {
+class HGreaterThanOrEqual FINAL : public HCondition {
public:
HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3330,7 +3330,7 @@ class HGreaterThanOrEqual : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual);
};
-class HBelow : public HCondition {
+class HBelow FINAL : public HCondition {
public:
HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3370,7 +3370,7 @@ class HBelow : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HBelow);
};
-class HBelowOrEqual : public HCondition {
+class HBelowOrEqual FINAL : public HCondition {
public:
HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3410,7 +3410,7 @@ class HBelowOrEqual : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual);
};
-class HAbove : public HCondition {
+class HAbove FINAL : public HCondition {
public:
HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3450,7 +3450,7 @@ class HAbove : public HCondition {
DISALLOW_COPY_AND_ASSIGN(HAbove);
};
-class HAboveOrEqual : public HCondition {
+class HAboveOrEqual FINAL : public HCondition {
public:
HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
@@ -3492,7 +3492,7 @@ class HAboveOrEqual : public HCondition {
// Instruction to check how two inputs compare to each other.
// Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
-class HCompare : public HBinaryOperation {
+class HCompare FINAL : public HBinaryOperation {
public:
// Note that `comparison_type` is the type of comparison performed
// between the comparison's inputs, not the type of the instantiated
@@ -3581,7 +3581,7 @@ class HCompare : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
-class HNewInstance : public HExpression<2> {
+class HNewInstance FINAL : public HExpression<2> {
public:
HNewInstance(HInstruction* cls,
HCurrentMethod* current_method,
@@ -3784,7 +3784,7 @@ class HInvoke : public HInstruction {
DISALLOW_COPY_AND_ASSIGN(HInvoke);
};
-class HInvokeUnresolved : public HInvoke {
+class HInvokeUnresolved FINAL : public HInvoke {
public:
HInvokeUnresolved(ArenaAllocator* arena,
uint32_t number_of_arguments,
@@ -3807,7 +3807,7 @@ class HInvokeUnresolved : public HInvoke {
DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved);
};
-class HInvokeStaticOrDirect : public HInvoke {
+class HInvokeStaticOrDirect FINAL : public HInvoke {
public:
// Requirements of this method call regarding the class
// initialization (clinit) check of its declaring class.
@@ -4096,7 +4096,7 @@ class HInvokeStaticOrDirect : public HInvoke {
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
-class HInvokeVirtual : public HInvoke {
+class HInvokeVirtual FINAL : public HInvoke {
public:
HInvokeVirtual(ArenaAllocator* arena,
uint32_t number_of_arguments,
@@ -4122,7 +4122,7 @@ class HInvokeVirtual : public HInvoke {
DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
};
-class HInvokeInterface : public HInvoke {
+class HInvokeInterface FINAL : public HInvoke {
public:
HInvokeInterface(ArenaAllocator* arena,
uint32_t number_of_arguments,
@@ -4149,7 +4149,7 @@ class HInvokeInterface : public HInvoke {
DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
};
-class HNeg : public HUnaryOperation {
+class HNeg FINAL : public HUnaryOperation {
public:
HNeg(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(result_type, input, dex_pc) {
@@ -4177,7 +4177,7 @@ class HNeg : public HUnaryOperation {
DISALLOW_COPY_AND_ASSIGN(HNeg);
};
-class HNewArray : public HExpression<2> {
+class HNewArray FINAL : public HExpression<2> {
public:
HNewArray(HInstruction* length,
HCurrentMethod* current_method,
@@ -4216,7 +4216,7 @@ class HNewArray : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HNewArray);
};
-class HAdd : public HBinaryOperation {
+class HAdd FINAL : public HBinaryOperation {
public:
HAdd(Primitive::Type result_type,
HInstruction* left,
@@ -4251,7 +4251,7 @@ class HAdd : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HAdd);
};
-class HSub : public HBinaryOperation {
+class HSub FINAL : public HBinaryOperation {
public:
HSub(Primitive::Type result_type,
HInstruction* left,
@@ -4284,7 +4284,7 @@ class HSub : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HSub);
};
-class HMul : public HBinaryOperation {
+class HMul FINAL : public HBinaryOperation {
public:
HMul(Primitive::Type result_type,
HInstruction* left,
@@ -4319,7 +4319,7 @@ class HMul : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HMul);
};
-class HDiv : public HBinaryOperation {
+class HDiv FINAL : public HBinaryOperation {
public:
HDiv(Primitive::Type result_type,
HInstruction* left,
@@ -4371,7 +4371,7 @@ class HDiv : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HDiv);
};
-class HRem : public HBinaryOperation {
+class HRem FINAL : public HBinaryOperation {
public:
HRem(Primitive::Type result_type,
HInstruction* left,
@@ -4422,7 +4422,7 @@ class HRem : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HRem);
};
-class HDivZeroCheck : public HExpression<1> {
+class HDivZeroCheck FINAL : public HExpression<1> {
public:
// `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
// constructor.
@@ -4448,7 +4448,7 @@ class HDivZeroCheck : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
};
-class HShl : public HBinaryOperation {
+class HShl FINAL : public HBinaryOperation {
public:
HShl(Primitive::Type result_type,
HInstruction* value,
@@ -4494,7 +4494,7 @@ class HShl : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HShl);
};
-class HShr : public HBinaryOperation {
+class HShr FINAL : public HBinaryOperation {
public:
HShr(Primitive::Type result_type,
HInstruction* value,
@@ -4540,7 +4540,7 @@ class HShr : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HShr);
};
-class HUShr : public HBinaryOperation {
+class HUShr FINAL : public HBinaryOperation {
public:
HUShr(Primitive::Type result_type,
HInstruction* value,
@@ -4588,7 +4588,7 @@ class HUShr : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HUShr);
};
-class HAnd : public HBinaryOperation {
+class HAnd FINAL : public HBinaryOperation {
public:
HAnd(Primitive::Type result_type,
HInstruction* left,
@@ -4625,7 +4625,7 @@ class HAnd : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HAnd);
};
-class HOr : public HBinaryOperation {
+class HOr FINAL : public HBinaryOperation {
public:
HOr(Primitive::Type result_type,
HInstruction* left,
@@ -4662,7 +4662,7 @@ class HOr : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HOr);
};
-class HXor : public HBinaryOperation {
+class HXor FINAL : public HBinaryOperation {
public:
HXor(Primitive::Type result_type,
HInstruction* left,
@@ -4699,7 +4699,7 @@ class HXor : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HXor);
};
-class HRor : public HBinaryOperation {
+class HRor FINAL : public HBinaryOperation {
public:
HRor(Primitive::Type result_type, HInstruction* value, HInstruction* distance)
: HBinaryOperation(result_type, value, distance) {
@@ -4752,7 +4752,7 @@ class HRor : public HBinaryOperation {
// The value of a parameter in this method. Its location depends on
// the calling convention.
-class HParameterValue : public HExpression<0> {
+class HParameterValue FINAL : public HExpression<0> {
public:
HParameterValue(const DexFile& dex_file,
uint16_t type_index,
@@ -4794,7 +4794,7 @@ class HParameterValue : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HParameterValue);
};
-class HNot : public HUnaryOperation {
+class HNot FINAL : public HUnaryOperation {
public:
HNot(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(result_type, input, dex_pc) {}
@@ -4827,7 +4827,7 @@ class HNot : public HUnaryOperation {
DISALLOW_COPY_AND_ASSIGN(HNot);
};
-class HBooleanNot : public HUnaryOperation {
+class HBooleanNot FINAL : public HUnaryOperation {
public:
explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {}
@@ -4864,7 +4864,7 @@ class HBooleanNot : public HUnaryOperation {
DISALLOW_COPY_AND_ASSIGN(HBooleanNot);
};
-class HTypeConversion : public HExpression<1> {
+class HTypeConversion FINAL : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc)
@@ -4907,7 +4907,7 @@ class HTypeConversion : public HExpression<1> {
static constexpr uint32_t kNoRegNumber = -1;
-class HNullCheck : public HExpression<1> {
+class HNullCheck FINAL : public HExpression<1> {
public:
// `HNullCheck` can trigger GC, as it may call the `NullPointerException`
// constructor.
@@ -4969,7 +4969,7 @@ class FieldInfo : public ValueObject {
const Handle<mirror::DexCache> dex_cache_;
};
-class HInstanceFieldGet : public HExpression<1> {
+class HInstanceFieldGet FINAL : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
Primitive::Type field_type,
@@ -5021,7 +5021,7 @@ class HInstanceFieldGet : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HInstanceFieldGet);
};
-class HInstanceFieldSet : public HTemplateInstruction<2> {
+class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
public:
HInstanceFieldSet(HInstruction* object,
HInstruction* value,
@@ -5072,7 +5072,7 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet);
};
-class HArrayGet : public HExpression<2> {
+class HArrayGet FINAL : public HExpression<2> {
public:
HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type, uint32_t dex_pc)
: HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
@@ -5118,7 +5118,7 @@ class HArrayGet : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HArrayGet);
};
-class HArraySet : public HTemplateInstruction<3> {
+class HArraySet FINAL : public HTemplateInstruction<3> {
public:
HArraySet(HInstruction* array,
HInstruction* index,
@@ -5218,7 +5218,7 @@ class HArraySet : public HTemplateInstruction<3> {
DISALLOW_COPY_AND_ASSIGN(HArraySet);
};
-class HArrayLength : public HExpression<1> {
+class HArrayLength FINAL : public HExpression<1> {
public:
HArrayLength(HInstruction* array, uint32_t dex_pc)
: HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
@@ -5254,7 +5254,7 @@ class HArrayLength : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HArrayLength);
};
-class HBoundsCheck : public HExpression<2> {
+class HBoundsCheck FINAL : public HExpression<2> {
public:
// `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
// constructor.
@@ -5282,7 +5282,7 @@ class HBoundsCheck : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
-class HSuspendCheck : public HTemplateInstruction<0> {
+class HSuspendCheck FINAL : public HTemplateInstruction<0> {
public:
explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc), slow_path_(nullptr) {}
@@ -5324,7 +5324,7 @@ class HNativeDebugInfo : public HTemplateInstruction<0> {
/**
* Instruction to load a Class object.
*/
-class HLoadClass : public HExpression<1> {
+class HLoadClass FINAL : public HExpression<1> {
public:
HLoadClass(HCurrentMethod* current_method,
uint16_t type_index,
@@ -5428,7 +5428,7 @@ class HLoadClass : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HLoadClass);
};
-class HLoadString : public HExpression<1> {
+class HLoadString FINAL : public HExpression<1> {
public:
// Determines how to load the String.
enum class LoadKind {
@@ -5630,7 +5630,7 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
/**
* Performs an initialization check on its Class object input.
*/
-class HClinitCheck : public HExpression<1> {
+class HClinitCheck FINAL : public HExpression<1> {
public:
HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
: HExpression(
@@ -5660,7 +5660,7 @@ class HClinitCheck : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HClinitCheck);
};
-class HStaticFieldGet : public HExpression<1> {
+class HStaticFieldGet FINAL : public HExpression<1> {
public:
HStaticFieldGet(HInstruction* cls,
Primitive::Type field_type,
@@ -5709,7 +5709,7 @@ class HStaticFieldGet : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldGet);
};
-class HStaticFieldSet : public HTemplateInstruction<2> {
+class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
public:
HStaticFieldSet(HInstruction* cls,
HInstruction* value,
@@ -5757,7 +5757,7 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
-class HUnresolvedInstanceFieldGet : public HExpression<1> {
+class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
public:
HUnresolvedInstanceFieldGet(HInstruction* obj,
Primitive::Type field_type,
@@ -5782,7 +5782,7 @@ class HUnresolvedInstanceFieldGet : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet);
};
-class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> {
+class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
public:
HUnresolvedInstanceFieldSet(HInstruction* obj,
HInstruction* value,
@@ -5820,7 +5820,7 @@ class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
};
-class HUnresolvedStaticFieldGet : public HExpression<0> {
+class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
public:
HUnresolvedStaticFieldGet(Primitive::Type field_type,
uint32_t field_index,
@@ -5843,7 +5843,7 @@ class HUnresolvedStaticFieldGet : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet);
};
-class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> {
+class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
public:
HUnresolvedStaticFieldSet(HInstruction* value,
Primitive::Type field_type,
@@ -5880,7 +5880,7 @@ class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> {
};
// Implement the move-exception DEX instruction.
-class HLoadException : public HExpression<0> {
+class HLoadException FINAL : public HExpression<0> {
public:
explicit HLoadException(uint32_t dex_pc = kNoDexPc)
: HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc) {}
@@ -5895,7 +5895,7 @@ class HLoadException : public HExpression<0> {
// Implicit part of move-exception which clears thread-local exception storage.
// Must not be removed because the runtime expects the TLS to get cleared.
-class HClearException : public HTemplateInstruction<0> {
+class HClearException FINAL : public HTemplateInstruction<0> {
public:
explicit HClearException(uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::AllWrites(), dex_pc) {}
@@ -5906,7 +5906,7 @@ class HClearException : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HClearException);
};
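// HLoadException and HClearException together implement dex
// `move-exception`: read the pending exception out of thread-local
// storage, then null the slot so a stale value cannot be observed twice.
// A conceptual sketch with a hypothetical TLS slot (the real slot lives
// in art::Thread):
thread_local void* tls_exception = nullptr;

void* MoveException() {
  void* pending = tls_exception;  // HLoadException: read the TLS slot
  tls_exception = nullptr;        // HClearException: must not be removed
  return pending;                 // the value the catch handler receives
}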
-class HThrow : public HTemplateInstruction<1> {
+class HThrow FINAL : public HTemplateInstruction<1> {
public:
HThrow(HInstruction* exception, uint32_t dex_pc)
: HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
@@ -5943,7 +5943,7 @@ enum class TypeCheckKind {
std::ostream& operator<<(std::ostream& os, TypeCheckKind rhs);
-class HInstanceOf : public HExpression<2> {
+class HInstanceOf FINAL : public HExpression<2> {
public:
HInstanceOf(HInstruction* object,
HLoadClass* constant,
@@ -5997,7 +5997,7 @@ class HInstanceOf : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
};
-class HBoundType : public HExpression<1> {
+class HBoundType FINAL : public HExpression<1> {
public:
HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc),
@@ -6041,7 +6041,7 @@ class HBoundType : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HBoundType);
};
-class HCheckCast : public HTemplateInstruction<2> {
+class HCheckCast FINAL : public HTemplateInstruction<2> {
public:
HCheckCast(HInstruction* object,
HLoadClass* constant,
@@ -6086,7 +6086,7 @@ class HCheckCast : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HCheckCast);
};
-class HMemoryBarrier : public HTemplateInstruction<0> {
+class HMemoryBarrier FINAL : public HTemplateInstruction<0> {
public:
explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(
@@ -6111,7 +6111,7 @@ class HMemoryBarrier : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HMemoryBarrier);
};
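// A rough C++ analogue of what the backends emit for HMemoryBarrier
// (assumption: a full any-any barrier maps to a sequentially consistent
// fence; weaker MemBarrierKind values map to weaker fences, or to nothing
// on x86):
#include <atomic>

inline void FullBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // dmb ish / mfence
}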
-class HMonitorOperation : public HTemplateInstruction<1> {
+class HMonitorOperation FINAL : public HTemplateInstruction<1> {
public:
enum class OperationKind {
kEnter,
@@ -6156,7 +6156,7 @@ class HMonitorOperation : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
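// One node class covers both dex `monitor-enter` and `monitor-exit`,
// selected by OperationKind. Conceptually it is scoped locking; a
// simplified sketch (hypothetical code, not ART's lock word handling):
#include <mutex>

void Synchronized(std::mutex& monitor, void (*body)()) {
  monitor.lock();    // HMonitorOperation, OperationKind::kEnter
  body();
  monitor.unlock();  // HMonitorOperation, OperationKind::kExit
}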
-class HSelect : public HExpression<3> {
+class HSelect FINAL : public HExpression<3> {
public:
HSelect(HInstruction* condition,
HInstruction* true_value,
@@ -6269,7 +6269,7 @@ std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs);
static constexpr size_t kDefaultNumberOfMoves = 4;
-class HParallelMove : public HTemplateInstruction<0> {
+class HParallelMove FINAL : public HTemplateInstruction<0> {
public:
explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::None(), dex_pc),
diff --git a/compiler/optimizing/nodes_arm.h b/compiler/optimizing/nodes_arm.h
index 6a1dbb9e70..371e8ef6bb 100644
--- a/compiler/optimizing/nodes_arm.h
+++ b/compiler/optimizing/nodes_arm.h
@@ -19,7 +19,7 @@
namespace art {
-class HArmDexCacheArraysBase : public HExpression<0> {
+class HArmDexCacheArraysBase FINAL : public HExpression<0> {
public:
explicit HArmDexCacheArraysBase(const DexFile& dex_file)
: HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc),
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 173852a55d..737aece9c8 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -21,7 +21,7 @@
namespace art {
-class HArm64DataProcWithShifterOp : public HExpression<2> {
+class HArm64DataProcWithShifterOp FINAL : public HExpression<2> {
public:
enum OpKind {
kLSL, // Logical shift left.
@@ -97,7 +97,7 @@ std::ostream& operator<<(std::ostream& os, const HArm64DataProcWithShifterOp::Op
// This instruction computes an intermediate address pointing in the 'middle' of an object. The
// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
// never used across anything that can trigger GC.
-class HArm64IntermediateAddress : public HExpression<2> {
+class HArm64IntermediateAddress FINAL : public HExpression<2> {
public:
HArm64IntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
: HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
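// What the node above computes, in scalar form (hypothetical types): an
// interior pointer into an object. A moving GC cannot update such a
// pointer, which is why the instruction carries
// SideEffects::DependsOnGC() and must never be live across anything that
// can trigger GC:
#include <cstddef>
#include <cstdint>

int32_t LoadElement(const uint8_t* object, size_t data_offset, size_t i) {
  const uint8_t* elements = object + data_offset;  // intermediate address
  return reinterpret_cast<const int32_t*>(elements)[i];  // base+index load
}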
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index c10c718ff4..bdcf54a6fb 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -19,7 +19,7 @@
namespace art {
-class HMultiplyAccumulate : public HExpression<3> {
+class HMultiplyAccumulate FINAL : public HExpression<3> {
public:
HMultiplyAccumulate(Primitive::Type type,
InstructionKind op,
@@ -53,7 +53,7 @@ class HMultiplyAccumulate : public HExpression<3> {
DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate);
};
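// What HMultiplyAccumulate fuses: acc +/- (x * y) as a single node, so
// the ARM backends can select one instruction (mla/mls on ARM, madd/msub
// on ARM64) instead of a separate multiply and add:
#include <cstdint>

int32_t MulAdd(int32_t acc, int32_t x, int32_t y) { return acc + x * y; }
int32_t MulSub(int32_t acc, int32_t x, int32_t y) { return acc - x * y; }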
-class HBitwiseNegatedRight : public HBinaryOperation {
+class HBitwiseNegatedRight FINAL : public HBinaryOperation {
public:
HBitwiseNegatedRight(Primitive::Type result_type,
InstructionKind op,
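// What HBitwiseNegatedRight computes: op(x, ~y) as one node, so the
// backends can select single-instruction forms such as bic/orn on ARM and
// bic/orn/eon on ARM64, instead of emitting a separate NOT:
#include <cstdint>

uint32_t AndNot(uint32_t x, uint32_t y) { return x & ~y; }  // -> bic
uint32_t OrNot(uint32_t x, uint32_t y)  { return x | ~y; }  // -> orn
uint32_t XorNot(uint32_t x, uint32_t y) { return x ^ ~y; }  // -> eon (ARM64)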
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index 0b3a84d3d3..c3696b5936 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -20,7 +20,7 @@
namespace art {
// Compute the address of the method for X86 Constant area support.
-class HX86ComputeBaseMethodAddress : public HExpression<0> {
+class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
public:
// Treat the value as an int32_t, but it is really a 32 bit native pointer.
HX86ComputeBaseMethodAddress()
@@ -33,7 +33,7 @@ class HX86ComputeBaseMethodAddress : public HExpression<0> {
};
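// Why a dedicated base node exists: 32-bit x86 has no PC-relative data
// addressing, so the method's address is materialized once (classically
// via a call/pop sequence) and constants are then read at fixed offsets
// from it; HX86LoadFromConstantTable below consumes that base. A scalar
// model of the resulting access (hypothetical layout):
#include <cstddef>
#include <cstdint>

double LoadFpConstant(const uint8_t* method_base, ptrdiff_t const_offset) {
  return *reinterpret_cast<const double*>(method_base + const_offset);
}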
// Load a constant value from the constant table.
-class HX86LoadFromConstantTable : public HExpression<2> {
+class HX86LoadFromConstantTable FINAL : public HExpression<2> {
public:
HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
HConstant* constant)
@@ -57,7 +57,7 @@ class HX86LoadFromConstantTable : public HExpression<2> {
};
// Version of HNeg with access to the constant table for FP types.
-class HX86FPNeg : public HExpression<2> {
+class HX86FPNeg FINAL : public HExpression<2> {
public:
HX86FPNeg(Primitive::Type result_type,
HInstruction* input,
@@ -76,7 +76,7 @@ class HX86FPNeg : public HExpression<2> {
};
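// Why FP negation wants the constant table: on x86 it is done by XOR-ing
// the sign bit with a mask loaded from memory (xorps/xorpd), not with an
// integer instruction. A scalar model of that lowering (hypothetical
// code):
#include <cstdint>
#include <cstring>

float NegateFloat(float v) {
  uint32_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  bits ^= UINT32_C(0x80000000);  // sign-bit mask from the constant area
  std::memcpy(&v, &bits, sizeof(bits));
  return v;
}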
// X86 version of HPackedSwitch that holds a pointer to the base method address.
-class HX86PackedSwitch : public HTemplateInstruction<2> {
+class HX86PackedSwitch FINAL : public HTemplateInstruction<2> {
public:
HX86PackedSwitch(int32_t start_value,
int32_t num_entries,