 compiler/optimizing/code_generator.cc             | 2 +-
 compiler/optimizing/code_generator_arm_vixl.cc    | 2 +-
 compiler/optimizing/code_generator_x86.cc         | 4 ++--
 compiler/optimizing/code_generator_x86_64.cc      | 2 +-
 compiler/optimizing/constant_folding_test.cc      | 2 +-
 compiler/optimizing/dead_code_elimination_test.cc | 2 +-
 compiler/optimizing/intrinsics_arm64.cc           | 6 +++---
 compiler/optimizing/intrinsics_arm_vixl.cc        | 6 +++---
 compiler/optimizing/intrinsics_x86.cc             | 6 +++---
 compiler/optimizing/intrinsics_x86_64.cc          | 9 ++++-----
 10 files changed, 20 insertions(+), 21 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 05aebee2ee..77cebfc56c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -628,7 +628,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary(
}
// Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
- // regardless of the the type. Because of that we forced to special case
+ // regardless of the type. Because of that, we are forced to special-case
// the access to floating point values.
if (is_get) {
if (DataType::IsFloatingPointType(field_type)) {
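
[Editor's note: the comment above is about reinterpreting, not converting, the raw
bits that the pGetXXStatic-style runtime call returns in a core register. A minimal
standalone sketch of that distinction (plain C++; BitsToFloat is a hypothetical
helper for illustration, not an ART function):

    #include <cstdint>
    #include <cstring>

    // Reinterpret 32 raw bits (as returned by an int-typed runtime call)
    // as a float. std::memcpy is the well-defined way to do the bit copy.
    static float BitsToFloat(int32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }

    // A static_cast would instead perform a numeric conversion and lose
    // the point: BitsToFloat(0x3f800000) == 1.0f, while
    // static_cast<float>(0x3f800000) yields 1065353216.0f.

This is why floating-point field accesses need the special case the comment
describes.]
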
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c9e454e858..8795edf74b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -149,7 +149,7 @@ static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
RegisterSet caller_saves = RegisterSet::Empty();
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
// TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
- // that the the kPrimNot result register is the same as the first argument register.
+ // that the kPrimNot result register is the same as the first argument register.
return caller_saves;
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 959de47b8d..9783db7d17 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -68,7 +68,7 @@ static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
RegisterSet caller_saves = RegisterSet::Empty();
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
// TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
- // that the the kPrimNot result register is the same as the first argument register.
+ // that the kPrimNot result register is the same as the first argument register.
return caller_saves;
}
@@ -653,7 +653,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode {
__ cmpl(temp_, ref_reg);
__ j(kEqual, &done);
- // Update the the holder's field atomically. This may fail if
+ // Update the holder's field atomically. This may fail if
// mutator updates before us, but it's OK. This is achieved
// using a strong compare-and-set (CAS) operation with relaxed
// memory synchronization ordering, where the expected value is
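
[Editor's note: the comment in this hunk compresses a subtle point: the field
update uses a strong compare-and-set with relaxed memory ordering, and losing the
race to another mutator is harmless, so a failed CAS is simply ignored. A hedged
sketch of that pattern using std::atomic (illustrative only, not the generated
x86 code; UpdateHolderField is an invented name):

    #include <atomic>

    // Install `marked` into `field` only if it still holds `expected_old`.
    // Relaxed ordering suffices because correctness does not depend on
    // publishing other stores; if another thread updated the field first,
    // its value is equally acceptable.
    static void UpdateHolderField(std::atomic<void*>& field,
                                  void* expected_old,
                                  void* marked) {
      void* expected = expected_old;
      field.compare_exchange_strong(expected, marked,
                                    std::memory_order_relaxed);
    }
]
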
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 4f81c00741..3e8c86bcde 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -668,7 +668,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode {
__ cmpl(temp1_, ref_cpu_reg);
__ j(kEqual, &done);
- // Update the the holder's field atomically. This may fail if
+ // Update the holder's field atomically. This may fail if
// mutator updates before us, but it's OK. This is achieved
// using a strong compare-and-set (CAS) operation with relaxed
// memory synchronization ordering, where the expected value is
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 741fd3f822..acdc8e6d3c 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -551,7 +551,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
*
* The intent of this test is to ensure that all constant expressions
* are actually evaluated at compile-time, thanks to the reverse
- * (forward) post-order traversal of the the dominator tree.
+ * (forward) post-order traversal of the dominator tree.
*
* 16-bit
* offset
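
[Editor's note: the traversal order named in this comment is what makes a single
folding pass sufficient: in reverse post-order, every definition is visited before
its uses, so operands are already constants when an instruction is reached. A toy
illustration on a hypothetical three-address IR (Insn and FoldAllInRpo are
inventions for this sketch, not ART's HGraph API):

    #include <map>
    #include <string>
    #include <vector>

    struct Insn { std::string dst, lhs, rhs; };  // dst = lhs + rhs

    // Fold every instruction to a constant in one pass. This works only
    // because `rpo` is in reverse post-order: each operand name was
    // defined (and already folded) earlier in the sequence.
    static std::map<std::string, long> FoldAllInRpo(
        const std::vector<Insn>& rpo) {
      std::map<std::string, long> value;
      for (const Insn& insn : rpo) {
        auto eval = [&](const std::string& op) {
          auto it = value.find(op);
          return it != value.end() ? it->second   // a folded definition
                                   : std::stol(op);  // an integer literal
        };
        value[insn.dst] = eval(insn.lhs) + eval(insn.rhs);
      }
      return value;
    }

    // FoldAllInRpo({{"a", "1", "2"}, {"b", "a", "3"}}) yields a = 3, b = 6.
]
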
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 079fff91b6..4082ec58fc 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -115,7 +115,7 @@ TEST_F(DeadCodeEliminationTest, AdditionAndConditionalJump) {
*
* The intent of this test is to ensure that all dead instructions are
* actually pruned at compile-time, thanks to the (backward)
- * post-order traversal of the the dominator tree.
+ * post-order traversal of the dominator tree.
*
* 16-bit
* offset
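
[Editor's note: dually to the constant-folding test above, pruning in a backward
(post-order) walk visits uses before definitions, so removing a dead use can
expose its inputs as dead within the same pass. A minimal sketch of that
dependency (hypothetical Node/PruneDead types, not ART's HGraph API):

    #include <vector>

    struct Node {
      std::vector<Node*> inputs;
      int use_count = 0;
      bool has_side_effects = false;
      bool removed = false;
    };

    // One backward walk suffices: removing a dead instruction decrements
    // its inputs' use counts, so those inputs are already dead by the time
    // the walk reaches them.
    static void PruneDead(std::vector<Node*>& uses_before_defs) {
      for (Node* n : uses_before_defs) {
        if (n->use_count == 0 && !n->has_side_effects) {
          n->removed = true;
          for (Node* in : n->inputs) --in->use_count;
        }
      }
    }
]
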
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c833e3b107..3183dac348 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -3260,7 +3260,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
/* use_load_acquire= */ false);
__ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
// /* uint16_t */ temp1 = static_cast<uint16>(temp1->primitive_type_);
__ Ldrh(temp1, HeapOperand(temp1, primitive_offset));
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
@@ -3294,7 +3294,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
/* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
// /* uint16_t */ temp2 = static_cast<uint16>(temp2->primitive_type_);
__ Ldrh(temp2, HeapOperand(temp2, primitive_offset));
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
@@ -3419,7 +3419,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
/* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
} else {
// /* HeapReference<Class> */ temp1 = src->klass_
__ Ldr(temp1, HeapOperand(src.W(), class_offset));
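
[Editor's note: for readers outside ART, "heap poisoning" in these comments means
heap references are stored in an encoded form so that a stray direct dereference
faults quickly; load helpers such as GenerateFieldLoadWithBakerReadBarrier undo
the encoding, which is why the unpoisoned value can be used directly afterwards.
A sketch of the convention, assuming the negation encoding that ART uses on its
common poisoning configuration:

    #include <cstdint>

    // Poisoning and unpoisoning are the same involution: ref -> -ref.
    // A poisoned reference is not a valid pointer, so code that forgets
    // to unpoison crashes early instead of silently reading the wrong
    // object.
    static uint32_t PoisonHeapReference(uint32_t ref)   { return 0u - ref; }
    static uint32_t UnpoisonHeapReference(uint32_t ref) { return 0u - ref; }
]
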
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index b94fbe035c..5f4de8cda2 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1465,7 +1465,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
// /* uint16_t */ temp1 = static_cast<uint16>(temp1->primitive_type_);
__ Ldrh(temp1, MemOperand(temp1, primitive_offset));
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
@@ -1489,7 +1489,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
// /* uint16_t */ temp2 = static_cast<uint16>(temp2->primitive_type_);
__ Ldrh(temp2, MemOperand(temp2, primitive_offset));
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
@@ -1594,7 +1594,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp3` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
} else {
// /* HeapReference<Class> */ temp1 = src->klass_
__ Ldr(temp1, MemOperand(src, class_offset));
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 2714961162..1823bd4b4c 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3258,7 +3258,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
} else {
// /* HeapReference<Class> */ temp1 = src->klass_
__ movl(temp1, Address(src, class_offset));
@@ -3303,7 +3303,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
__ testl(temp2, temp2);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
__ cmpw(Address(temp2, primitive_offset), Immediate(Primitive::kPrimNot));
__ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
@@ -3382,7 +3382,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
- // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
} else {
// /* HeapReference<Class> */ temp1 = src->klass_
__ movl(temp1, Address(src, class_offset));
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 680f2797e4..493cd67c27 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1015,9 +1015,8 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
- // If heap poisoning is enabled, `temp1` and `temp2` have been
- // unpoisoned by the the previous calls to
- // GenerateFieldLoadWithBakerReadBarrier.
+ // If heap poisoning is enabled, `temp1` and `temp2` have been unpoisoned
+ // by the previous calls to GenerateFieldLoadWithBakerReadBarrier.
} else {
// /* HeapReference<Class> */ temp1 = dest->klass_
__ movl(temp1, Address(dest, class_offset));
@@ -1042,7 +1041,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
- // the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // the previous call to GenerateFieldLoadWithBakerReadBarrier.
} else {
// /* HeapReference<Class> */ TMP = temp1->component_type_
__ movl(CpuRegister(TMP), Address(temp1, component_offset));
@@ -1065,7 +1064,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
- // the the previous call to GenerateFieldLoadWithBakerReadBarrier.
+ // the previous call to GenerateFieldLoadWithBakerReadBarrier.
} else {
// /* HeapReference<Class> */ TMP = temp2->component_type_
__ movl(CpuRegister(TMP), Address(temp2, component_offset));