 compiler/optimizing/constant_folding.cc      |  53
 compiler/optimizing/constant_folding_test.cc | 232
 runtime/gc/accounting/space_bitmap-inl.h     |   2
 runtime/gc/collector/concurrent_copying.cc   |   6
 runtime/mirror/object-inl.h                  |  85
 runtime/mirror/object.h                      |  21
 runtime/read_barrier-inl.h                   |   6
 runtime/thread_list.cc                       |  34
 runtime/thread_list.h                        |  39
 9 files changed, 390 insertions(+), 88 deletions(-)
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index e0aa4ff489..57452cc076 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -27,6 +27,11 @@ class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
  private:
   void VisitShift(HBinaryOperation* shift);
 
+  void VisitAbove(HAbove* instruction) OVERRIDE;
+  void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
+  void VisitBelow(HBelow* instruction) OVERRIDE;
+  void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE;
+
   void VisitAnd(HAnd* instruction) OVERRIDE;
   void VisitCompare(HCompare* instruction) OVERRIDE;
   void VisitMul(HMul* instruction) OVERRIDE;
@@ -105,6 +110,54 @@ void InstructionWithAbsorbingInputSimplifier::VisitShift(HBinaryOperation* instr
   }
 }
 
+void InstructionWithAbsorbingInputSimplifier::VisitAbove(HAbove* instruction) {
+  if (instruction->GetLeft()->IsConstant() &&
+      instruction->GetLeft()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    ABOVE dst, 0, src  // unsigned 0 > src is always false
+    // with
+    //    CONSTANT false
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitAboveOrEqual(HAboveOrEqual* instruction) {
+  if (instruction->GetRight()->IsConstant() &&
+      instruction->GetRight()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    ABOVE_OR_EQUAL dst, src, 0  // unsigned src >= 0 is always true
+    // with
+    //    CONSTANT true
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitBelow(HBelow* instruction) {
+  if (instruction->GetRight()->IsConstant() &&
+      instruction->GetRight()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    BELOW dst, src, 0  // unsigned src < 0 is always false
+    // with
+    //    CONSTANT false
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitBelowOrEqual(HBelowOrEqual* instruction) {
+  if (instruction->GetLeft()->IsConstant() &&
+      instruction->GetLeft()->AsConstant()->IsZero()) {
+    // Replace code looking like
+    //    BELOW_OR_EQUAL dst, 0, src  // unsigned 0 <= src is always true
+    // with
+    //    CONSTANT true
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
 void InstructionWithAbsorbingInputSimplifier::VisitAnd(HAnd* instruction) {
   HConstant* input_cst = instruction->GetConstantRight();
   if ((input_cst != nullptr) && input_cst->IsZero()) {
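The four new visitors encode the standard unsigned-comparison identities against zero. A minimal standalone C++ sketch (illustrative only, not ART code) of why each case folds to a constant:

#include <cassert>
#include <cstdint>

// For any unsigned x: 0 > x is false, x >= 0 is true, x < 0 is false,
// and 0 <= x is true -- the same four cases that VisitAbove,
// VisitAboveOrEqual, VisitBelow, and VisitBelowOrEqual fold.
void CheckUnsignedZeroIdentities(uint32_t x) {
  assert(!(0u > x));  // ABOVE dst, 0, src          -> CONSTANT false
  assert(x >= 0u);    // ABOVE_OR_EQUAL dst, src, 0 -> CONSTANT true
  assert(!(x < 0u));  // BELOW dst, src, 0          -> CONSTANT false
  assert(0u <= x);    // BELOW_OR_EQUAL dst, 0, src -> CONSTANT true
}

int main() {
  for (uint32_t x : {0u, 1u, 0x7fffffffu, 0xffffffffu}) {
    CheckUnsignedZeroIdentities(x);
  }
}
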
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 2feb75cc9f..e469c8d6d0 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -29,50 +29,70 @@
 
 namespace art {
 
-static void TestCode(const uint16_t* data,
-                     const std::string& expected_before,
-                     const std::string& expected_after_cf,
-                     const std::string& expected_after_dce,
-                     std::function<void(HGraph*)> check_after_cf,
-                     Primitive::Type return_type = Primitive::kPrimInt) {
-  ArenaPool pool;
-  ArenaAllocator allocator(&pool);
-  HGraph* graph = CreateCFG(&allocator, data, return_type);
-  ASSERT_NE(graph, nullptr);
-
-  graph->TryBuildingSsa();
-
-  StringPrettyPrinter printer_before(graph);
-  printer_before.VisitInsertionOrder();
-  std::string actual_before = printer_before.str();
-  ASSERT_EQ(expected_before, actual_before);
-
-  std::unique_ptr<const X86InstructionSetFeatures> features_x86(
-      X86InstructionSetFeatures::FromCppDefines());
-  x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
-  HConstantFolding(graph).Run();
-  SSAChecker ssa_checker_cf(graph);
-  ssa_checker_cf.Run();
-  ASSERT_TRUE(ssa_checker_cf.IsValid());
-
-  StringPrettyPrinter printer_after_cf(graph);
-  printer_after_cf.VisitInsertionOrder();
-  std::string actual_after_cf = printer_after_cf.str();
-  ASSERT_EQ(expected_after_cf, actual_after_cf);
-
-  check_after_cf(graph);
-
-  HDeadCodeElimination(graph).Run();
-  SSAChecker ssa_checker_dce(graph);
-  ssa_checker_dce.Run();
-  ASSERT_TRUE(ssa_checker_dce.IsValid());
-
-  StringPrettyPrinter printer_after_dce(graph);
-  printer_after_dce.VisitInsertionOrder();
-  std::string actual_after_dce = printer_after_dce.str();
-  ASSERT_EQ(expected_after_dce, actual_after_dce);
-}
-
+/**
+ * Fixture class for the constant folding and dce tests.
+ */
+class ConstantFoldingTest : public testing::Test {
+ public:
+  ConstantFoldingTest() : pool_(), allocator_(&pool_) {
+    graph_ = CreateGraph(&allocator_);
+  }
+
+  void TestCode(const uint16_t* data,
+                const std::string& expected_before,
+                const std::string& expected_after_cf,
+                const std::string& expected_after_dce,
+                std::function<void(HGraph*)> check_after_cf,
+                Primitive::Type return_type = Primitive::kPrimInt) {
+    graph_ = CreateCFG(&allocator_, data, return_type);
+    TestCodeOnReadyGraph(expected_before,
+                         expected_after_cf,
+                         expected_after_dce,
+                         check_after_cf);
+  }
+
+  void TestCodeOnReadyGraph(const std::string& expected_before,
+                            const std::string& expected_after_cf,
+                            const std::string& expected_after_dce,
+                            std::function<void(HGraph*)> check_after_cf) {
+    ASSERT_NE(graph_, nullptr);
+    graph_->TryBuildingSsa();
+
+    StringPrettyPrinter printer_before(graph_);
+    printer_before.VisitInsertionOrder();
+    std::string actual_before = printer_before.str();
+    EXPECT_EQ(expected_before, actual_before);
+
+    std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+        X86InstructionSetFeatures::FromCppDefines());
+    x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions());
+    HConstantFolding(graph_).Run();
+    SSAChecker ssa_checker_cf(graph_);
+    ssa_checker_cf.Run();
+    ASSERT_TRUE(ssa_checker_cf.IsValid());
+
+    StringPrettyPrinter printer_after_cf(graph_);
+    printer_after_cf.VisitInsertionOrder();
+    std::string actual_after_cf = printer_after_cf.str();
+    EXPECT_EQ(expected_after_cf, actual_after_cf);
+
+    check_after_cf(graph_);
+
+    HDeadCodeElimination(graph_).Run();
+    SSAChecker ssa_checker_dce(graph_);
+    ssa_checker_dce.Run();
+    ASSERT_TRUE(ssa_checker_dce.IsValid());
+
+    StringPrettyPrinter printer_after_dce(graph_);
+    printer_after_dce.VisitInsertionOrder();
+    std::string actual_after_dce = printer_after_dce.str();
+    EXPECT_EQ(expected_after_dce, actual_after_dce);
+  }
+
+  ArenaPool pool_;
+  ArenaAllocator allocator_;
+  HGraph* graph_;
+};
 
 /**
  * Tiny three-register program exercising int constant folding on negation.
@@ -84,7 +104,7 @@ static void TestCode(const uint16_t* data,
  *     v1 <- -v0                1.      neg-int v1, v0
  *     return v1                2.      return v1
  */
-TEST(ConstantFolding, IntConstantFoldingNegation) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingNegation) {
   const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::NEG_INT | 1 << 8 | 0 << 12,
@@ -141,7 +161,7 @@
  *     (v2, v3) <- -(v0, v1)    1.      neg-long v2, v0
  *     return (v2, v3)          2.      return-wide v2
  */
-TEST(ConstantFolding, LongConstantFoldingNegation) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) {
   const int64_t input = INT64_C(4294967296);             // 2^32
   const uint16_t word0 = Low16Bits(Low32Bits(input));    // LSW.
   const uint16_t word1 = High16Bits(Low32Bits(input));
@@ -205,7 +225,7 @@
  *     v2 <- v0 + v1            2.      add-int v2, v0, v1
  *     return v2                4.      return v2
 */
-TEST(ConstantFolding, IntConstantFoldingOnAddition1) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition1) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -271,7 +291,7 @@
  *     v2 <- v0 + v1            6.      add-int v2, v0, v1
  *     return v2                8.      return v2
  */
-TEST(ConstantFolding, IntConstantFoldingOnAddition2) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition2) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -357,7 +377,7 @@
  *     v2 <- v0 - v1            2.      sub-int v2, v0, v1
  *     return v2                4.      return v2
 */
-TEST(ConstantFolding, IntConstantFoldingOnSubtraction) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnSubtraction) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 3 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -421,7 +441,7 @@
  *     (v0, v1) + (v1, v2)      4.      add-long v4, v0, v2
  *     return (v4, v5)          6.      return-wide v4
  */
-TEST(ConstantFolding, LongConstantFoldingOnAddition) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) {
   const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
     Instruction::CONST_WIDE_16 | 0 << 8, 1,
     Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -486,7 +506,7 @@
  *     (v0, v1) - (v1, v2)      4.      sub-long v4, v0, v2
  *     return (v4, v5)          6.      return-wide v4
  */
-TEST(ConstantFolding, LongConstantFoldingOnSubtraction) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
   const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
     Instruction::CONST_WIDE_16 | 0 << 8, 3,
     Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -560,7 +580,7 @@
  * L3: v2 <- v1 + 8            11.      add-int/lit16 v2, v1, #+8
  *     return v2               13.      return v2
 */
-TEST(ConstantFolding, IntConstantFoldingAndJumps) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingAndJumps) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 0 << 8 | 1 << 12,
     Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -656,7 +676,6 @@ TEST(ConstantFolding, IntConstantFoldingAndJumps) {
                check_after_cf);
 }
 
-
 /**
  * Three-register program with a constant (static) condition.
  *
@@ -670,7 +689,7 @@
  * L1: v2 <- v0 + v1            5.      add-int v2, v0, v1
  *     return-void              7.      return
 */
-TEST(ConstantFolding, ConstantCondition) {
+TEST_F(ConstantFoldingTest, ConstantCondition) {
   const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
     Instruction::CONST_4 | 1 << 8 | 1 << 12,
     Instruction::CONST_4 | 0 << 8 | 0 << 12,
@@ -732,4 +751,109 @@
                check_after_cf);
 }
 
+/**
+ * Unsigned comparisons with zero. Since these instructions are not present
+ * in the bytecode, we need to set up the graph explicitly.
+ */
+TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) {
+  graph_ = CreateGraph(&allocator_);
+  HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
+  graph_->AddBlock(entry_block);
+  graph_->SetEntryBlock(entry_block);
+  HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+  graph_->AddBlock(block);
+  HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
+  graph_->AddBlock(exit_block);
+  graph_->SetExitBlock(exit_block);
+  entry_block->AddSuccessor(block);
+  block->AddSuccessor(exit_block);
+
+  // Make various unsigned comparisons with zero against a parameter.
+  HInstruction* parameter = new (&allocator_) HParameterValue(
+      graph_->GetDexFile(), 0, 0, Primitive::kPrimInt, true);
+  entry_block->AddInstruction(parameter);
+  HInstruction* zero = graph_->GetIntConstant(0);
+  HInstruction* last;
+  block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+  block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero));
+  block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+
+  entry_block->AddInstruction(new (&allocator_) HGoto());
+  block->AddInstruction(new (&allocator_) HReturn(zero));
+  exit_block->AddInstruction(new (&allocator_) HExit());
+
+  const std::string expected_before =
+      "BasicBlock 0, succ: 1\n"
+      "  0: ParameterValue [16, 14, 12, 10, 8, 6, 4, 2]\n"
+      "  1: IntConstant [19, 16, 14, 12, 10, 8, 6, 4, 2]\n"
+      "  18: Goto 1\n"
+      "BasicBlock 1, pred: 0, succ: 2\n"
+      "  2: Above(1, 0) [3]\n"
+      "  3: Deoptimize(2)\n"
+      "  4: Above(0, 1) [5]\n"
+      "  5: Deoptimize(4)\n"
+      "  6: AboveOrEqual(1, 0) [7]\n"
+      "  7: Deoptimize(6)\n"
+      "  8: AboveOrEqual(0, 1) [9]\n"
+      "  9: Deoptimize(8)\n"
+      "  10: Below(1, 0) [11]\n"
+      "  11: Deoptimize(10)\n"
+      "  12: Below(0, 1) [13]\n"
+      "  13: Deoptimize(12)\n"
+      "  14: BelowOrEqual(1, 0) [15]\n"
+      "  15: Deoptimize(14)\n"
+      "  16: BelowOrEqual(0, 1) [17]\n"
+      "  17: Deoptimize(16)\n"
+      "  19: Return(1)\n"
+      "BasicBlock 2, pred: 1\n"
+      "  20: Exit\n";
+
+  const std::string expected_after_cf =
+      "BasicBlock 0, succ: 1\n"
+      "  0: ParameterValue [16, 10, 6, 4]\n"
+      "  1: IntConstant [13, 3, 19, 16, 10, 6, 4]\n"
+      "  21: IntConstant [15, 9]\n"
+      "  18: Goto 1\n"
+      "BasicBlock 1, pred: 0, succ: 2\n"
+      "  3: Deoptimize(1)\n"
+      "  4: Above(0, 1) [5]\n"
+      "  5: Deoptimize(4)\n"
+      "  6: AboveOrEqual(1, 0) [7]\n"
+      "  7: Deoptimize(6)\n"
+      "  9: Deoptimize(21)\n"
+      "  10: Below(1, 0) [11]\n"
+      "  11: Deoptimize(10)\n"
+      "  13: Deoptimize(1)\n"
+      "  15: Deoptimize(21)\n"
+      "  16: BelowOrEqual(0, 1) [17]\n"
+      "  17: Deoptimize(16)\n"
+      "  19: Return(1)\n"
+      "BasicBlock 2, pred: 1\n"
+      "  20: Exit\n";
+
+  const std::string expected_after_dce = expected_after_cf;
+
+  auto check_after_cf = [](HGraph* graph) {
+    CHECK(graph != nullptr);
+  };
+
+  TestCodeOnReadyGraph(expected_before,
+                       expected_after_cf,
+                       expected_after_dce,
+                       check_after_cf);
+}
+
 }  // namespace art
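The test refactor moves per-test state (arena pool, allocator, graph) from the free TestCode() helper into fixture members, so that TestCodeOnReadyGraph() can also be invoked directly on a manually constructed graph, as the new UnsignedComparisonsWithZero test does. A minimal sketch of the same pattern with hypothetical names (link against gtest_main):

#include <gtest/gtest.h>

// Sketch of the fixture pattern used above (illustrative names, not ART
// code): shared state lives in members, and two entry points reuse it.
class FoldingTest : public testing::Test {
 protected:
  void RunOnReadyInput(int input) {  // analogous to TestCodeOnReadyGraph()
    EXPECT_GE(input, 0);
  }
  void Run(int raw) {                // analogous to TestCode(): builds input first
    input_ = raw * raw;
    RunOnReadyInput(input_);
  }
  int input_ = 0;                    // analogous to graph_/allocator_
};

TEST_F(FoldingTest, UsesPreparedInput) { Run(3); }
TEST_F(FoldingTest, UsesReadyInput) { RunOnReadyInput(42); }
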
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 006d2c7d30..3be7181df2 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -46,7 +46,7 @@ inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj)
       DCHECK(Test(obj));
       return true;
     }
-  } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word, old_word | mask));
+  } while (!atomic_entry->CompareExchangeWeakRelaxed(old_word, old_word | mask));
   DCHECK(Test(obj));
   return false;
 }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d7e8f81227..20e775c7aa 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1664,7 +1664,7 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
       // It was updated by the mutator.
       break;
     }
-  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+  } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
       false, false, kVerifyNone>(offset, expected_ref, new_ref));
 }
 
@@ -1689,7 +1689,7 @@ void ConcurrentCopying::VisitRoots(
         // It was updated by the mutator.
        break;
      }
-    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
   }
 }
 
@@ -1710,7 +1710,7 @@ void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* ro
         // It was updated by the mutator.
        break;
      }
-    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
   }
 }
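All of the hunks above relax CASes that sit inside retry loops: the loop already tolerates failure, and no surrounding memory access depends on the ordering of the bit or reference update, so relaxed ordering suffices. A sketch of the same loop shape using std::atomic (assuming ART's CompareExchangeWeakRelaxed corresponds to compare_exchange_weak with std::memory_order_relaxed):

#include <atomic>
#include <cstdint>

// Shape of SpaceBitmap::AtomicTestAndSet, in plain std::atomic terms.
bool TestAndSetBit(std::atomic<uintptr_t>& word, uintptr_t mask) {
  uintptr_t old_word = word.load(std::memory_order_relaxed);
  do {
    if ((old_word & mask) != 0) {
      return true;  // Bit already set; nothing to do.
    }
    // A weak CAS may fail spuriously, which is why it sits in a loop;
    // relaxed ordering is enough because no other memory access is
    // published or consumed through this bit flip.
  } while (!word.compare_exchange_weak(old_word, old_word | mask,
                                       std::memory_order_relaxed));
  return false;
}
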
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 90180c545b..5c12091ecb 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -95,6 +95,12 @@ inline bool Object::CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) {
       OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
 }
 
+inline bool Object::CasLockWordWeakRelease(LockWord old_val, LockWord new_val) {
+  // Force use of non-transactional mode and do not check.
+  return CasFieldWeakRelease32<false, false>(
+      OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
+}
+
 inline uint32_t Object::GetLockOwnerThreadId() {
   return Monitor::GetLockOwnerThreadId(this);
 }
@@ -175,7 +181,10 @@ inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object*
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(expected_rb_ptr)));
     new_lw = lw;
     new_lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
-  } while (!CasLockWordWeakSequentiallyConsistent(expected_lw, new_lw));
+    // This CAS is a CAS release so that when GC updates all the fields of an object and then
+    // changes the object from gray to black, the field updates (stores) will be visible (won't be
+    // reordered after this CAS.)
+  } while (!CasLockWordWeakRelease(expected_lw, new_lw));
   return true;
 #elif USE_BROOKS_READ_BARRIER
   DCHECK(kUseBrooksReadBarrier);
@@ -671,6 +680,24 @@ inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
+                                          int32_t old_value, int32_t new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
+
+  return atomic_addr->CompareExchangeWeakRelease(old_value, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
                                                            int32_t old_value, int32_t new_value) {
   if (kCheckTransaction) {
@@ -944,6 +971,62 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrie
   return success;
 }
 
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
+    MemberOffset field_offset, Object* old_value, Object* new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  if (kVerifyFlags & kVerifyWrites) {
+    VerifyObject(new_value);
+  }
+  if (kVerifyFlags & kVerifyReads) {
+    VerifyObject(old_value);
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+  }
+  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+  bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref.reference_,
+                                                         new_ref.reference_);
+  return success;
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
+    MemberOffset field_offset, Object* old_value, Object* new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  if (kVerifyFlags & kVerifyWrites) {
+    VerifyObject(new_value);
+  }
+  if (kVerifyFlags & kVerifyReads) {
+    VerifyObject(old_value);
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+  }
+  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+  Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+  bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref.reference_,
+                                                           new_ref.reference_);
+  return success;
+}
+
 template<bool kIsStatic, typename Visitor>
 inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) {
   if (!kIsStatic && (ref_offsets != mirror::Class::kClassWalkSuper)) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f75b8aeef4..022f31dc53 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -135,6 +135,8 @@ class MANAGED LOCKABLE Object {
       SHARED_REQUIRES(Locks::mutator_lock_);
   bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   uint32_t GetLockOwnerThreadId();
 
   mirror::Object* MonitorEnter(Thread* self)
@@ -276,7 +278,6 @@ class MANAGED LOCKABLE Object {
                                                  Object* old_value,
                                                  Object* new_value)
       SHARED_REQUIRES(Locks::mutator_lock_);
-
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
@@ -288,6 +289,18 @@ class MANAGED LOCKABLE Object {
                                                                Object* old_value,
                                                                Object* new_value)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+                                                    Object* old_value,
+                                                    Object* new_value)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+                                                      Object* old_value,
+                                                      Object* new_value)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
@@ -396,6 +409,12 @@ class MANAGED LOCKABLE Object {
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
+                             int32_t new_value) ALWAYS_INLINE
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
                                               int32_t new_value) ALWAYS_INLINE
       SHARED_REQUIRES(Locks::mutator_lock_);
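The in-code comment above pins down the one CAS that still needs ordering: when the GC flips an object from gray to black, the field stores it made earlier must be visible to any thread that observes black. That is a classic release/acquire pairing; a minimal std::atomic sketch (illustrative encoding, not ART's lock-word layout):

#include <atomic>

std::atomic<int> payload{0};
std::atomic<int> color{0};  // 0 = gray, 1 = black (hypothetical encoding)

// GC side: publish the field update, then flip gray -> black with a
// release CAS so the payload store cannot be reordered after the flip.
void GcPublish() {
  payload.store(42, std::memory_order_relaxed);
  int expected = 0;
  while (!color.compare_exchange_weak(expected, 1,
                                      std::memory_order_release)) {
    expected = 0;  // Retry after a spurious failure.
  }
}

// Mutator side: an acquire load of the color guarantees that a thread
// observing black also observes the published payload.
int MutatorRead() {
  if (color.load(std::memory_order_acquire) == 1) {
    return payload.load(std::memory_order_relaxed);  // Guaranteed 42.
  }
  return -1;  // Object still gray; payload not yet published.
}
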
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 85ac4aab96..4998a6a478 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -63,7 +63,7 @@ inline MirrorType* ReadBarrier::Barrier(
       ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
       if (ref != old_ref) {
-        obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
+        obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
             offset, old_ref, ref);
       }
     }
@@ -101,7 +101,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
       if (ref != old_ref) {
         Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
-        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+        atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
       }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
@@ -140,7 +140,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<Mirro
       if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
         auto* atomic_root =
             reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
-        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
       }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
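Unlike the loops earlier in this change, these CASes are one-shot fix-ups: "This may fail if mutator updates before us, but it's ok" means losing the race is acceptable, so no ordering is needed, and a strong CAS is used because there is no retry loop to absorb spurious failures. A sketch of the pattern (illustrative types; the real code operates on heap references):

#include <atomic>

// One-shot fix-up, as in ReadBarrier::Barrier: try once to install the
// to-space reference; if a concurrent update wins, simply move on.
void FixUpField(std::atomic<void*>& field, void* old_ref, void* new_ref) {
  if (new_ref != old_ref) {
    // Strong CAS: no loop, so a spurious failure would be a missed
    // update; relaxed ordering, because correctness does not depend on
    // when other threads observe the fix-up.
    field.compare_exchange_strong(old_ref, new_ref,
                                  std::memory_order_relaxed);
  }
}
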
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ed859cfbb0..dcf9601b4b 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -60,8 +60,11 @@ static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
 static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;
 
 ThreadList::ThreadList()
-    : suspend_all_count_(0), debug_suspend_all_count_(0), unregistering_count_(0),
-      suspend_all_historam_("suspend all histogram", 16, 64), long_suspend_(false) {
+    : suspend_all_count_(0),
+      debug_suspend_all_count_(0),
+      unregistering_count_(0),
+      suspend_all_historam_("suspend all histogram", 16, 64),
+      long_suspend_(false) {
   CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
 }
 
@@ -381,7 +384,8 @@ size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
 // from-space to to-space refs. Used to synchronize threads at a point
 // to mark the initiation of marking while maintaining the to-space
 // invariant.
-size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
+                                   Closure* flip_callback,
                                    gc::collector::GarbageCollector* collector) {
   TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
   const uint64_t start_time = NanoTime();
@@ -509,7 +513,9 @@ void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
 // Debugger thread might be set to kRunnable for a short period of time after the
 // SuspendAllInternal. This is safe because it will be set back to suspended state before
 // the SuspendAll returns.
-void ThreadList::SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2,
+void ThreadList::SuspendAllInternal(Thread* self,
+                                    Thread* ignore1,
+                                    Thread* ignore2,
                                     bool debug_suspend) {
   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
   Locks::thread_list_lock_->AssertNotHeld(self);
@@ -698,12 +704,14 @@ void ThreadList::Resume(Thread* thread, bool for_debugger) {
   VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
 }
 
-static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const char* message,
+static void ThreadSuspendByPeerWarning(Thread* self,
+                                       LogSeverity severity,
+                                       const char* message,
                                        jobject peer) {
   JNIEnvExt* env = self->GetJniEnv();
   ScopedLocalRef<jstring>
-      scoped_name_string(env, (jstring)env->GetObjectField(
-          peer, WellKnownClasses::java_lang_Thread_name));
+      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
+          peer, WellKnownClasses::java_lang_Thread_name)));
   ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
   if (scoped_name_chars.c_str() == nullptr) {
     LOG(severity) << message << ": " << peer;
@@ -713,8 +721,10 @@ static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const
   }
 }
 
-Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
-                                        bool debug_suspension, bool* timed_out) {
+Thread* ThreadList::SuspendThreadByPeer(jobject peer,
+                                        bool request_suspension,
+                                        bool debug_suspension,
+                                        bool* timed_out) {
   const uint64_t start_time = NanoTime();
   useconds_t sleep_us = kThreadSuspendInitialSleepUs;
   *timed_out = false;
@@ -811,12 +821,14 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
   }
 }
 
-static void ThreadSuspendByThreadIdWarning(LogSeverity severity, const char* message,
+static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
+                                           const char* message,
                                            uint32_t thread_id) {
   LOG(severity) << StringPrintf("%s: %d", message, thread_id);
 }
 
-Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension,
+Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
+                                            bool debug_suspension,
                                             bool* timed_out) {
   const uint64_t start_time = NanoTime();
   useconds_t sleep_us = kThreadSuspendInitialSleepUs;
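Aside from the one-parameter-per-line reflow, the only substantive change in this file swaps a C-style (jstring) cast for static_cast<jstring>, which the compiler can check: a static_cast between unrelated pointer types is rejected, while a C-style cast silently reinterprets. A minimal sketch (hypothetical helper; the JNI calls themselves are the real API):

#include <jni.h>

// static_cast documents the intended downcast from jobject to jstring
// and fails to compile for unrelated pointer conversions, unlike the
// C-style cast it replaces.
jstring GetThreadNameField(JNIEnv* env, jobject peer, jfieldID name_field) {
  return static_cast<jstring>(env->GetObjectField(peer, name_field));
}
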
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index f0933f7786..07ea10dbea 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -55,8 +55,8 @@ class ThreadList {
 
   // Thread suspension support.
   void ResumeAll()
-      UNLOCK_FUNCTION(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      UNLOCK_FUNCTION(Locks::mutator_lock_);
   void Resume(Thread* thread, bool for_debugger = false)
       REQUIRES(!Locks::thread_suspend_count_lock_);
 
@@ -76,7 +76,8 @@ class ThreadList {
   // is set to true.
   Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
                               bool* timed_out)
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
@@ -84,7 +85,8 @@ class ThreadList {
   // the thread terminating. Note that as thread ids are recycled this may not suspend the expected
   // thread, that may be terminating. If the suspension times out then *timeout is set to true.
   Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   // Find an already suspended thread (or self) by its id.
@@ -92,7 +94,7 @@ class ThreadList {
 
   // Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside
   // of the suspend check. Returns how many checkpoints that are expected to run, including for
-  // already suspended threads.
+  // already suspended threads for b/24191051.
   size_t RunCheckpoint(Closure* checkpoint_function)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
@@ -101,14 +103,17 @@ class ThreadList {
 
   // Flip thread roots from from-space refs to to-space refs. Used by
   // the concurrent copying collector.
-  size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+  size_t FlipThreadRoots(Closure* thread_flip_visitor,
+                         Closure* flip_callback,
                          gc::collector::GarbageCollector* collector)
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   // Suspends all threads
   void SuspendAllForDebugger()
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
   void SuspendSelfForDebugger()
@@ -127,10 +132,14 @@ class ThreadList {
 
   // Add/remove current thread from list.
   void Register(Thread* self)
-      REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_,
+      REQUIRES(Locks::runtime_shutdown_lock_)
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
+  void Unregister(Thread* self)
+      REQUIRES(!Locks::mutator_lock_,
+               !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
-  void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
-                                         !Locks::thread_suspend_count_lock_);
 
   void VisitRoots(RootVisitor* visitor) const
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -160,7 +169,9 @@ class ThreadList {
   void WaitForOtherNonDaemonThreadsToExit()
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
-  void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr,
+  void SuspendAllInternal(Thread* self,
+                          Thread* ignore1,
+                          Thread* ignore2 = nullptr,
                           bool debug_suspend = false)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
@@ -201,8 +212,8 @@ class ScopedSuspendAll : public ValueObject {
                 !Locks::mutator_lock_);
   // No REQUIRES(mutator_lock_) since the unlock function already asserts this.
   ~ScopedSuspendAll()
-      UNLOCK_FUNCTION(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      UNLOCK_FUNCTION(Locks::mutator_lock_);
 };
 
 }  // namespace art
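The header hunks reorder Clang thread-safety annotations so UNLOCK_FUNCTION follows REQUIRES, and split long REQUIRES lists one capability per line. These macros wrap Clang's capability attributes; a minimal sketch using the attribute spellings from the Clang thread-safety documentation (assumption: ART's REQUIRES/UNLOCK_FUNCTION expand to roughly these; compile with -Wthread-safety, and -Wthread-safety-negative for the negated capabilities):

#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...) __attribute__((release_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") Mutex {
  void Lock() ACQUIRE();
  void Unlock() RELEASE();
};

Mutex thread_list_lock;
Mutex mutator_lock;

// Same shape as ResumeAll() above: the caller must NOT hold
// thread_list_lock (a negative capability), and the function releases
// mutator_lock on exit. Listing REQUIRES before UNLOCK_FUNCTION, one
// capability per line, is the style this change enforces.
void ResumeAllLike() REQUIRES(!thread_list_lock) UNLOCK_FUNCTION(mutator_lock);
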