author    | 2022-05-29 14:17:29 +0000
committer | 2022-08-10 18:06:05 +0000
commit    | 65429ce9bcaf8779f5d6cd14aeee4acd60006095 (patch)
tree      | 1f2f185a17d43dd9a5aecdeaad09d4706c37cbc8
parent    | ca5ed9f281a5758814d2495da80178de56945720 (diff)
Fix gtest failures due to userfaultfd GC
Mostly involved replacing the compile-time check for the read barrier
with a runtime one.
Bug: 160737021
Test: art/test/testrunner/run_build_test_target.py -j <core-count> art-test
Change-Id: I685e4b717d314a19f7714ece5c8b84f23154cc66
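
The whole change follows a single pattern: a preprocessor guard is swapped for a runtime condition, because with the userfaultfd (CMC) collector the read-barrier choice is no longer fixed at build time. The standalone sketch below is not part of the patch; the two flags are stand-ins for ART's `kUseBakerReadBarrier` constant and `gUseReadBarrier` global, and the function body stands in for the test bodies touched in the hunks further down.

```cpp
// Sketch only: compile-time -> runtime gating, as applied by this patch.
#include <iostream>

constexpr bool kUseBakerReadBarrier = true;  // still decided at build time
bool gUseReadBarrier = true;                 // now decided at runtime (CC vs. CMC)

void RunReadBarrierSpecificChecks() {
  // Before: this body would be compiled out unless USE_READ_BARRIER and
  // USE_BAKER_READ_BARRIER were defined.
  // After: the code always builds, and the decision is taken here at runtime.
  if (kUseBakerReadBarrier && gUseReadBarrier) {
    std::cout << "running Baker read-barrier specific expectations\n";
  } else {
    std::cout << "skipped: runtime is not using Baker read barriers\n";
  }
}

int main() {
  RunReadBarrierSpecificChecks();   // e.g. CC (read-barrier) configuration
  gUseReadBarrier = false;          // e.g. userfaultfd (CMC) GC selected instead
  RunReadBarrierSpecificChecks();
  return 0;
}
```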
-rw-r--r-- | compiler/jni/jni_cfi_test.cc               | 21
-rw-r--r-- | compiler/optimizing/optimizing_cfi_test.cc | 17
-rw-r--r-- | runtime/common_runtime_test.cc             |  3
-rw-r--r-- | runtime/gc/system_weak_test.cc             | 25
4 files changed, 50 insertions, 16 deletions
```diff
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 9e3bb86fb1..368b87c9cd 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -124,22 +124,31 @@ class JNICFITest : public CFITest {
     TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \
   }
 
+// We can't use compile-time macros for read-barrier as the introduction
+// of userfaultfd-GC has made it a runtime choice.
+#define TEST_ISA_ONLY_CC(isa)                                           \
+  TEST_F(JNICFITest, isa) {                                             \
+    if (kUseBakerReadBarrier && gUseReadBarrier) {                      \
+      std::vector<uint8_t> expected_asm(expected_asm_##isa,             \
+          expected_asm_##isa + arraysize(expected_asm_##isa));          \
+      std::vector<uint8_t> expected_cfi(expected_cfi_##isa,             \
+          expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
+      TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
+    }                                                                   \
+  }
+
 #ifdef ART_ENABLE_CODEGEN_arm
 // Run the tests for ARM only with Baker read barriers, as the
 // expected generated code contains a Marking Register refresh
 // instruction.
-#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-TEST_ISA(kThumb2)
-#endif
+TEST_ISA_ONLY_CC(kThumb2)
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_arm64
 // Run the tests for ARM64 only with Baker read barriers, as the
 // expected generated code contains a Marking Register refresh
 // instruction.
-#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-TEST_ISA(kArm64)
-#endif
+TEST_ISA_ONLY_CC(kArm64)
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_x86
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index bad540e03c..73e1fbea55 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -167,9 +167,20 @@ TEST_ISA(kThumb2)
 // barrier configuration, and as such is removed from the set of
 // callee-save registers in the ARM64 code generator of the Optimizing
 // compiler.
-#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-TEST_ISA(kArm64)
-#endif
+//
+// We can't use compile-time macros for read-barrier as the introduction
+// of userfaultfd-GC has made it a runtime choice.
+TEST_F(OptimizingCFITest, kArm64) {
+  if (kUseBakerReadBarrier && gUseReadBarrier) {
+    std::vector<uint8_t> expected_asm(
+        expected_asm_kArm64,
+        expected_asm_kArm64 + arraysize(expected_asm_kArm64));
+    std::vector<uint8_t> expected_cfi(
+        expected_cfi_kArm64,
+        expected_cfi_kArm64 + arraysize(expected_cfi_kArm64));
+    TestImpl(InstructionSet::kArm64, "kArm64", expected_asm, expected_cfi);
+  }
+}
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_x86
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index a48d860f0a..cd3968610b 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -166,9 +166,6 @@ void CommonRuntimeTestImpl::FinalizeSetup() {
   WellKnownClasses::Init(Thread::Current()->GetJniEnv());
   InitializeIntrinsics();
 
-  // Create the heap thread pool so that the GC runs in parallel for tests. Normally, the thread
-  // pool is created by the runtime.
-  runtime_->GetHeap()->CreateThreadPool();
   runtime_->GetHeap()->VerifyHeap();  // Check for heap corruption before the test
   // Reduce timinig-dependent flakiness in OOME behavior (eg StubTest.AllocObject).
   runtime_->GetHeap()->SetMinIntervalHomogeneousSpaceCompactionByOom(0U);
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index ca112972c2..4f552a6203 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -111,6 +111,7 @@ static bool CollectorDoesAllowOrBroadcast() {
   CollectorType type = Runtime::Current()->GetHeap()->CurrentCollectorType();
   switch (type) {
     case CollectorType::kCollectorTypeCMS:
+    case CollectorType::kCollectorTypeCMC:
     case CollectorType::kCollectorTypeCC:
     case CollectorType::kCollectorTypeSS:
       return true;
@@ -124,6 +125,7 @@ static bool CollectorDoesDisallow() {
   CollectorType type = Runtime::Current()->GetHeap()->CurrentCollectorType();
   switch (type) {
     case CollectorType::kCollectorTypeCMS:
+    case CollectorType::kCollectorTypeCMC:
       return true;
 
     default:
@@ -149,7 +151,12 @@ TEST_F(SystemWeakTest, Keep) {
   // Expect the holder to have been called.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
   EXPECT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
-  EXPECT_EQ(1U, cswh.sweep_count_);
+  // Userfaultfd GC uses SweepSystemWeaks also for concurrent updation.
+  // TODO: Explore this can be reverted back to unconditionally compare with 1
+  // once concurrent updation of native roots is full implemented in userfaultfd
+  // GC.
+  size_t expected_sweep_count = gUseUserfaultfd ? 2U : 1U;
+  EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
 
   // Expect the weak to not be cleared.
   EXPECT_FALSE(cswh.Get().IsNull());
@@ -170,7 +177,12 @@ TEST_F(SystemWeakTest, Discard) {
   // Expect the holder to have been called.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
   EXPECT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
-  EXPECT_EQ(1U, cswh.sweep_count_);
+  // Userfaultfd GC uses SweepSystemWeaks also for concurrent updation.
+  // TODO: Explore this can be reverted back to unconditionally compare with 1
+  // once concurrent updation of native roots is full implemented in userfaultfd
+  // GC.
+  size_t expected_sweep_count = gUseUserfaultfd ? 2U : 1U;
+  EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
 
   // Expect the weak to be cleared.
   EXPECT_TRUE(cswh.Get().IsNull());
@@ -194,7 +206,12 @@ TEST_F(SystemWeakTest, Remove) {
   // Expect the holder to have been called.
   ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
   ASSERT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
-  ASSERT_EQ(1U, cswh.sweep_count_);
+  // Userfaultfd GC uses SweepSystemWeaks also for concurrent updation.
+  // TODO: Explore this can be reverted back to unconditionally compare with 1
+  // once concurrent updation of native roots is full implemented in userfaultfd
+  // GC.
+  size_t expected_sweep_count = gUseUserfaultfd ? 2U : 1U;
+  EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
 
   // Expect the weak to not be cleared.
   ASSERT_FALSE(cswh.Get().IsNull());
@@ -209,7 +226,7 @@ TEST_F(SystemWeakTest, Remove) {
   // Expectation: no change in the numbers.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
   EXPECT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
-  EXPECT_EQ(1U, cswh.sweep_count_);
+  EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
 }
 
 }  // namespace gc
```
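
The system_weak_test.cc hunks encode the behavioral difference the new in-test comment describes: the userfaultfd (CMC) collector reuses SweepSystemWeaks while it concurrently updates native roots, so the holder's sweep counter ends at 2 instead of 1. Below is a minimal, self-contained sketch of that accounting; `gUseUserfaultfd`, `RunFullGc`, and `CountingWeakHolder` are stand-ins for the ART runtime flag, the collection path, and the test's counting holder, not ART code.

```cpp
// Sketch of the configuration-dependent sweep-count expectation tested above.
#include <cassert>
#include <cstddef>

bool gUseUserfaultfd = true;  // in ART this is chosen at runtime

struct CountingWeakHolder {
  std::size_t sweep_count = 0;
  void Sweep() { ++sweep_count; }  // stands in for the SweepSystemWeaks callback
};

void RunFullGc(CountingWeakHolder& holder) {
  holder.Sweep();  // ordinary system-weak sweep, done by every collector
  if (gUseUserfaultfd) {
    holder.Sweep();  // CMC invokes the sweep again while updating native roots
  }
}

int main() {
  CountingWeakHolder holder;
  RunFullGc(holder);
  // Mirrors the test expectation: 2 with userfaultfd GC, 1 otherwise.
  std::size_t expected_sweep_count = gUseUserfaultfd ? 2u : 1u;
  assert(holder.sweep_count == expected_sweep_count);
  return 0;
}
```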