Make .bss entry stores atomic release operations.
Rely on architecture-dependent behavior for the .bss entry loads; on
x86, the strong memory model already provides the required ordering,
so no explicit fence is needed there.

This fixes theoretical races where one thread updates a .bss entry and
another thread reads it immediately afterwards; previously the stores
did not guarantee that the data they published was visible to the
reading thread.
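
For illustration, a minimal C++ sketch of the publish/consume pattern
this change relies on, using hypothetical names (ArtMethodStub,
bss_slot, PublishEntry, ReadEntry) rather than ART's actual
declarations: the writer publishes with a release store, and the
reader's plain load is ordered by the x86 memory model (or by the
address dependency on the loaded pointer on other architectures).

  // Illustrative sketch only, not ART code.
  #include <atomic>
  #include <cstdint>

  struct ArtMethodStub { uint32_t flags; };        // Stand-in for the real entry type.
  std::atomic<ArtMethodStub*> bss_slot{nullptr};   // Hypothetical .bss entry.

  void PublishEntry(ArtMethodStub* resolved) {
    // Release store: all writes initializing *resolved become visible to
    // any thread whose load of bss_slot observes this value.
    bss_slot.store(resolved, std::memory_order_release);
  }

  ArtMethodStub* ReadEntry() {
    // Compiled code performs a plain load; on x86 the strong memory model
    // (and elsewhere, the address dependency on the loaded pointer)
    // provides the required ordering without an explicit fence.
    return bss_slot.load(std::memory_order_relaxed);
  }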
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: aosp_taimen-userdebug boots.
Test: run-gtests.sh
Test: testrunner.py --target --optimizing
Change-Id: Ie7b7969eb355025b9c9205f8c936e702861943f4
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d71b694..112d710 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4874,6 +4874,7 @@
temp.AsRegister<Register>());
__ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
RecordMethodBssEntryPatch(invoke);
+ // No need for memory fence, thanks to the x86 memory model.
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -6617,6 +6618,7 @@
Address address(method_address, CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+ // No need for memory fence, thanks to the x86 memory model.
generate_null_check = true;
break;
}
@@ -6814,6 +6816,7 @@
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+ // No need for memory fence, thanks to the x86 memory model.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);