author Nicolas Geoffray <ngeoffray@google.com> 2022-03-22 15:44:57 +0000
committer Nicolas Geoffray <ngeoffray@google.com> 2022-07-08 10:00:33 +0000
commit  c37e3a0a532fb89b62753d0478c1ba3c9fc87bb3 (patch)
tree    93d519edbe9d5cf5a42e2ca9de15e5dee4c283f5
parent  d88c1499efe2f718f3cc1f45a3dc178471b22ce6 (diff)
Add clinit checks at entry for some boot image methods.
Look at the list of preloaded classes to know whether the class will be
initialized. If it's not in the list, add explicit clinit checks at entry.

Update FixupStaticTrampolines to only update the entrypoint if it is the
resolution stub.

This adds two pages to current on-device boot classpath oat files.

Test: imgdiag
Bug: 162110941
Change-Id: Ic7b0b01a772444bc615b62cdb9305a1ef555c780
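In rough terms, the per-architecture entry sequences added below (arm64, arm, x86, x86-64) all implement the same check. Here is a hedged, self-contained C++ sketch of that logic; the types, field names, and encoded constants are illustrative stand-ins, not ART's real declarations:

  #include <cstdint>

  // Stand-in types for illustration only (not ART's real layout).
  struct Class {
    uint8_t status_byte;        // top nibble holds the ClassStatus
    uint32_t clinit_thread_id;  // tid of the thread running <clinit>
  };
  struct Method {
    Class* declaring_class;
  };

  constexpr uint8_t kShiftedVisiblyInitialized = 0xf0;  // assumed encoded value
  constexpr uint8_t kShiftedInitializing = 0xd0;        // assumed encoded value

  // Returns true when execution may fall through into the method body; false
  // corresponds to the generated code tail-calling the resolution trampoline.
  bool MayEnterCompiledCode(const Method* method, uint32_t self_tid) {
    const Class* cls = method->declaring_class;  // loaded without a read barrier
    const uint8_t status = cls->status_byte;     // single-byte load, as in the backends
    if (status >= kShiftedVisiblyInitialized) {
      return true;  // class is visibly initialized
    }
    if (status >= kShiftedInitializing && cls->clinit_thread_id == self_tid) {
      return true;  // this thread is the one running the class initializer
    }
    return false;   // jump to the quick resolution trampoline
  }

The real implementations load the declaring class from the method register, compare the packed status byte, and branch to the quick resolution trampoline on the slow path.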
-rw-r--r--  compiler/driver/compiler_options.cc                         26
-rw-r--r--  compiler/driver/compiler_options.h                          15
-rw-r--r--  compiler/optimizing/code_generator.h                         9
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc                 45
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc              54
-rw-r--r--  compiler/optimizing/code_generator_x86.cc                   45
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc                45
-rw-r--r--  dex2oat/dex2oat.cc                                           7
-rw-r--r--  dex2oat/driver/compiler_driver.cc                           16
-rw-r--r--  dex2oat/linker/image_writer.cc                               2
-rw-r--r--  libdexfile/dex/modifiers.h                                   3
-rw-r--r--  runtime/class_linker.cc                                      5
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc   5
-rw-r--r--  runtime/mirror/class.h                                      12
-rw-r--r--  runtime/thread.h                                             7
15 files changed, 267 insertions, 29 deletions
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 51cd999b6d..b55e32b59d 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -23,6 +23,7 @@
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "art_method-inl.h"
#include "base/runtime_debug.h"
#include "base/string_view_cpp20.h"
#include "base/variant_map.h"
@@ -146,14 +147,37 @@ bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& optio
bool CompilerOptions::IsImageClass(const char* descriptor) const {
// Historical note: We used to hold the set indirectly and there was a distinction between an
- // empty set and a null, null meaning to include all classes. However, the distiction has been
+ // empty set and a null, null meaning to include all classes. However, the distinction has been
// removed; if we don't have a profile, we treat it as an empty set of classes. b/77340429
return image_classes_.find(std::string_view(descriptor)) != image_classes_.end();
}
+bool CompilerOptions::IsPreloadedClass(const char* pretty_descriptor) const {
+ return preloaded_classes_.find(std::string_view(pretty_descriptor)) != preloaded_classes_.end();
+}
+
const VerificationResults* CompilerOptions::GetVerificationResults() const {
DCHECK(Runtime::Current()->IsAotCompiler());
return verification_results_;
}
+bool CompilerOptions::ShouldCompileWithClinitCheck(ArtMethod* method) const {
+ if (method->IsStatic() &&
+ !method->IsConstructor() &&
+ // Compiled code for native methods never does a clinit check, so we may still
+ // install the resolution trampoline for native methods. This means that it's
+ // possible, post zygote fork, for the entry to be dirtied. We could resolve
+ // this in one of the following ways:
+ // - Make these methods use the generic JNI entrypoint, but that's not
+ // desirable for a method that is in the profile.
+ // - Ensure the declaring class of such native methods are always in the
+ // preloaded-classes list.
+ // - Emit the clinit check in the compiled code of native methods.
+ !method->IsNative()) {
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> cls = method->GetDeclaringClass<kWithoutReadBarrier>();
+ return cls->IsInBootImageAndNotInPreloadedClasses();
+ }
+ return false;
+}
+
} // namespace art
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 1bffdb11ed..20f54bdecd 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -44,6 +44,7 @@ namespace linker {
class Arm64RelativePatcherTest;
} // namespace linker
+class ArtMethod;
class DexFile;
enum class InstructionSet;
class InstructionSetFeatures;
@@ -300,6 +301,10 @@ class CompilerOptions final {
bool IsImageClass(const char* descriptor) const;
+ // Returns whether the given `pretty_descriptor` is in the list of preloaded
+ // classes. `pretty_descriptor` should be the result of calling `PrettyDescriptor`.
+ bool IsPreloadedClass(const char* pretty_descriptor) const;
+
const VerificationResults* GetVerificationResults() const;
bool ParseCompilerOptions(const std::vector<std::string>& options,
@@ -383,6 +388,12 @@ class CompilerOptions final {
return ContainsElement(GetDexFilesForOatFile(), dex_file);
}
+ // Returns whether `method` should be compiled with a class initialization
+ // check at entry: true for a static non-constructor method in the boot
+ // classpath whose class is neither initialized at compile time nor expected
+ // to be initialized by the zygote. This avoids resolution trampolines that
+ // we would otherwise need to dirty at runtime after initialization.
+ bool ShouldCompileWithClinitCheck(ArtMethod* method) const;
+
private:
bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
bool ParseRegisterAllocationStrategy(const std::string& option, std::string* error_msg);
@@ -408,6 +419,10 @@ class CompilerOptions final {
// Must not be empty for real boot image, only for tests pretending to compile boot image.
HashSet<std::string> image_classes_;
+ // Classes listed in the preloaded-classes file, used for boot image and
+ // boot image extension compilation.
+ HashSet<std::string> preloaded_classes_;
+
// Results of AOT verification.
const VerificationResults* verification_results_;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b09219a2ed..43cb9862c2 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -37,6 +37,7 @@
#include "optimizing_compiler_stats.h"
#include "read_barrier_option.h"
#include "stack.h"
+#include "subtype_check.h"
#include "utils/assembler.h"
#include "utils/label.h"
@@ -60,6 +61,14 @@ static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
static constexpr ReadBarrierOption kCompilerReadBarrierOption =
kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
+constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+constexpr size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+constexpr uint32_t shifted_visibly_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
+constexpr uint32_t shifted_initializing_value =
+ enum_cast<uint32_t>(ClassStatus::kInitializing) << (status_lsb_position % kBitsPerByte);
+
class Assembler;
class CodeGenerator;
class CompilerOptions;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 2f8c0b22e7..d859ac1a29 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1233,6 +1233,46 @@ void CodeGeneratorARM64::MaybeIncrementHotness(bool is_frame_entry) {
void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
+
+ // Check if we need to generate the clinit check. We will jump to the
+ // resolution stub if the class is not initialized and the executing thread is
+ // not the thread initializing it.
+ // We do this before constructing the frame to get the correct stack trace if
+ // an exception is thrown.
+ if (GetGraph()->GetArtMethod() != nullptr &&
+ GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
+ UseScratchRegisterScope temps(masm);
+ vixl::aarch64::Label resolution;
+
+ Register temp1 = temps.AcquireW();
+ Register temp2 = temps.AcquireW();
+
+ // Check if we're visibly initialized.
+
+ // We don't emit a read barrier here to save on code size. We rely on the
+ // resolution trampoline to do a suspend check before re-entering this code.
+ __ Ldr(temp1, MemOperand(kArtMethodRegister, ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ Ldrb(temp2, HeapOperand(temp1, status_byte_offset));
+ __ Cmp(temp2, shifted_visibly_initialized_value);
+ __ B(hs, &frame_entry_label_);
+
+ // Check if we're initializing and the thread initializing is the one
+ // executing the code.
+ __ Cmp(temp2, shifted_initializing_value);
+ __ B(lo, &resolution);
+
+ __ Ldr(temp1, HeapOperand(temp1, mirror::Class::ClinitThreadIdOffset().Int32Value()));
+ __ Ldr(temp2, MemOperand(tr, Thread::TidOffset<kArmPointerSize>().Int32Value()));
+ __ Cmp(temp1, temp2);
+ __ B(eq, &frame_entry_label_);
+ __ Bind(&resolution);
+
+ // Jump to the resolution stub.
+ ThreadOffset64 entrypoint_offset =
+ GetThreadOffset<kArm64PointerSize>(kQuickQuickResolutionTrampoline);
+ __ Ldr(temp1.X(), MemOperand(tr, entrypoint_offset.Int32Value()));
+ __ Br(temp1.X());
+ }
__ Bind(&frame_entry_label_);
bool do_overflow_check =
@@ -1904,11 +1944,6 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireW();
- constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
- const size_t status_byte_offset =
- mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
- constexpr uint32_t shifted_visibly_initialized_value =
- enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
// CMP (immediate) is limited to imm12 or imm12<<12, so we would need to materialize
// the constant 0xf0000000 for comparison with the full 32-bit field. To reduce the code
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 09fa598203..7e3ad349ad 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2234,6 +2234,53 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
bool skip_overflow_check =
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+
+ // Check if we need to generate the clinit check. We will jump to the
+ // resolution stub if the class is not initialized and the executing thread is
+ // not the thread initializing it.
+ // We do this before constructing the frame to get the correct stack trace if
+ // an exception is thrown.
+ if (GetGraph()->GetArtMethod() != nullptr &&
+ GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Label resolution;
+
+ // Check if we're visibly initialized.
+
+ vixl32::Register temp1 = temps.Acquire();
+ // Use r4 as the other temporary register.
+ DCHECK(!blocked_core_registers_[R4]);
+ DCHECK(!kCoreCalleeSaves.Includes(r4));
+ vixl32::Register temp2 = r4;
+ for (vixl32::Register reg : kParameterCoreRegistersVIXL) {
+ DCHECK(!reg.Is(r4));
+ }
+
+ // We don't emit a read barrier here to save on code size. We rely on the
+ // resolution trampoline to do a suspend check before re-entering this code.
+ __ Ldr(temp1, MemOperand(kMethodRegister, ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ Ldrb(temp2, MemOperand(temp1, status_byte_offset));
+ __ Cmp(temp2, shifted_visibly_initialized_value);
+ __ B(cs, &frame_entry_label_);
+
+ // Check if we're initializing and the thread initializing is the one
+ // executing the code.
+ __ Cmp(temp2, shifted_initializing_value);
+ __ B(lo, &resolution);
+
+ __ Ldr(temp1, MemOperand(temp1, mirror::Class::ClinitThreadIdOffset().Int32Value()));
+ __ Ldr(temp2, MemOperand(tr, Thread::TidOffset<kArmPointerSize>().Int32Value()));
+ __ Cmp(temp1, temp2);
+ __ B(eq, &frame_entry_label_);
+ __ Bind(&resolution);
+
+ // Jump to the resolution stub.
+ ThreadOffset32 entrypoint_offset =
+ GetThreadOffset<kArmPointerSize>(kQuickQuickResolutionTrampoline);
+ __ Ldr(temp1, MemOperand(tr, entrypoint_offset.Int32Value()));
+ __ Bx(temp1);
+ }
+
__ Bind(&frame_entry_label_);
if (HasEmptyFrame()) {
@@ -7622,12 +7669,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
- constexpr uint32_t shifted_visibly_initialized_value =
- enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << status_lsb_position;
-
- const size_t status_offset = mirror::Class::StatusOffset().SizeValue();
- GetAssembler()->LoadFromOffset(kLoadWord, temp, class_reg, status_offset);
+ __ Ldrb(temp, MemOperand(class_reg, status_byte_offset));
__ Cmp(temp, shifted_visibly_initialized_value);
__ B(lo, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8c6b8027cd..57b2cacd19 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1261,6 +1261,45 @@ void CodeGeneratorX86::MaybeIncrementHotness(bool is_frame_entry) {
void CodeGeneratorX86::GenerateFrameEntry() {
__ cfi().SetCurrentCFAOffset(kX86WordSize); // return address
+
+ // Check if we need to generate the clinit check. We will jump to the
+ // resolution stub if the class is not initialized and the executing thread is
+ // not the thread initializing it.
+ // We do this before constructing the frame to get the correct stack trace if
+ // an exception is thrown.
+ if (GetGraph()->GetArtMethod() != nullptr &&
+ GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
+ NearLabel continue_execution, resolution;
+ // We'll use EBP as a temporary.
+ __ pushl(EBP);
+ // Check if we're visibly initialized.
+
+ // We don't emit a read barrier here to save on code size. We rely on the
+ // resolution trampoline to do a suspend check before re-entering this code.
+ __ movl(EBP, Address(kMethodRegisterArgument, ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ cmpb(Address(EBP, status_byte_offset), Immediate(shifted_visibly_initialized_value));
+ __ j(kAboveEqual, &continue_execution);
+
+ // Check if we're initializing and the thread initializing is the one
+ // executing the code.
+ __ cmpb(Address(EBP, status_byte_offset), Immediate(shifted_initializing_value));
+ __ j(kBelow, &resolution);
+
+ __ movl(EBP, Address(EBP, mirror::Class::ClinitThreadIdOffset().Int32Value()));
+ __ fs()->cmpl(EBP, Address::Absolute(Thread::TidOffset<kX86PointerSize>().Int32Value()));
+ __ j(kEqual, &continue_execution);
+ __ Bind(&resolution);
+
+ __ popl(EBP);
+ // Jump to the resolution stub.
+ ThreadOffset32 entrypoint_offset =
+ GetThreadOffset<kX86PointerSize>(kQuickQuickResolutionTrampoline);
+ __ fs()->jmp(Address::Absolute(entrypoint_offset));
+
+ __ Bind(&continue_execution);
+ __ popl(EBP);
+ }
+
__ Bind(&frame_entry_label_);
bool skip_overflow_check =
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
@@ -7233,12 +7272,6 @@ void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
SlowPathCode* slow_path, Register class_reg) {
- constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
- const size_t status_byte_offset =
- mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
- constexpr uint32_t shifted_visibly_initialized_value =
- enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
-
__ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_visibly_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 511917a735..8a19cf2bd5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1653,6 +1653,45 @@ void CodeGeneratorX86_64::MaybeIncrementHotness(bool is_frame_entry) {
void CodeGeneratorX86_64::GenerateFrameEntry() {
__ cfi().SetCurrentCFAOffset(kX86_64WordSize); // return address
+
+ // Check if we need to generate the clinit check. We will jump to the
+ // resolution stub if the class is not initialized and the executing thread is
+ // not the thread initializing it.
+ // We do this before constructing the frame to get the correct stack trace if
+ // an exception is thrown.
+ if (GetGraph()->GetArtMethod() != nullptr &&
+ GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
+ NearLabel resolution;
+ // Check if we're visibly initialized.
+
+ // We don't emit a read barrier here to save on code size. We rely on the
+ // resolution trampoline to do a suspend check before re-entering this code.
+ __ movl(CpuRegister(TMP),
+ Address(CpuRegister(kMethodRegisterArgument),
+ ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ cmpb(Address(CpuRegister(TMP), status_byte_offset),
+ Immediate(shifted_visibly_initialized_value));
+ __ j(kAboveEqual, &frame_entry_label_);
+
+ // Check if we're initializing and the thread initializing is the one
+ // executing the code.
+ __ cmpb(Address(CpuRegister(TMP), status_byte_offset), Immediate(shifted_initializing_value));
+ __ j(kBelow, &resolution);
+
+ __ movl(CpuRegister(TMP),
+ Address(CpuRegister(TMP), mirror::Class::ClinitThreadIdOffset().Int32Value()));
+ __ gs()->cmpl(
+ CpuRegister(TMP),
+ Address::Absolute(Thread::TidOffset<kX86_64PointerSize>().Int32Value(), /*no_rip=*/ true));
+ __ j(kEqual, &frame_entry_label_);
+ __ Bind(&resolution);
+
+ // Jump to the resolution stub.
+ ThreadOffset64 entrypoint_offset =
+ GetThreadOffset<kX86_64PointerSize>(kQuickQuickResolutionTrampoline);
+ __ gs()->jmp(Address::Absolute(entrypoint_offset, /*no_rip=*/ true));
+ }
+
__ Bind(&frame_entry_label_);
bool skip_overflow_check = IsLeafMethod()
&& !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
@@ -6282,12 +6321,6 @@ void ParallelMoveResolverX86_64::RestoreScratch(int reg) {
void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
SlowPathCode* slow_path, CpuRegister class_reg) {
- constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
- const size_t status_byte_offset =
- mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
- constexpr uint32_t shifted_visibly_initialized_value =
- enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
-
__ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_visibly_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9e6103b424..9b11a93279 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2538,16 +2538,16 @@ class Dex2Oat final {
}
bool PreparePreloadedClasses() {
- preloaded_classes_ = std::make_unique<HashSet<std::string>>();
if (!preloaded_classes_fds_.empty()) {
for (int fd : preloaded_classes_fds_) {
- if (!ReadCommentedInputFromFd(fd, nullptr, preloaded_classes_.get())) {
+ if (!ReadCommentedInputFromFd(fd, nullptr, &compiler_options_->preloaded_classes_)) {
return false;
}
}
} else {
for (const std::string& file : preloaded_classes_files_) {
- if (!ReadCommentedInputFromFile(file.c_str(), nullptr, preloaded_classes_.get())) {
+ if (!ReadCommentedInputFromFile(
+ file.c_str(), nullptr, &compiler_options_->preloaded_classes_)) {
return false;
}
}
@@ -2942,7 +2942,6 @@ class Dex2Oat final {
const char* dirty_image_objects_filename_;
int dirty_image_objects_fd_;
std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
- std::unique_ptr<HashSet<std::string>> preloaded_classes_;
std::unique_ptr<std::vector<std::string>> passes_to_run_;
bool is_host_;
std::string android_root_;
diff --git a/dex2oat/driver/compiler_driver.cc b/dex2oat/driver/compiler_driver.cc
index d9509b0c53..bb2cafaa7e 100644
--- a/dex2oat/driver/compiler_driver.cc
+++ b/dex2oat/driver/compiler_driver.cc
@@ -2232,6 +2232,22 @@ class InitializeClassVisitor : public CompilationVisitor {
// Make sure the class initialization did not leave any local references.
self->GetJniEnv()->AssertLocalsEmpty();
}
+
+ if (!klass->IsVisiblyInitialized() &&
+ (is_boot_image || is_boot_image_extension) &&
+ !compiler_options.IsPreloadedClass(PrettyDescriptor(descriptor).c_str())) {
+ klass->SetInBootImageAndNotInPreloadedClasses();
+ }
+
+ // For unit-testing the clinit check in compiled code feature.
+ if (compiler_options.CompileArtTest() &&
+ EndsWith(std::string_view(descriptor), "$NoPreloadHolder;")) {
+ klass->SetInBootImageAndNotInPreloadedClasses();
+ }
+ // For stress testing the feature.
+ if (kIsDebugBuild) {
+ klass->SetInBootImageAndNotInPreloadedClasses();
+ }
}
private:
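Note that IsPreloadedClass takes PrettyDescriptor output because the preloaded-classes file read by PreparePreloadedClasses (see the dex2oat.cc hunk above) lists classes in Java source notation, one per line, with '#' starting a comment. An illustrative excerpt with made-up entries, not taken from a real device image:

  # Classes preloaded (and typically initialized) by the zygote at boot.
  java.lang.Object
  java.lang.String
  android.app.Activity
  android.util.ArrayMap$EntrySet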
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 56fbe9013b..5c697fa8e5 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -3356,7 +3356,7 @@ const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, const ImageInfo& ima
// The interpreter bridge performs the class initialization check if needed.
quick_code = GetOatAddress(StubType::kQuickToInterpreterBridge);
}
- } else if (needs_clinit_check) {
+ } else if (needs_clinit_check && !compiler_options_.ShouldCompileWithClinitCheck(method)) {
// If we do have code but the method needs a class initialization check before calling
// that code, install the resolution stub that will perform the check.
quick_code = GetOatAddress(StubType::kQuickResolutionTrampoline);
diff --git a/libdexfile/dex/modifiers.h b/libdexfile/dex/modifiers.h
index 72949b0b7b..a17545c52b 100644
--- a/libdexfile/dex/modifiers.h
+++ b/libdexfile/dex/modifiers.h
@@ -56,6 +56,9 @@ static constexpr uint32_t kAccSkipHiddenapiChecks = 0x00100000; // class (run
// Used by a class to denote that this class and any objects with this as a
// declaring-class/super-class are to be considered obsolete, meaning they should not be used.
static constexpr uint32_t kAccObsoleteObject = 0x00200000; // class (runtime)
+// Set during boot image compilation to indicate that the class is
+// not initialized at compile time and not in the list of preloaded classes.
+static constexpr uint32_t kAccInBootImageAndNotInPreloadedClasses = 0x00400000; // class (runtime)
// This is set by the class linker during LinkInterfaceMethods. It is used by a method
// to represent that it was copied from its declaring class into another class.
// We need copies of the original method because the method may end up in different
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c2ce3e2eb8..7a06ce680c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3394,6 +3394,11 @@ void ClassLinker::FixupStaticTrampolines(Thread* self, ObjPtr<mirror::Class> kla
if (!NeedsClinitCheckBeforeCall(method)) {
continue;
}
+ if (klass->IsInBootImageAndNotInPreloadedClasses() && !method->IsNative()) {
+ // Don't update the entrypoint: this ArtMethod lives in memory we want to
+ // keep shared between the zygote and apps.
+ continue;
+ }
instrumentation->UpdateMethodsCode(method, instrumentation->GetCodeForInvoke(method));
}
// Ignore virtual methods on the iterator.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7e3fdee3e1..96f5be56a7 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1364,6 +1364,11 @@ extern "C" const void* artQuickResolutionTrampoline(
success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
}
if (success) {
+ // When the clinit check is at the entry of the AOT code, it runs before
+ // the suspend check. To ensure the code sees the latest version of the
+ // class (the entry check skips the read barrier to reduce code size),
+ // do a suspend check now.
+ self->CheckSuspend();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
// Check if we need instrumented code here. Since resolution stubs could suspend, it is
// possible that we instrumented the entry points after we started executing the resolution
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 48fced45c6..97f3bab2f0 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -236,6 +236,15 @@ class MANAGED Class final : public Object {
// Set access flags, recording the change if running inside a Transaction.
void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetInBootImageAndNotInPreloadedClasses() REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t flags = GetAccessFlags();
+ SetAccessFlags(flags | kAccInBootImageAndNotInPreloadedClasses);
+ }
+
+ ALWAYS_INLINE bool IsInBootImageAndNotInPreloadedClasses() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccInBootImageAndNotInPreloadedClasses) != 0;
+ }
+
// Returns true if the class is an enum.
ALWAYS_INLINE bool IsEnum() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccEnum) != 0;
@@ -570,6 +579,9 @@ class MANAGED Class final : public Object {
static constexpr MemberOffset ObjectSizeAllocFastPathOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
}
+ static constexpr MemberOffset ClinitThreadIdOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_);
+ }
ALWAYS_INLINE void SetObjectSize(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/thread.h b/runtime/thread.h
index c1c70364fd..2a5abe2f33 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -749,6 +749,13 @@ class Thread {
}
template<PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> TidOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
+ }
+
+ template<PointerSize pointer_size>
static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
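The new TidOffset() accessor composes two OFFSETOF_MEMBER results, mirroring the existing InterruptedOffset() shown above. A minimal standalone illustration of that nested-offset pattern, using hypothetical struct names rather than ART's real Thread layout:

  #include <cstddef>
  #include <cstdint>

  struct Tls32 {
    uint32_t state_and_flags;
    uint32_t tid;
  };
  struct ThreadLike {
    void* unrelated;
    Tls32 tls32;
  };

  // Offset of the nested field = offset of the nested struct within the outer
  // struct, plus the offset of the field within the nested struct.
  constexpr size_t kTidOffset = offsetof(ThreadLike, tls32) + offsetof(Tls32, tid);

  int main() {
    ThreadLike t{};
    t.tls32.tid = 42;
    // Reading through the computed byte offset yields the same value.
    const unsigned char* raw = reinterpret_cast<const unsigned char*>(&t);
    return (*reinterpret_cast<const uint32_t*>(raw + kTidOffset) == 42u) ? 0 : 1;
  }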