Revert "Add clinit checks at entry for some boot image methods."
This reverts commit c37e3a0a532fb89b62753d0478c1ba3c9fc87bb3.
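For context, the reverted change made the AOT code generators (arm, arm64, x86, x86_64) emit a class-initialization check at the entry of compiled boot-image methods that are static, non-constructor and non-native, when the declaring class is not visibly initialized at compile time and not in the preloaded-classes list. The check runs before the frame is constructed: if the class is visibly initialized, or is being initialized by the executing thread, the compiled body runs; otherwise the code jumps to the quick resolution trampoline. The goal was to avoid installing the resolution trampoline as the method entrypoint and having to dirty that entry after initialization post zygote fork.
A rough C++ model of that per-entry check (hypothetical names and types, not ART's API; the real code compares the packed ClassStatus bits and the clinit thread id loaded directly from mirror::Class, as in the code generator hunks below):

    #include <cstdint>

    // Illustrative stand-ins; only the relative ordering of the status
    // values matters for the comparisons below.
    enum class ClassStatus : uint8_t { kInitializing, kInitialized, kVisiblyInitialized };

    struct DeclaringClass {
      ClassStatus status;
      uint32_t clinit_thread_id;  // id of the thread running <clinit>, if any
    };

    // true  -> fall through into the compiled method body
    // false -> jump to the quick resolution trampoline instead
    bool MayEnterCompiledCode(const DeclaringClass& klass, uint32_t self_tid) {
      if (klass.status >= ClassStatus::kVisiblyInitialized) {
        return true;  // initialization finished and visible to all threads
      }
      if (klass.status >= ClassStatus::kInitializing &&
          klass.clinit_thread_id == self_tid) {
        return true;  // the initializing thread may execute the body
      }
      return false;
    }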
Bug: 162110941
Bug: 238472973
Reason for revert: b/238472973
Change-Id: Ie684612c4e660ff121108ecc5e7455811c93353b
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index b55e32b..51cd999 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -23,7 +23,6 @@
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
-#include "art_method-inl.h"
#include "base/runtime_debug.h"
#include "base/string_view_cpp20.h"
#include "base/variant_map.h"
@@ -147,37 +146,14 @@
bool CompilerOptions::IsImageClass(const char* descriptor) const {
// Historical note: We used to hold the set indirectly and there was a distinction between an
- // empty set and a null, null meaning to include all classes. However, the distinction has been
+ // empty set and a null, null meaning to include all classes. However, the distiction has been
// removed; if we don't have a profile, we treat it as an empty set of classes. b/77340429
return image_classes_.find(std::string_view(descriptor)) != image_classes_.end();
}
-bool CompilerOptions::IsPreloadedClass(const char* pretty_descriptor) const {
- return preloaded_classes_.find(std::string_view(pretty_descriptor)) != preloaded_classes_.end();
-}
-
const VerificationResults* CompilerOptions::GetVerificationResults() const {
DCHECK(Runtime::Current()->IsAotCompiler());
return verification_results_;
}
-bool CompilerOptions::ShouldCompileWithClinitCheck(ArtMethod* method) const {
- if (method->IsStatic() &&
- !method->IsConstructor() &&
- // Compiled code for native methods never do a clinit check, so we may put the resolution
- // trampoline for native methods. This means that it's possible post zygote fork for the
- // entry to be dirtied. We could resolve this by either:
- // - Make these methods use the generic JNI entrypoint, but that's not
- // desirable for a method that is in the profile.
- // - Ensure the declaring class of such native methods are always in the
- // preloaded-classes list.
- // - Emit the clinit check in the compiled code of native methods.
- !method->IsNative()) {
- ScopedObjectAccess soa(Thread::Current());
- ObjPtr<mirror::Class> cls = method->GetDeclaringClass<kWithoutReadBarrier>();
- return cls->IsInBootImageAndNotInPreloadedClasses();
- }
- return false;
-}
-
} // namespace art
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 20f54bd..1bffdb1 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -44,7 +44,6 @@
class Arm64RelativePatcherTest;
} // namespace linker
-class ArtMethod;
class DexFile;
enum class InstructionSet;
class InstructionSetFeatures;
@@ -301,10 +300,6 @@
bool IsImageClass(const char* descriptor) const;
- // Returns whether the given `pretty_descriptor` is in the list of preloaded
- // classes. `pretty_descriptor` should be the result of calling `PrettyDescriptor`.
- bool IsPreloadedClass(const char* pretty_descriptor) const;
-
const VerificationResults* GetVerificationResults() const;
bool ParseCompilerOptions(const std::vector<std::string>& options,
@@ -388,12 +383,6 @@
return ContainsElement(GetDexFilesForOatFile(), dex_file);
}
- // If this is a static non-constructor method in the boot classpath, and its class isn't
- // initialized at compile-time, or won't be initialized by the zygote, add
- // initialization checks at entry. This will avoid the need of trampolines
- // which at runtime we will need to dirty after initialization.
- bool ShouldCompileWithClinitCheck(ArtMethod* method) const;
-
private:
bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
bool ParseRegisterAllocationStrategy(const std::string& option, std::string* error_msg);
@@ -419,10 +408,6 @@
// Must not be empty for real boot image, only for tests pretending to compile boot image.
HashSet<std::string> image_classes_;
- // Classes listed in the preloaded-classes file, used for boot image and
- // boot image extension compilation.
- HashSet<std::string> preloaded_classes_;
-
// Results of AOT verification.
const VerificationResults* verification_results_;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 43cb986..b09219a 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -37,7 +37,6 @@
#include "optimizing_compiler_stats.h"
#include "read_barrier_option.h"
#include "stack.h"
-#include "subtype_check.h"
#include "utils/assembler.h"
#include "utils/label.h"
@@ -61,14 +60,6 @@
static constexpr ReadBarrierOption kCompilerReadBarrierOption =
kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
-constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
-constexpr size_t status_byte_offset =
- mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
-constexpr uint32_t shifted_visibly_initialized_value =
- enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
-constexpr uint32_t shifted_initializing_value =
- enum_cast<uint32_t>(ClassStatus::kInitializing) << (status_lsb_position % kBitsPerByte);
-
class Assembler;
class CodeGenerator;
class CompilerOptions;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d859ac1..2f8c0b2 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1233,46 +1233,6 @@
void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
-
- // Check if we need to generate the clinit check. We will jump to the
- // resolution stub if the class is not initialized and the executing thread is
- // not the thread initializing it.
- // We do this before constructing the frame to get the correct stack trace if
- // an exception is thrown.
- if (GetGraph()->GetArtMethod() != nullptr &&
- GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
- UseScratchRegisterScope temps(masm);
- vixl::aarch64::Label resolution;
-
- Register temp1 = temps.AcquireW();
- Register temp2 = temps.AcquireW();
-
- // Check if we're visibly initialized.
-
- // We don't emit a read barrier here to save on code size. We rely on the
- // resolution trampoline to do a suspend check before re-entering this code.
- __ Ldr(temp1, MemOperand(kArtMethodRegister, ArtMethod::DeclaringClassOffset().Int32Value()));
- __ Ldrb(temp2, HeapOperand(temp1, status_byte_offset));
- __ Cmp(temp2, shifted_visibly_initialized_value);
- __ B(hs, &frame_entry_label_);
-
- // Check if we're initializing and the thread initializing is the one
- // executing the code.
- __ Cmp(temp2, shifted_initializing_value);
- __ B(lo, &resolution);
-
- __ Ldr(temp1, HeapOperand(temp1, mirror::Class::ClinitThreadIdOffset().Int32Value()));
- __ Ldr(temp2, MemOperand(tr, Thread::TidOffset<kArmPointerSize>().Int32Value()));
- __ Cmp(temp1, temp2);
- __ B(eq, &frame_entry_label_);
- __ Bind(&resolution);
-
- // Jump to the resolution stub.
- ThreadOffset64 entrypoint_offset =
- GetThreadOffset<kArm64PointerSize>(kQuickQuickResolutionTrampoline);
- __ Ldr(temp1.X(), MemOperand(tr, entrypoint_offset.Int32Value()));
- __ Br(temp1.X());
- }
__ Bind(&frame_entry_label_);
bool do_overflow_check =
@@ -1944,6 +1904,11 @@
Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireW();
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_visibly_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
// CMP (immediate) is limited to imm12 or imm12<<12, so we would need to materialize
// the constant 0xf0000000 for comparison with the full 32-bit field. To reduce the code
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 7e3ad34..09fa598 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2234,53 +2234,6 @@
bool skip_overflow_check =
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
-
- // Check if we need to generate the clinit check. We will jump to the
- // resolution stub if the class is not initialized and the executing thread is
- // not the thread initializing it.
- // We do this before constructing the frame to get the correct stack trace if
- // an exception is thrown.
- if (GetGraph()->GetArtMethod() != nullptr &&
- GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Label resolution;
-
- // Check if we're visibly initialized.
-
- vixl32::Register temp1 = temps.Acquire();
- // Use r4 as other temporary register.
- DCHECK(!blocked_core_registers_[R4]);
- DCHECK(!kCoreCalleeSaves.Includes(r4));
- vixl32::Register temp2 = r4;
- for (vixl32::Register reg : kParameterCoreRegistersVIXL) {
- DCHECK(!reg.Is(r4));
- }
-
- // We don't emit a read barrier here to save on code size. We rely on the
- // resolution trampoline to do a suspend check before re-entering this code.
- __ Ldr(temp1, MemOperand(kMethodRegister, ArtMethod::DeclaringClassOffset().Int32Value()));
- __ Ldrb(temp2, MemOperand(temp1, status_byte_offset));
- __ Cmp(temp2, shifted_visibly_initialized_value);
- __ B(cs, &frame_entry_label_);
-
- // Check if we're initializing and the thread initializing is the one
- // executing the code.
- __ Cmp(temp2, shifted_initializing_value);
- __ B(lo, &resolution);
-
- __ Ldr(temp1, MemOperand(temp1, mirror::Class::ClinitThreadIdOffset().Int32Value()));
- __ Ldr(temp2, MemOperand(tr, Thread::TidOffset<kArmPointerSize>().Int32Value()));
- __ Cmp(temp1, temp2);
- __ B(eq, &frame_entry_label_);
- __ Bind(&resolution);
-
- // Jump to the resolution stub.
- ThreadOffset32 entrypoint_offset =
- GetThreadOffset<kArmPointerSize>(kQuickQuickResolutionTrampoline);
- __ Ldr(temp1, MemOperand(tr, entrypoint_offset.Int32Value()));
- __ Bx(temp1);
- }
-
__ Bind(&frame_entry_label_);
if (HasEmptyFrame()) {
@@ -7669,7 +7622,12 @@
LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- __ Ldrb(temp, MemOperand(class_reg, status_byte_offset));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ constexpr uint32_t shifted_visibly_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << status_lsb_position;
+
+ const size_t status_offset = mirror::Class::StatusOffset().SizeValue();
+ GetAssembler()->LoadFromOffset(kLoadWord, temp, class_reg, status_offset);
__ Cmp(temp, shifted_visibly_initialized_value);
__ B(lo, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 57b2cac..8c6b802 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1261,45 +1261,6 @@
void CodeGeneratorX86::GenerateFrameEntry() {
__ cfi().SetCurrentCFAOffset(kX86WordSize); // return address
-
- // Check if we need to generate the clinit check. We will jump to the
- // resolution stub if the class is not initialized and the executing thread is
- // not the thread initializing it.
- // We do this before constructing the frame to get the correct stack trace if
- // an exception is thrown.
- if (GetGraph()->GetArtMethod() != nullptr &&
- GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
- NearLabel continue_execution, resolution;
- // We'll use EBP as temporary.
- __ pushl(EBP);
- // Check if we're visibly initialized.
-
- // We don't emit a read barrier here to save on code size. We rely on the
- // resolution trampoline to do a suspend check before re-entering this code.
- __ movl(EBP, Address(kMethodRegisterArgument, ArtMethod::DeclaringClassOffset().Int32Value()));
- __ cmpb(Address(EBP, status_byte_offset), Immediate(shifted_visibly_initialized_value));
- __ j(kAboveEqual, &continue_execution);
-
- // Check if we're initializing and the thread initializing is the one
- // executing the code.
- __ cmpb(Address(EBP, status_byte_offset), Immediate(shifted_initializing_value));
- __ j(kBelow, &resolution);
-
- __ movl(EBP, Address(EBP, mirror::Class::ClinitThreadIdOffset().Int32Value()));
- __ fs()->cmpl(EBP, Address::Absolute(Thread::TidOffset<kX86PointerSize>().Int32Value()));
- __ j(kEqual, &continue_execution);
- __ Bind(&resolution);
-
- __ popl(EBP);
- // Jump to the resolution stub.
- ThreadOffset32 entrypoint_offset =
- GetThreadOffset<kX86PointerSize>(kQuickQuickResolutionTrampoline);
- __ fs()->jmp(Address::Absolute(entrypoint_offset));
-
- __ Bind(&continue_execution);
- __ popl(EBP);
- }
-
__ Bind(&frame_entry_label_);
bool skip_overflow_check =
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
@@ -7272,6 +7233,12 @@
void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
SlowPathCode* slow_path, Register class_reg) {
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_visibly_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
+
__ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_visibly_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 8a19cf2..511917a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1653,45 +1653,6 @@
void CodeGeneratorX86_64::GenerateFrameEntry() {
__ cfi().SetCurrentCFAOffset(kX86_64WordSize); // return address
-
- // Check if we need to generate the clinit check. We will jump to the
- // resolution stub if the class is not initialized and the executing thread is
- // not the thread initializing it.
- // We do this before constructing the frame to get the correct stack trace if
- // an exception is thrown.
- if (GetGraph()->GetArtMethod() != nullptr &&
- GetCompilerOptions().ShouldCompileWithClinitCheck(GetGraph()->GetArtMethod())) {
- NearLabel resolution;
- // Check if we're visibly initialized.
-
- // We don't emit a read barrier here to save on code size. We rely on the
- // resolution trampoline to do a suspend check before re-entering this code.
- __ movl(CpuRegister(TMP),
- Address(CpuRegister(kMethodRegisterArgument),
- ArtMethod::DeclaringClassOffset().Int32Value()));
- __ cmpb(Address(CpuRegister(TMP), status_byte_offset),
- Immediate(shifted_visibly_initialized_value));
- __ j(kAboveEqual, &frame_entry_label_);
-
- // Check if we're initializing and the thread initializing is the one
- // executing the code.
- __ cmpb(Address(CpuRegister(TMP), status_byte_offset), Immediate(shifted_initializing_value));
- __ j(kBelow, &resolution);
-
- __ movl(CpuRegister(TMP),
- Address(CpuRegister(TMP), mirror::Class::ClinitThreadIdOffset().Int32Value()));
- __ gs()->cmpl(
- CpuRegister(TMP),
- Address::Absolute(Thread::TidOffset<kX86_64PointerSize>().Int32Value(), /*no_rip=*/ true));
- __ j(kEqual, &frame_entry_label_);
- __ Bind(&resolution);
-
- // Jump to the resolution stub.
- ThreadOffset64 entrypoint_offset =
- GetThreadOffset<kX86_64PointerSize>(kQuickQuickResolutionTrampoline);
- __ gs()->jmp(Address::Absolute(entrypoint_offset, /*no_rip=*/ true));
- }
-
__ Bind(&frame_entry_label_);
bool skip_overflow_check = IsLeafMethod()
&& !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
@@ -6321,6 +6282,12 @@
void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
SlowPathCode* slow_path, CpuRegister class_reg) {
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_visibly_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
+
__ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_visibly_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9b11a93..9e6103b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2538,16 +2538,16 @@
}
bool PreparePreloadedClasses() {
+ preloaded_classes_ = std::make_unique<HashSet<std::string>>();
if (!preloaded_classes_fds_.empty()) {
for (int fd : preloaded_classes_fds_) {
- if (!ReadCommentedInputFromFd(fd, nullptr, &compiler_options_->preloaded_classes_)) {
+ if (!ReadCommentedInputFromFd(fd, nullptr, preloaded_classes_.get())) {
return false;
}
}
} else {
for (const std::string& file : preloaded_classes_files_) {
- if (!ReadCommentedInputFromFile(
- file.c_str(), nullptr, &compiler_options_->preloaded_classes_)) {
+ if (!ReadCommentedInputFromFile(file.c_str(), nullptr, preloaded_classes_.get())) {
return false;
}
}
@@ -2942,6 +2942,7 @@
const char* dirty_image_objects_filename_;
int dirty_image_objects_fd_;
std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
+ std::unique_ptr<HashSet<std::string>> preloaded_classes_;
std::unique_ptr<std::vector<std::string>> passes_to_run_;
bool is_host_;
std::string android_root_;
diff --git a/dex2oat/driver/compiler_driver.cc b/dex2oat/driver/compiler_driver.cc
index bb2cafa..d9509b0 100644
--- a/dex2oat/driver/compiler_driver.cc
+++ b/dex2oat/driver/compiler_driver.cc
@@ -2232,22 +2232,6 @@
// Make sure the class initialization did not leave any local references.
self->GetJniEnv()->AssertLocalsEmpty();
}
-
- if (!klass->IsVisiblyInitialized() &&
- (is_boot_image || is_boot_image_extension) &&
- !compiler_options.IsPreloadedClass(PrettyDescriptor(descriptor).c_str())) {
- klass->SetInBootImageAndNotInPreloadedClasses();
- }
-
- // For unit-testing the clinit check in compiled code feature.
- if (compiler_options.CompileArtTest() &&
- EndsWith(std::string_view(descriptor), "$NoPreloadHolder;")) {
- klass->SetInBootImageAndNotInPreloadedClasses();
- }
- // For stress testing the feature.
- if (kIsDebugBuild) {
- klass->SetInBootImageAndNotInPreloadedClasses();
- }
}
private:
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 5c697fa..56fbe90 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -3356,7 +3356,7 @@
// The interpreter bridge performs class initialization check if needed.
quick_code = GetOatAddress(StubType::kQuickToInterpreterBridge);
}
- } else if (needs_clinit_check && !compiler_options_.ShouldCompileWithClinitCheck(method)) {
+ } else if (needs_clinit_check) {
// If we do have code but the method needs a class initialization check before calling
// that code, install the resolution stub that will perform the check.
quick_code = GetOatAddress(StubType::kQuickResolutionTrampoline);
diff --git a/libdexfile/dex/modifiers.h b/libdexfile/dex/modifiers.h
index a17545c..72949b0 100644
--- a/libdexfile/dex/modifiers.h
+++ b/libdexfile/dex/modifiers.h
@@ -56,9 +56,6 @@
// Used by a class to denote that this class and any objects with this as a
// declaring-class/super-class are to be considered obsolete, meaning they should not be used by.
static constexpr uint32_t kAccObsoleteObject = 0x00200000; // class (runtime)
-// Set during boot image compilation to indicate that the class is
-// not initialized at compile time and not in the list of preloaded classes.
-static constexpr uint32_t kAccInBootImageAndNotInPreloadedClasses = 0x00400000; // class (runtime)
// This is set by the class linker during LinkInterfaceMethods. It is used by a method
// to represent that it was copied from its declaring class into another class.
// We need copies of the original method because the method may end up in different
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 7a06ce6..c2ce3e2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3394,11 +3394,6 @@
if (!NeedsClinitCheckBeforeCall(method)) {
continue;
}
- if (klass->IsInBootImageAndNotInPreloadedClasses() && !method->IsNative()) {
- // Don't update the entrypoint, this is an ArtMethod which we want to
- // share memory between zygote and apps.
- continue;
- }
instrumentation->UpdateMethodsCode(method, instrumentation->GetCodeForInvoke(method));
}
// Ignore virtual methods on the iterator.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 96f5be5..7e3fdee 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1364,11 +1364,6 @@
success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
}
if (success) {
- // When the clinit check is at entry of the AOT code, we do the check
- // before doing the suspend check. To ensure the code sees the latest
- // version of the class (the code doesn't do a read barrier to reduce
- // size), do a suspend check now.
- self->CheckSuspend();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
// Check if we need instrumented code here. Since resolution stubs could suspend, it is
// possible that we instrumented the entry points after we started executing the resolution
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 97f3bab..48fced4 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -236,15 +236,6 @@
// Set access flags, recording the change if running inside a Transaction.
void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetInBootImageAndNotInPreloadedClasses() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t flags = GetAccessFlags();
- SetAccessFlags(flags | kAccInBootImageAndNotInPreloadedClasses);
- }
-
- ALWAYS_INLINE bool IsInBootImageAndNotInPreloadedClasses() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccInBootImageAndNotInPreloadedClasses) != 0;
- }
-
// Returns true if the class is an enum.
ALWAYS_INLINE bool IsEnum() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccEnum) != 0;
@@ -579,9 +570,6 @@
static constexpr MemberOffset ObjectSizeAllocFastPathOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
}
- static constexpr MemberOffset ClinitThreadIdOffset() {
- return OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_);
- }
ALWAYS_INLINE void SetObjectSize(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/thread.h b/runtime/thread.h
index 2a5abe2..c1c7036 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -749,13 +749,6 @@
}
template<PointerSize pointer_size>
- static constexpr ThreadOffset<pointer_size> TidOffset() {
- return ThreadOffset<pointer_size>(
- OFFSETOF_MEMBER(Thread, tls32_) +
- OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
- }
-
- template<PointerSize pointer_size>
static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +