Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.h            6
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc      4
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc   4
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc    4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     4
-rw-r--r--  compiler/optimizing/graph_visualizer.cc          1
-rw-r--r--  compiler/optimizing/instruction_builder.cc      17
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc   27
-rw-r--r--  compiler/optimizing/intrinsics.h                 2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc          8
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc       8
-rw-r--r--  compiler/optimizing/intrinsics_riscv64.cc        8
-rw-r--r--  compiler/optimizing/intrinsics_utils.h           4
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc         8
-rw-r--r--  compiler/optimizing/nodes.h                     17
-rw-r--r--  compiler/optimizing/sharpening.cc               39
17 files changed, 84 insertions, 81 deletions
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4a6a229098..970da76c43 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -467,10 +467,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
instance_of->GetTypeCheckKind() == TypeCheckKind::kClassHierarchyCheck ||
instance_of->GetTypeCheckKind() == TypeCheckKind::kArrayObjectCheck)
<< instance_of->GetTypeCheckKind();
- // If the target class is in the boot image, it's non-moveable and it doesn't matter
+ // If the target class is in the boot or app image, it's non-moveable and it doesn't matter
// if we compare it with a from-space or to-space reference, the result is the same.
// It's OK to traverse a class hierarchy jumping between from-space and to-space.
- return EmitReadBarrier() && !instance_of->GetTargetClass()->IsInBootImage();
+ return EmitReadBarrier() && !instance_of->GetTargetClass()->IsInImage();
}
ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) {
@@ -485,7 +485,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
case TypeCheckKind::kArrayObjectCheck:
case TypeCheckKind::kInterfaceCheck: {
bool needs_read_barrier =
- EmitReadBarrier() && !check_cast->GetTargetClass()->IsInBootImage();
+ EmitReadBarrier() && !check_cast->GetTargetClass()->IsInImage();
// We do not emit read barriers for HCheckCast, so we can get false negatives
// and the slow path shall re-check and simply return if the cast is actually OK.
return !needs_read_barrier;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4b4cafe378..c8b5dcee98 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5489,7 +5489,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
+ const bool requires_read_barrier = !cls->IsInImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -5531,7 +5531,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
Register out = OutputRegister(cls);
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
+ cls->IsInImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 14687bbe14..eb5fbc4364 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7687,7 +7687,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
+ const bool requires_read_barrier = !cls->IsInImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -7730,7 +7730,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
vixl32::Register out = OutputRegister(cls);
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
+ cls->IsInImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 5a237ca785..c870db662c 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -4252,7 +4252,7 @@ void LocationsBuilderRISCV64::VisitLoadClass(HLoadClass* instruction) {
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = !instruction->IsInBootImage() && codegen_->EmitReadBarrier();
+ const bool requires_read_barrier = !instruction->IsInImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (instruction->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -4294,7 +4294,7 @@ void InstructionCodeGeneratorRISCV64::VisitLoadClass(HLoadClass* instruction) NO_THREAD_SAFETY_ANALYSIS {
Location out_loc = locations->Out();
XRegister out = out_loc.AsRegister<XRegister>();
const ReadBarrierOption read_barrier_option =
- instruction->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
+ instruction->IsInImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 098356ce00..df133f7063 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -7313,7 +7313,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
+ const bool requires_read_barrier = !cls->IsInImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -7364,7 +7364,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
bool generate_null_check = false;
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
+ cls->IsInImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a03ce0a9b2..2acde534af 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6648,7 +6648,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
+ const bool requires_read_barrier = !cls->IsInImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -6700,7 +6700,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
+ cls->IsInImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 5db251cbd3..bc8ebb5917 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -408,6 +408,7 @@ class HGraphVisualizerPrinter final : public HGraphDelegateVisitor {
void VisitLoadClass(HLoadClass* load_class) override {
StartAttributeStream("load_kind") << load_class->GetLoadKind();
+ StartAttributeStream("in_image") << std::boolalpha << load_class->IsInImage();
StartAttributeStream("class_name")
<< load_class->GetDexFile().PrettyType(load_class->GetTypeIndex());
StartAttributeStream("gen_clinit_check")
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 81970d2108..344a93707d 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1513,12 +1513,12 @@ void HInstructionBuilder::BuildConstructorFenceForAllocation(HInstruction* alloc
MethodCompilationStat::kConstructorFenceGeneratedNew);
}
-static bool IsInBootImage(ObjPtr<mirror::Class> cls, const CompilerOptions& compiler_options)
+static bool IsInImage(ObjPtr<mirror::Class> cls, const CompilerOptions& compiler_options)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls)) {
return true;
}
- if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
+ if (compiler_options.IsGeneratingImage()) {
std::string temp;
const char* descriptor = cls->GetDescriptor(&temp);
return compiler_options.IsImageClass(descriptor);
@@ -1634,8 +1634,8 @@ static bool HasTrivialInitialization(ObjPtr<mirror::Class> cls,
// Check the superclass chain.
for (ObjPtr<mirror::Class> klass = cls; klass != nullptr; klass = klass->GetSuperClass()) {
- if (klass->IsInitialized() && IsInBootImage(klass, compiler_options)) {
- break; // `klass` and its superclasses are already initialized in the boot image.
+ if (klass->IsInitialized() && IsInImage(klass, compiler_options)) {
+ break; // `klass` and its superclasses are already initialized in the boot or app image.
}
if (!HasTrivialClinit(klass, pointer_size)) {
return false;
@@ -1650,8 +1650,8 @@ static bool HasTrivialInitialization(ObjPtr<mirror::Class> cls,
if (!iface->HasDefaultMethods()) {
continue; // Initializing `cls` does not initialize this interface.
}
- if (iface->IsInitialized() && IsInBootImage(iface, compiler_options)) {
- continue; // This interface is already initialized in the boot image.
+ if (iface->IsInitialized() && IsInImage(iface, compiler_options)) {
+ continue; // This interface is already initialized in the boot or app image.
}
if (!HasTrivialClinit(iface, pointer_size)) {
return false;
@@ -1669,9 +1669,8 @@ bool HInstructionBuilder::IsInitialized(ObjPtr<mirror::Class> cls) const {
if (cls->IsInitialized()) {
const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions();
if (compiler_options.IsAotCompiler()) {
- // Assume loaded only if klass is in the boot image. App classes cannot be assumed
- // loaded because we don't even know what class loader will be used to load them.
- if (IsInBootImage(cls, compiler_options)) {
+ // Assume loaded only if klass is in the boot or app image.
+ if (IsInImage(cls, compiler_options)) {
return true;
}
} else {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2710f49ef2..56bbd8017f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -127,7 +127,7 @@ class InstructionSimplifierVisitor final : public HGraphDelegateVisitor {
void SimplifyAllocationIntrinsic(HInvoke* invoke);
void SimplifyVarHandleIntrinsic(HInvoke* invoke);
- bool CanUseKnownBootImageVarHandle(HInvoke* invoke);
+ bool CanUseKnownImageVarHandle(HInvoke* invoke);
static bool CanEnsureNotNullAt(HInstruction* input, HInstruction* at);
CodeGenerator* codegen_;
@@ -3025,15 +3025,15 @@ void InstructionSimplifierVisitor::SimplifyVarHandleIntrinsic(HInvoke* invoke) {
}
}
- if (CanUseKnownBootImageVarHandle(invoke)) {
- optimizations.SetUseKnownBootImageVarHandle();
+ if (CanUseKnownImageVarHandle(invoke)) {
+ optimizations.SetUseKnownImageVarHandle();
}
}
-bool InstructionSimplifierVisitor::CanUseKnownBootImageVarHandle(HInvoke* invoke) {
- // If the `VarHandle` comes from a static final field of an initialized class in
- // the boot image, we can do the checks at compile time. We do this optimization only
- // for AOT and only for field handles when we can avoid all checks. This avoids the
+bool InstructionSimplifierVisitor::CanUseKnownImageVarHandle(HInvoke* invoke) {
+ // If the `VarHandle` comes from a static final field of an initialized class in an image
+ // (boot image or app image), we can do the checks at compile time. We do this optimization
+ // only for AOT and only for field handles when we can avoid all checks. This avoids the
// possibility of the code concurrently messing with the `VarHandle` using reflection,
// we simply perform the operation with the `VarHandle` as seen at compile time.
// TODO: Extend this to arrays to support the `AtomicIntegerArray` class.
@@ -3066,18 +3066,17 @@ bool InstructionSimplifierVisitor::CanUseKnownBootImageVarHandle(HInvoke* invoke) {
}
HInstruction* load_class = var_handle_instruction->InputAt(0);
if (kIsDebugBuild) {
- bool is_in_boot_image = false;
+ bool is_in_image = false;
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(declaring_class)) {
- is_in_boot_image = true;
- } else if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
+ is_in_image = true;
+ } else if (compiler_options.IsGeneratingImage()) {
std::string storage;
const char* descriptor = declaring_class->GetDescriptor(&storage);
- is_in_boot_image = compiler_options.IsImageClass(descriptor);
+ is_in_image = compiler_options.IsImageClass(descriptor);
}
- CHECK_EQ(is_in_boot_image,
- load_class->IsLoadClass() && load_class->AsLoadClass()->IsInBootImage());
+ CHECK_EQ(is_in_image, load_class->IsLoadClass() && load_class->AsLoadClass()->IsInImage());
}
- if (!load_class->IsLoadClass() || !load_class->AsLoadClass()->IsInBootImage()) {
+ if (!load_class->IsLoadClass() || !load_class->AsLoadClass()->IsInImage()) {
return false;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 0a431b8aa8..7a27b2506b 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -279,7 +279,7 @@ class VarHandleOptimizations : public IntrinsicOptimizations {
// Note that the object null check is controlled by the above flag `SkipObjectNullCheck`
// and arrays and byte array views (which always need a range check and sometimes also
// array type check) are currently unsupported.
- INTRINSIC_OPTIMIZATION(UseKnownBootImageVarHandle, 2);
+ INTRINSIC_OPTIMIZATION(UseKnownImageVarHandle, 2);
};
#undef INTRISIC_OPTIMIZATION
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 3dde811958..8ed43b1d3e 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -4501,7 +4501,7 @@ static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
__ Cbz(object, slow_path->GetEntryLabel());
}
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
Register temp2 = temps.AcquireW();
@@ -4628,7 +4628,7 @@ static VarHandleSlowPathARM64* GenerateVarHandleChecks(HInvoke* invoke,
DataType::Type type) {
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
VarHandleOptimizations optimizations(invoke);
- if (optimizations.GetUseKnownBootImageVarHandle()) {
+ if (optimizations.GetUseKnownImageVarHandle()) {
DCHECK_NE(expected_coordinates_count, 2u);
if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
return nullptr;
@@ -4639,7 +4639,7 @@ static VarHandleSlowPathARM64* GenerateVarHandleChecks(HInvoke* invoke,
new (codegen->GetScopedAllocator()) VarHandleSlowPathARM64(invoke, order);
codegen->AddSlowPath(slow_path);
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
}
GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
@@ -4674,7 +4674,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
if (expected_coordinates_count <= 1u) {
- if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ if (VarHandleOptimizations(invoke).GetUseKnownImageVarHandle()) {
ScopedObjectAccess soa(Thread::Current());
ArtField* target_field = GetBootImageVarHandleField(invoke);
if (expected_coordinates_count == 0u) {
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index ea7b65181a..25e35540ab 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -4218,7 +4218,7 @@ static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
__ B(eq, slow_path->GetEntryLabel());
}
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
// Use the first temporary register, whether it's for the declaring class or the offset.
// It is not used yet at this point.
vixl32::Register temp = RegisterFrom(invoke->GetLocations()->GetTemp(0u));
@@ -4351,7 +4351,7 @@ static VarHandleSlowPathARMVIXL* GenerateVarHandleChecks(HInvoke* invoke,
DataType::Type type) {
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
VarHandleOptimizations optimizations(invoke);
- if (optimizations.GetUseKnownBootImageVarHandle()) {
+ if (optimizations.GetUseKnownImageVarHandle()) {
DCHECK_NE(expected_coordinates_count, 2u);
if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
return nullptr;
@@ -4362,7 +4362,7 @@ static VarHandleSlowPathARMVIXL* GenerateVarHandleChecks(HInvoke* invoke,
new (codegen->GetScopedAllocator()) VarHandleSlowPathARMVIXL(invoke, order);
codegen->AddSlowPath(slow_path);
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
}
GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
@@ -4397,7 +4397,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
if (expected_coordinates_count <= 1u) {
- if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ if (VarHandleOptimizations(invoke).GetUseKnownImageVarHandle()) {
ScopedObjectAccess soa(Thread::Current());
ArtField* target_field = GetBootImageVarHandleField(invoke);
if (expected_coordinates_count == 0u) {
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index 3ce6d73fae..4e248a2b7c 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -3198,7 +3198,7 @@ static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
__ Beqz(object, slow_path->GetEntryLabel());
}
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
ScratchRegisterScope srs(assembler);
XRegister temp = srs.AllocateXRegister();
@@ -3321,7 +3321,7 @@ static VarHandleSlowPathRISCV64* GenerateVarHandleChecks(HInvoke* invoke,
DataType::Type type) {
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
VarHandleOptimizations optimizations(invoke);
- if (optimizations.GetUseKnownBootImageVarHandle()) {
+ if (optimizations.GetUseKnownImageVarHandle()) {
DCHECK_NE(expected_coordinates_count, 2u);
if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
return nullptr;
@@ -3332,7 +3332,7 @@ static VarHandleSlowPathRISCV64* GenerateVarHandleChecks(HInvoke* invoke,
new (codegen->GetScopedAllocator()) VarHandleSlowPathRISCV64(invoke, order);
codegen->AddSlowPath(slow_path);
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
}
GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
@@ -3368,7 +3368,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
if (expected_coordinates_count <= 1u) {
- if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ if (VarHandleOptimizations(invoke).GetUseKnownImageVarHandle()) {
ScopedObjectAccess soa(Thread::Current());
ArtField* target_field = GetBootImageVarHandleField(invoke);
if (expected_coordinates_count == 0u) {
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index 590bc34ee9..13d9bc4b68 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -210,7 +210,7 @@ static inline DataType::Type GetVarHandleExpectedValueType(HInvoke* invoke,
static inline ArtField* GetBootImageVarHandleField(HInvoke* invoke)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LE(GetExpectedVarHandleCoordinatesCount(invoke), 1u);
- DCHECK(VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle());
+ DCHECK(VarHandleOptimizations(invoke).GetUseKnownImageVarHandle());
HInstruction* var_handle_instruction = invoke->InputAt(0);
if (var_handle_instruction->IsNullCheck()) {
var_handle_instruction = var_handle_instruction->InputAt(0);
@@ -219,7 +219,7 @@ static inline ArtField* GetBootImageVarHandleField(HInvoke* invoke)
ArtField* field = var_handle_instruction->AsStaticFieldGet()->GetFieldInfo().GetField();
DCHECK(field->IsStatic());
DCHECK(field->IsFinal());
- DCHECK(var_handle_instruction->InputAt(0)->AsLoadClass()->IsInBootImage());
+ DCHECK(var_handle_instruction->InputAt(0)->AsLoadClass()->IsInImage());
ObjPtr<mirror::Object> var_handle = field->GetObject(field->GetDeclaringClass());
DCHECK(var_handle->GetClass() ==
(GetExpectedVarHandleCoordinatesCount(invoke) == 0u
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ea45a7f227..b5ddaaa0b7 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -3730,7 +3730,7 @@ static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
__ j(kZero, slow_path->GetEntryLabel());
}
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
// Check that the VarHandle references an instance field by checking that
// coordinateType1 == null. coordinateType0 should be not null, but this is handled by the
// type compatibility check with the source object's type, which will fail for null.
@@ -3850,7 +3850,7 @@ static VarHandleSlowPathX86_64* GenerateVarHandleChecks(HInvoke* invoke,
DataType::Type type) {
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
VarHandleOptimizations optimizations(invoke);
- if (optimizations.GetUseKnownBootImageVarHandle()) {
+ if (optimizations.GetUseKnownImageVarHandle()) {
DCHECK_NE(expected_coordinates_count, 2u);
if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
return nullptr;
@@ -3861,7 +3861,7 @@ static VarHandleSlowPathX86_64* GenerateVarHandleChecks(HInvoke* invoke,
new (codegen->GetScopedAllocator()) VarHandleSlowPathX86_64(invoke);
codegen->AddSlowPath(slow_path);
- if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ if (!optimizations.GetUseKnownImageVarHandle()) {
GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
}
GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
@@ -3898,7 +3898,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
CpuRegister varhandle = locations->InAt(0).AsRegister<CpuRegister>();
if (expected_coordinates_count <= 1u) {
- if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ if (VarHandleOptimizations(invoke).GetUseKnownImageVarHandle()) {
ScopedObjectAccess soa(Thread::Current());
ArtField* target_field = GetBootImageVarHandleField(invoke);
if (expected_coordinates_count == 0u) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6a5213c932..90fc5db02e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -6798,7 +6798,7 @@ class HLoadClass final : public HInstruction {
SetPackedField<LoadKindField>(
is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kRuntimeCall);
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
- SetPackedFlag<kFlagIsInBootImage>(false);
+ SetPackedFlag<kFlagIsInImage>(false);
SetPackedFlag<kFlagGenerateClInitCheck>(false);
SetPackedFlag<kFlagValidLoadedClassRTI>(false);
}
@@ -6851,8 +6851,8 @@ class HLoadClass final : public HInstruction {
bool CanThrow() const override {
return NeedsAccessCheck() ||
MustGenerateClinitCheck() ||
- // If the class is in the boot image, the lookup in the runtime call cannot throw.
- ((GetLoadKind() == LoadKind::kRuntimeCall || NeedsBss()) && !IsInBootImage());
+ // If the class is in the boot or app image, the lookup in the runtime call cannot throw.
+ ((GetLoadKind() == LoadKind::kRuntimeCall || NeedsBss()) && !IsInImage());
}
ReferenceTypeInfo GetLoadedClassRTI() {
@@ -6879,7 +6879,7 @@ class HLoadClass final : public HInstruction {
bool IsReferrersClass() const { return GetLoadKind() == LoadKind::kReferrersClass; }
bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
- bool IsInBootImage() const { return GetPackedFlag<kFlagIsInBootImage>(); }
+ bool IsInImage() const { return GetPackedFlag<kFlagIsInImage>(); }
bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
bool MustResolveTypeOnSlowPath() const {
@@ -6894,8 +6894,8 @@ class HLoadClass final : public HInstruction {
return must_resolve_type_on_slow_path;
}
- void MarkInBootImage() {
- SetPackedFlag<kFlagIsInBootImage>(true);
+ void MarkInImage() {
+ SetPackedFlag<kFlagIsInImage>(true);
}
void AddSpecialInput(HInstruction* special_input);
@@ -6917,10 +6917,11 @@ class HLoadClass final : public HInstruction {
private:
static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
- static constexpr size_t kFlagIsInBootImage = kFlagNeedsAccessCheck + 1;
+ // Whether the type is in an image (boot image or app image).
+ static constexpr size_t kFlagIsInImage = kFlagNeedsAccessCheck + 1;
// Whether this instruction must generate the initialization check.
// Used for code generation.
- static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInBootImage + 1;
+ static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInImage + 1;
static constexpr size_t kFieldLoadKind = kFlagGenerateClInitCheck + 1;
static constexpr size_t kFieldLoadKindSize =
MinimumBitsToStore(static_cast<size_t>(LoadKind::kLast));
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 63929089ca..aa75c2464b 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -166,17 +166,17 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kRuntimeCall ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
- DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
+ DCHECK(!load_class->IsInImage()) << "HLoadClass should not be optimized before sharpening.";
const DexFile& dex_file = load_class->GetDexFile();
dex::TypeIndex type_index = load_class->GetTypeIndex();
const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
- auto is_class_in_current_boot_image = [&]() {
- return (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) &&
+ auto is_class_in_current_image = [&]() {
+ return compiler_options.IsGeneratingImage() &&
compiler_options.IsImageClass(dex_file.GetTypeDescriptor(type_index));
};
- bool is_in_boot_image = false;
+ bool is_in_image = false;
HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
@@ -187,7 +187,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
// for using the ArtMethod* should be considered.
desired_load_kind = HLoadClass::LoadKind::kReferrersClass;
// Determine whether the referrer's class is in the boot image.
- is_in_boot_image = is_class_in_current_boot_image();
+ is_in_image = is_class_in_current_image();
} else if (load_class->NeedsAccessCheck()) {
DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
if (klass != nullptr) {
@@ -195,8 +195,8 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
// and the access check is bound to fail. Just emit the runtime call.
desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
// Determine whether the class is in the boot image.
- is_in_boot_image = Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Get()) ||
- is_class_in_current_boot_image();
+ is_in_image = Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Get()) ||
+ is_class_in_current_image();
} else if (compiler_options.IsJitCompiler()) {
// Unresolved class while JITting means that either we never hit this
// instruction or it failed. Either way, just emit the runtime call.
@@ -233,26 +233,25 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
// Test configuration, do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
// Determine whether the class is in the boot image.
- is_in_boot_image = Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Get()) ||
- is_class_in_current_boot_image();
+ is_in_image = Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Get()) ||
+ is_class_in_current_image();
} else if (klass != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get())) {
DCHECK(compiler_options.IsBootImageExtension());
- is_in_boot_image = true;
+ is_in_image = true;
desired_load_kind = HLoadClass::LoadKind::kBootImageRelRo;
} else if ((klass != nullptr) &&
compiler_options.IsImageClass(dex_file.GetTypeDescriptor(type_index))) {
- is_in_boot_image = true;
+ is_in_image = true;
desired_load_kind = HLoadClass::LoadKind::kBootImageLinkTimePcRelative;
} else {
// Not a boot image class.
desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
} else {
- is_in_boot_image = (klass != nullptr) &&
- runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
+ is_in_image = (klass != nullptr) && runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
if (compiler_options.IsJitCompiler()) {
DCHECK(!compiler_options.GetCompilePic());
- if (is_in_boot_image) {
+ if (is_in_image) {
desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
} else if (klass != nullptr) {
if (runtime->GetJit()->CanEncodeClass(
@@ -272,19 +271,23 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
// TODO(ngeoffray): Generate HDeoptimize instead.
desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
}
- } else if (is_in_boot_image) {
+ } else if (is_in_image) {
// AOT app compilation, boot image class.
desired_load_kind = HLoadClass::LoadKind::kBootImageRelRo;
+ } else if (compiler_options.IsAppImage() && is_class_in_current_image()) {
+ // AOT app compilation, app image class.
+ is_in_image = true;
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
} else {
- // Not JIT and the klass is not in boot image.
+ // Not JIT and the klass is not in boot image or app image.
desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
}
}
DCHECK_NE(desired_load_kind, HLoadClass::LoadKind::kInvalid);
- if (is_in_boot_image) {
- load_class->MarkInBootImage();
+ if (is_in_image) {
+ load_class->MarkInImage();
}
HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);