Instrument ARM64 generated code to check the Marking Register.
Generate run-time code in the Optimizing compiler checking that
the Marking Register's value matches `self.tls32_.is.gc_marking`
in debug mode (on target; and on host with JIT, or with AOT when
compiling the core image). If a check fails, abort.
Test: m test-art-target
Test: m test-art-target with tree built with ART_USE_READ_BARRIER=false
Test: ARM64 device boot test with libartd.
Bug: 37707231
Change-Id: Ie9b322b22b3d26654a06821e1db71dbda3c43061
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index ba4581c..11808c1 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -22,6 +22,8 @@
#include <unordered_set>
#include <vector>
+#include "android-base/strings.h"
+
#include "arch/instruction_set.h"
#include "base/array_ref.h"
#include "base/bit_utils.h"
@@ -379,6 +381,14 @@
bool CanAssumeVerified(ClassReference ref) const;
+ // Is `boot_image_filename` the name of a core image (small boot
+ // image used for ART testing only)?
+ static bool IsCoreImageFilename(const std::string& boot_image_filename) {
+ // TODO: This is under-approximating...
+ return android::base::EndsWith(boot_image_filename, "core.art")
+ || android::base::EndsWith(boot_image_filename, "core-optimizing.art");
+ }
+
private:
void PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 3cacc2c..538845d 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -18,6 +18,8 @@
#include <fstream>
+#include "runtime.h"
+
namespace art {
CompilerOptions::CompilerOptions()
@@ -30,6 +32,7 @@
inline_max_code_units_(kUnsetInlineMaxCodeUnits),
no_inline_from_(nullptr),
boot_image_(false),
+ core_image_(false),
app_image_(false),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
@@ -55,6 +58,19 @@
// because we don't want to include the PassManagerOptions definition from the header file.
}
+bool CompilerOptions::EmitRunTimeChecksInDebugMode() const {
+ // Run-time checks (e.g. Marking Register checks) are only emitted
+ // in debug mode, and
+ // - when running on device; or
+ // - when running on host, but only
+ // - when compiling the core image (which is used only for testing); or
+ // - when JIT compiling (only relevant for non-native methods).
+ // This is to prevent these checks from being emitted into pre-opted
+ // boot image or apps, as these are compiled with dex2oatd.
+ return kIsDebugBuild &&
+ (kIsTargetBuild || IsCoreImage() || Runtime::Current()->UseJitCompilation());
+}
+
void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
ParseUintOption(option, "--huge-method-max", &huge_method_threshold_, Usage);
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index b99263d..1e05c4e 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -161,6 +161,9 @@
return generate_mini_debug_info_;
}
+ // Should run-time checks be emitted in debug mode?
+ bool EmitRunTimeChecksInDebugMode() const;
+
bool GetGenerateBuildId() const {
return generate_build_id_;
}
@@ -177,10 +180,19 @@
return implicit_suspend_checks_;
}
+ // Are we compiling a boot image?
bool IsBootImage() const {
return boot_image_;
}
+ // Are we compiling a core image (small boot image only used for ART testing)?
+ bool IsCoreImage() const {
+ // Ensure that `core_image_` => `boot_image_`.
+ DCHECK(!core_image_ || boot_image_);
+ return core_image_;
+ }
+
+ // Are we compiling an app image?
bool IsAppImage() const {
return app_image_;
}
@@ -266,6 +278,7 @@
const std::vector<const DexFile*>* no_inline_from_;
bool boot_image_;
+ bool core_image_;
bool app_image_;
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b65b93f..e7e4647 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -219,7 +219,9 @@
// Assembler that holds generated instructions
std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm =
GetMacroAssembler<kPointerSize>(&arena, instruction_set, instruction_set_features);
- jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GenerateAnyDebugInfo());
+ const CompilerOptions& compiler_options = driver->GetCompilerOptions();
+ jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
+ jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
// Offsets into data structures
// TODO: if cross compiling these offsets are for the host not the target
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4999950..3be774a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1595,6 +1595,8 @@
__ Str(wzr, MemOperand(sp, GetStackOffsetOfShouldDeoptimizeFlag()));
}
}
+
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void CodeGeneratorARM64::GenerateFrameExit() {
@@ -3587,6 +3589,7 @@
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
@@ -4391,6 +4394,7 @@
void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
@@ -4459,6 +4463,8 @@
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4626,6 +4632,7 @@
void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
codegen_->GenerateInvokePolymorphicCall(invoke);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeMethodPatch(
@@ -4801,27 +4808,37 @@
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
return;
}
- // Ensure that between the BLR (emitted by GenerateStaticOrDirectCall) and RecordPcInfo there
- // are no pools emitted.
- EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
- LocationSummary* locations = invoke->GetLocations();
- codegen_->GenerateStaticOrDirectCall(
- invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
+ {
+ // Ensure that between the BLR (emitted by GenerateStaticOrDirectCall) and RecordPcInfo there
+ // are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
+ LocationSummary* locations = invoke->GetLocations();
+ codegen_->GenerateStaticOrDirectCall(
+ invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
+ }
+
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
return;
}
- // Ensure that between the BLR (emitted by GenerateVirtualCall) and RecordPcInfo there
- // are no pools emitted.
- EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
- codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
- DCHECK(!codegen_->IsLeafMethod());
+ {
+ // Ensure that between the BLR (emitted by GenerateVirtualCall) and RecordPcInfo there
+ // are no pools emitted.
+ EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
+ codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
+ DCHECK(!codegen_->IsLeafMethod());
+ }
+
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
@@ -4895,6 +4912,7 @@
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -4995,6 +5013,7 @@
} else {
__ Bind(slow_path->GetExitLabel());
}
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
}
@@ -5113,6 +5132,7 @@
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
return;
}
case HLoadString::LoadKind::kJitTableAddress: {
@@ -5137,6 +5157,7 @@
__ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -5164,6 +5185,7 @@
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -5260,6 +5282,7 @@
CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -5296,6 +5319,7 @@
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
@@ -5644,6 +5668,7 @@
return;
}
GenerateSuspendCheck(instruction, nullptr);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -6021,6 +6046,7 @@
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -6074,22 +6100,25 @@
obj.GetCode());
vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
- EmissionCheckScope guard(GetVIXLAssembler(),
- (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
- vixl::aarch64::Label return_address;
- __ adr(lr, &return_address);
- __ Bind(cbnz_label);
- __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
- static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
- "Field LDR must be 1 instruction (4B) before the return address label; "
- " 2 instructions (8B) for heap poisoning.");
- Register ref_reg = RegisterFrom(ref, Primitive::kPrimNot);
- __ ldr(ref_reg, MemOperand(base.X(), offset));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
+ {
+ EmissionCheckScope guard(GetVIXLAssembler(),
+ (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ __ Bind(cbnz_label);
+ __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
+ static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+ "Field LDR must be 1 instruction (4B) before the return address label; "
+ " 2 instructions (8B) for heap poisoning.");
+ Register ref_reg = RegisterFrom(ref, Primitive::kPrimNot);
+ __ ldr(ref_reg, MemOperand(base.X(), offset));
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
+ __ Bind(&return_address);
}
- GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
- __ Bind(&return_address);
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
return;
}
@@ -6158,19 +6187,22 @@
vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
__ Add(temp.X(), obj.X(), Operand(data_offset));
- EmissionCheckScope guard(GetVIXLAssembler(),
- (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
- vixl::aarch64::Label return_address;
- __ adr(lr, &return_address);
- __ Bind(cbnz_label);
- __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
- static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
- "Array LDR must be 1 instruction (4B) before the return address label; "
- " 2 instructions (8B) for heap poisoning.");
- __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
- DCHECK(!needs_null_check); // The thunk cannot handle the null check.
- GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
- __ Bind(&return_address);
+ {
+ EmissionCheckScope guard(GetVIXLAssembler(),
+ (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ __ Bind(cbnz_label);
+ __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
+ static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+ "Array LDR must be 1 instruction (4B) before the return address label; "
+ " 2 instructions (8B) for heap poisoning.");
+ __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
+ DCHECK(!needs_null_check); // The thunk cannot handle the null check.
+ GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
+ __ Bind(&return_address);
+ }
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
return;
}
@@ -6247,6 +6279,7 @@
GenerateRawReferenceLoad(
instruction, ref, obj, offset, index, scale_factor, needs_null_check, use_load_acquire);
__ Bind(slow_path->GetExitLabel());
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
@@ -6303,6 +6336,7 @@
// Fast path: the GC is not marking: nothing to do (the field is
// up-to-date, and we don't need to load the reference).
__ Bind(slow_path->GetExitLabel());
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
void CodeGeneratorARM64::GenerateRawReferenceLoad(HInstruction* instruction,
@@ -6381,6 +6415,19 @@
GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
}
+void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
+ // The following condition is a compile-time one, so it does not have a run-time cost.
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) {
+ // The following condition is a run-time one; it is executed after the
+ // previous compile-time test, to avoid penalizing non-debug builds.
+ if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temp_loc.IsValid() ? WRegisterFrom(temp_loc) : temps.AcquireW();
+ GetAssembler()->GenerateMarkingRegisterCheck(temp, code);
+ }
+ }
+}
+
void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction,
Location out,
Location ref,
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 584eead..c339209 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -687,6 +687,22 @@
bool needs_null_check,
bool use_load_acquire);
+ // Emit code checking the status of the Marking Register, and
+ // aborting the program if MR does not match the value stored in the
+ // art::Thread object. Code is only emitted in debug mode and if
+ // CompilerOptions::EmitRunTimeChecksInDebugMode returns true.
+ //
+ // Argument `code` is used to identify the different occurrences of
+ // MaybeGenerateMarkingRegisterCheck in the code generator, and is
+ // passed to the BRK instruction.
+ //
+ // If `temp_loc` is a valid location, it is expected to be a
+ // register and will be used as a temporary to generate code;
+ // otherwise, a temporary will be fetched from the core register
+ // scratch pool.
+ virtual void MaybeGenerateMarkingRegisterCheck(int code,
+ Location temp_loc = Location::NoLocation());
+
// Generate a read barrier for a heap reference within `instruction`
// using a slow path.
//
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index cada2e6..bdd105f 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -103,6 +103,40 @@
};
#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+// Special ARM64 code generator for codegen testing in a limited code
+// generation environment (i.e. with no runtime support).
+//
+// Note: If we want to exercise certain HIR constructions
+// (e.g. reference field load in Baker read barrier configuration) in
+// codegen tests in the future, we should also:
+// - save the Thread Register (X19) and possibly the Marking Register
+// (X20) before entering the generated function (both registers are
+// callee-save in AAPCS64);
+// - set these registers to meaningful values before or upon entering
+// the generated function (so that generated code using them is
+// correct);
+// - restore their original values before leaving the generated
+// function.
+class TestCodeGeneratorARM64 : public arm64::CodeGeneratorARM64 {
+ public:
+ TestCodeGeneratorARM64(HGraph* graph,
+ const Arm64InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : arm64::CodeGeneratorARM64(graph, isa_features, compiler_options) {}
+
+ void MaybeGenerateMarkingRegisterCheck(int code ATTRIBUTE_UNUSED,
+ Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+ // When turned on, the marking register checks in
+ // CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck expect the
+ // Thread Register and the Marking Register to be set to
+ // meaningful values. This is not the case in codegen testing, so
+ // just disable them entirely here (by doing nothing in this
+ // method).
+ }
+};
+#endif
+
#ifdef ART_ENABLE_CODEGEN_x86
class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 {
public:
@@ -263,7 +297,8 @@
bool has_result,
Expected expected) {
CompilerOptions compiler_options;
- std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options));
+ std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph,
+ compiler_options));
RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected);
}
@@ -280,9 +315,8 @@
CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
Arm64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena()) arm64::CodeGeneratorARM64(graph,
- *features_arm64.get(),
- compiler_options);
+ return new (graph->GetArena())
+ TestCodeGeneratorARM64(graph, *features_arm64.get(), compiler_options);
}
#endif
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 51101f1..a6c33b4 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -22,8 +22,6 @@
#include <stdint.h>
-#include "android-base/strings.h"
-
#ifdef ART_ENABLE_CODEGEN_arm64
#include "instruction_simplifier_arm64.h"
#endif
@@ -1111,12 +1109,7 @@
bool IsCompilingWithCoreImage() {
const std::string& image = Runtime::Current()->GetImageLocation();
- // TODO: This is under-approximating...
- if (android::base::EndsWith(image, "core.art") ||
- android::base::EndsWith(image, "core-optimizing.art")) {
- return true;
- }
- return false;
+ return CompilerDriver::IsCoreImageFilename(image);
}
bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 6ed0e9b..d8a48a5 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -158,6 +158,24 @@
}
}
+void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
+ // The Marking Register is only used in the Baker read barrier configuration.
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ vixl::aarch64::Register mr = reg_x(MR); // Marking Register.
+ vixl::aarch64::Register tr = reg_x(TR); // Thread Register.
+ vixl::aarch64::Label mr_is_ok;
+
+ // temp = self.tls32_.is.gc_marking
+ ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
+ // Check that mr == self.tls32_.is.gc_marking.
+ ___ Cmp(mr.W(), temp);
+ ___ B(eq, &mr_is_ok);
+ ___ Brk(code);
+ ___ Bind(&mr_is_ok);
+}
+
#undef ___
} // namespace arm64
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 5b8a34e..6b28363 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -98,6 +98,15 @@
// Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
void MaybeUnpoisonHeapReference(vixl::aarch64::Register reg);
+ // Emit code checking the status of the Marking Register, and aborting
+ // the program if MR does not match the value stored in the art::Thread
+ // object.
+ //
+ // Argument `temp` is used as a temporary register to generate code.
+ // Argument `code` is used to identify the different occurrences of
+ // MaybeGenerateMarkingRegisterCheck and is passed to the BRK instruction.
+ void GenerateMarkingRegisterCheck(vixl::aarch64::Register temp, int code = 0);
+
void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
}
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index bab84be..9732b76 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -662,7 +662,7 @@
___ Bind(Arm64JNIMacroLabel::Cast(label)->AsArm64());
}
-void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception *exception) {
+void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception* exception) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
Register temp = temps.AcquireX();
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 59a1a48..a8ca111 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -216,8 +216,15 @@
*/
virtual DebugFrameOpCodeWriterForAssembler& cfi() = 0;
+ void SetEmitRunTimeChecksInDebugMode(bool value) {
+ emit_run_time_checks_in_debug_mode_ = value;
+ }
+
protected:
- explicit JNIMacroAssembler() {}
+ JNIMacroAssembler() {}
+
+ // Should run-time checks be emitted in debug mode?
+ bool emit_run_time_checks_in_debug_mode_ = false;
};
// A "Label" class used with the JNIMacroAssembler
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index ec14e7a..9f2c44d 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -2012,7 +2012,7 @@
x86_64::X86_64ManagedRegister method_reg = ManagedFromCpu(x86_64::RDI);
size_t frame_size = 10 * kStackAlignment;
- assembler->BuildFrame(10 * kStackAlignment, method_reg, spill_regs, entry_spills);
+ assembler->BuildFrame(frame_size, method_reg, spill_regs, entry_spills);
// Construct assembly text counterpart.
std::ostringstream str;
@@ -2048,7 +2048,7 @@
ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs);
size_t frame_size = 10 * kStackAlignment;
- assembler->RemoveFrame(10 * kStackAlignment, spill_regs);
+ assembler->RemoveFrame(frame_size, spill_regs);
// Construct assembly text counterpart.
std::ostringstream str;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a6036da..e9ec5fa 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -773,6 +773,11 @@
compiler_options_->boot_image_ = !image_filenames_.empty();
compiler_options_->app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
+ if (IsBootImage() && image_filenames_.size() == 1) {
+ const std::string& boot_image_filename = image_filenames_[0];
+ compiler_options_->core_image_ = CompilerDriver::IsCoreImageFilename(boot_image_filename);
+ }
+
if (IsAppImage() && IsBootImage()) {
Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
}