Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/code_generator.h         |  4 ++++
 compiler/optimizing/code_generator_arm.cc    | 10 +++-------
 compiler/optimizing/code_generator_arm64.cc  | 10 +++-------
 compiler/optimizing/code_generator_x86.cc    | 10 +++-------
 compiler/optimizing/code_generator_x86_64.cc | 10 +++-------
 5 files changed, 16 insertions(+), 28 deletions(-)
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 47e6625d07..5e6e175c67 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -80,7 +80,11 @@ class SlowPathCode : public DeletableArenaObject<kArenaAllocSlowPaths> {
virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
+ // Save live core and floating-point caller-save registers and
+ // update the stack mask in `locations` for registers holding object
+ // references.
virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+ // Restore live core and floating-point caller-save registers.
virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
bool IsCoreRegisterSaved(int reg) const {
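For context, the pair documented above is the standard bracket around a runtime call that can trigger a garbage collection. Below is a minimal, ARM-flavored sketch of a slow path that still needs the bracket. It is illustrative only, not part of this change: the class name is made up, kQuickTestSuspend merely stands in for any GC-capable entrypoint, and the InvokeRuntime overload is assumed from this era's codegen API.

  // Illustrative sketch, not part of this change: a slow path whose
  // runtime call may trigger GC, so it must spill live caller-saves and
  // record object references in the stack mask before the call.
  class SuspendLikeSlowPathARM : public SlowPathCode {
   public:
    explicit SuspendLikeSlowPathARM(HInstruction* instruction)
        : SlowPathCode(instruction) {}

    void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
      CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
      __ Bind(GetEntryLabel());
      // This call may suspend or allocate: spill live registers and update
      // the stack mask so the GC can find object references.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
      arm_codegen->InvokeRuntime(
          kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
      // Reload the spilled registers on the way back to compiled code.
      RestoreLiveRegisters(codegen, instruction_->GetLocations());
      __ b(GetExitLabel());
    }

    const char* GetDescription() const OVERRIDE {
      return "SuspendLikeSlowPathARM";
    }
  };

Compare this with the read barrier mark slow paths changed below, which drop exactly this bracket.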
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 1aa7b5404c..474e9d540a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -437,11 +437,9 @@ class ReadBarrierMarkSlowPathARM : public SlowPathCode {
<< instruction_->DebugName();
__ Bind(GetEntryLabel());
- // Save live registers before the runtime call, and in particular
- // R0 (if it is live), as it is clobbered by functions
- // art_quick_read_barrier_mark_regX.
- SaveLiveRegisters(codegen, locations);
-
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
DCHECK_NE(reg, SP);
@@ -469,8 +467,6 @@ class ReadBarrierMarkSlowPathARM : public SlowPathCode {
instruction_,
instruction_->GetDexPc(),
this);
-
- RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
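The removal above is sound for two independent reasons, both stated in the new comment: the art_quick_read_barrier_mark_regX stubs save and restore every other live register themselves (a special calling convention that marks the reference in place, in the register it arrived in), and marking never allocates, so the call cannot trigger a GC and the stack mask needs no update. On the runtime side, the stubs funnel into a small C++ entrypoint; the following is a rough sketch of its shape in this era's runtime (lock annotations omitted; treat the exact signature as an assumption, not part of this patch).

  // Assumed shape of the C++ entrypoint behind the per-register assembly
  // stubs (see runtime/entrypoints/quick/). The stubs themselves spill and
  // reload all other registers around this call, which is precisely what
  // makes SaveLiveRegisters()/RestoreLiveRegisters() redundant above.
  extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
    DCHECK(kEmitCompilerReadBarrier);
    // Only reads or forwards the reference; no allocation, hence no GC.
    return ReadBarrier::Mark(obj);
  }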
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 07b7823571..cec641f27c 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -603,11 +603,9 @@ class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
<< instruction_->DebugName();
__ Bind(GetEntryLabel());
- // Save live registers before the runtime call, and in particular
- // W0 (if it is live), as it is clobbered by functions
- // art_quick_read_barrier_mark_regX.
- SaveLiveRegisters(codegen, locations);
-
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
DCHECK_NE(obj_.reg(), LR);
@@ -635,8 +633,6 @@ class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
instruction_,
instruction_->GetDexPc(),
this);
-
- RestoreLiveRegisters(codegen, locations);
__ B(GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1cc6060f68..93bf022a32 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -472,11 +472,9 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
<< instruction_->DebugName();
__ Bind(GetEntryLabel());
- // Save live registers before the runtime call, and in particular
- // EAX (if it is live), as it is clobbered by functions
- // art_quick_read_barrier_mark_regX.
- SaveLiveRegisters(codegen, locations);
-
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
DCHECK_NE(reg, ESP);
@@ -502,8 +500,6 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
instruction_,
instruction_->GetDexPc(),
this);
-
- RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a0158938b5..0d85bea8c0 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -493,11 +493,9 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
<< instruction_->DebugName();
__ Bind(GetEntryLabel());
- // Save live registers before the runtime call, and in particular
- // RDI and/or RAX (if they are live), as they are clobbered by
- // functions art_quick_read_barrier_mark_regX.
- SaveLiveRegisters(codegen, locations);
-
+ // No need to save live registers; it's taken care of by the
+ // entrypoint. Also, there is no need to update the stack mask,
+ // as this runtime call will not trigger a garbage collection.
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
DCHECK_NE(reg, RSP);
@@ -523,8 +521,6 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
instruction_,
instruction_->GetDexPc(),
this);
-
- RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
}
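One last detail: the deleted x86-64 comment singled out two registers, RDI and/or RAX, presumably because the older mark stubs still routed through the standard runtime calling convention internally (argument in RDI, result in RAX), while the reworked per-register stubs operate in place. Below is a small, self-contained C++ model of that contract; this is hypothetical code for illustration only, since the real stubs are hand-written assembly, one per register.

  #include <cstdint>

  // Models art_quick_read_barrier_mark_regN: only register N may change
  // (to the marked, possibly forwarded, reference); every other core and
  // floating-point register must come back bit-for-bit identical, and the
  // stub must not allocate, so it can never trigger a GC.
  struct RegisterFile {
    uint64_t gpr[16];
    double fpr[32];
  };

  inline void ReadBarrierMarkRegN(RegisterFile& regs, int n,
                                  uint64_t (*mark)(uint64_t ref)) {
    // In place: the reference leaves in the same register it arrived in.
    regs.gpr[n] = mark(regs.gpr[n]);
  }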