summaryrefslogtreecommitdiff
path: root/compiler/optimizing/code_generator.cc
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2016-09-14 16:26:36 +0100
committer Vladimir Marko <vmarko@google.com> 2016-09-20 14:55:44 +0100
commit 804b03ffb9b9dc6cc3153e004c2cd38667508b13 (patch)
tree 91c7fd54b5000e041bf9d3d5b233dabce1fad614 /compiler/optimizing/code_generator.cc
parent 80eb0bc2757274816a014a2997848d288c9ee553 (diff)
Change remaining slow path throw entrypoints to save everything.
Change DivZeroCheck, BoundsCheck and explicit NullCheck slow path entrypoints to conform to kSaveEverything.

On Nexus 9, AOSP ToT, the boot.oat size reduction is

prebuilt multi-part boot image:
  - 32-bit boot.oat: -12KiB (-0.04%)
  - 64-bit boot.oat: -24KiB (-0.06%)

on-device built single boot image:
  - 32-bit boot.oat: -8KiB (-0.03%)
  - 64-bit boot.oat: -16KiB (-0.04%)

Test: Run ART test suite including gcstress on host and Nexus 9.
Test: Manually disable implicit null checks and test as above.
Change-Id: If82a8082ea9ae571c5d03b5e545e67fcefafb163
Diffstat (limited to 'compiler/optimizing/code_generator.cc')
-rw-r--r-- compiler/optimizing/code_generator.cc 16
1 file changed, 8 insertions, 8 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 51ba187c1b..137cd21864 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1117,7 +1117,8 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
}
}
-LocationSummary* CodeGenerator::CreateNullCheckLocations(HNullCheck* null_check) {
+LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
+ RegisterSet caller_saves) {
// Note: Using kNoCall allows the method to be treated as leaf (and eliminate the
// HSuspendCheck from entry block). However, it will still get a valid stack frame
// because the HNullCheck needs an environment.
@@ -1125,16 +1126,15 @@ LocationSummary* CodeGenerator::CreateNullCheckLocations(HNullCheck* null_check)
// When throwing from a try block, we may need to retrieve dalvik registers from
// physical registers and we also need to set up stack mask for GC. This is
// implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
- bool can_throw_into_catch_block = null_check->CanThrowIntoCatchBlock();
+ bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
if (can_throw_into_catch_block) {
call_kind = LocationSummary::kCallOnSlowPath;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(null_check, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
}
- locations->SetInAt(0, Location::RequiresRegister());
- DCHECK(!null_check->HasUses());
+ DCHECK(!instruction->HasUses());
return locations;
}
@@ -1273,7 +1273,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
saved_fpu_stack_offsets_[i] = stack_offset;
@@ -1292,7 +1292,7 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary*
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);