43 files changed, 919 insertions, 313 deletions
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc index 769cd4c83d..a3323e133a 100644 --- a/compiler/jni/quick/arm/calling_convention_arm.cc +++ b/compiler/jni/quick/arm/calling_convention_arm.cc @@ -16,6 +16,7 @@ #include "base/logging.h" #include "calling_convention_arm.h" +#include "handle_scope-inl.h" #include "utils/arm/managed_register_arm.h" namespace art { diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc index 29763a2a10..b9c81787f0 100644 --- a/compiler/jni/quick/arm64/calling_convention_arm64.cc +++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc @@ -16,6 +16,7 @@ #include "base/logging.h" #include "calling_convention_arm64.h" +#include "handle_scope-inl.h" #include "utils/arm64/managed_register_arm64.h" namespace art { diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc index f7a7be7304..aefbf06fd7 100644 --- a/compiler/jni/quick/mips/calling_convention_mips.cc +++ b/compiler/jni/quick/mips/calling_convention_mips.cc @@ -17,6 +17,7 @@ #include "calling_convention_mips.h" #include "base/logging.h" +#include "handle_scope-inl.h" #include "utils/mips/managed_register_mips.h" namespace art { diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc index 9bf7d0f071..a5686e1ac7 100644 --- a/compiler/jni/quick/x86/calling_convention_x86.cc +++ b/compiler/jni/quick/x86/calling_convention_x86.cc @@ -17,6 +17,7 @@ #include "calling_convention_x86.h" #include "base/logging.h" +#include "handle_scope-inl.h" #include "utils/x86/managed_register_x86.h" #include "utils.h" diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc index a100552695..bbdf1fe7bb 100644 --- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc +++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc @@ -17,6 +17,7 @@ #include "calling_convention_x86_64.h" #include "base/logging.h" +#include "handle_scope-inl.h" #include "utils/x86_64/managed_register_x86_64.h" #include "utils.h" diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 4d8154e6a0..ada0fb75d7 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -620,6 +620,14 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) { break; } + case Location::kFpuRegisterPair : { + stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, location.low()); + stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, location.high()); + ++i; + DCHECK_LT(i, environment_size); + break; + } + default: LOG(FATAL) << "Unexpected kind " << location.GetKind(); } diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index 4205ebebf9..9880239c88 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -142,6 +142,7 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> { UNIMPLEMENTED(FATAL); UNREACHABLE(); } + virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0; void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc); diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 3b3fb64763..d0a72bb42a 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ 
b/compiler/optimizing/code_generator_arm.cc @@ -373,6 +373,16 @@ size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id return kArmWordSize; } +size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) { + __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index); + return kArmWordSize; +} + +size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) { + __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index); + return kArmWordSize; +} + CodeGeneratorARM::CodeGeneratorARM(HGraph* graph, const ArmInstructionSetFeatures* isa_features) : CodeGenerator(graph, kNumberOfCoreRegisters, kNumberOfSRegisters, kNumberOfRegisterPairs), @@ -802,7 +812,8 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr __ LoadImmediate(IP, value); __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex()); } - } else if (const_to_move->IsLongConstant()) { + } else { + DCHECK(const_to_move->IsLongConstant()) << const_to_move; int64_t value = const_to_move->AsLongConstant()->GetValue(); if (location.IsRegisterPair()) { __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value)); @@ -2585,7 +2596,8 @@ void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr, Register out_hi) { if (offset != 0) { __ LoadImmediate(out_lo, offset); - __ add(addr, addr, ShifterOperand(out_lo)); + __ add(IP, addr, ShifterOperand(out_lo)); + addr = IP; } __ ldrexd(out_lo, out_hi, addr); } @@ -2599,7 +2611,8 @@ void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr, Label fail; if (offset != 0) { __ LoadImmediate(temp1, offset); - __ add(addr, addr, ShifterOperand(temp1)); + __ add(IP, addr, ShifterOperand(temp1)); + addr = IP; } __ Bind(&fail); // We need a load followed by store. 
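A note on the two GenerateWideAtomic fixes above: the old code added the offset into `addr` itself, clobbering a register that may still hold a live value, while the fixed code materializes the adjusted address in the scratch register IP. The access itself is an LDREXD/STREXD retry loop. As a hedged illustration only, with std::atomic standing in for the exclusive monitor (this is not the code the backend emits):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Illustrative model of a 64-bit atomic store on 32-bit ARM: LDREXD reads
    // the old value and arms the exclusive monitor, STREXD succeeds only if
    // nothing touched the location in between; otherwise we retry.
    static void WideAtomicStore(std::atomic<int64_t>* addr, int64_t value) {
      int64_t observed = addr->load(std::memory_order_relaxed);  // "LDREXD"
      // compare_exchange_weak models STREXD's possible failure under contention.
      while (!addr->compare_exchange_weak(observed, value)) {
      }
    }

    int main() {
      std::atomic<int64_t> slot{0};
      WideAtomicStore(&slot, INT64_C(0x123456789abcdef0));
      std::printf("%llx\n", static_cast<unsigned long long>(slot.load()));
      return 0;
    }
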
(The address used in a STREX instruction must @@ -2994,10 +3007,34 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: - LOG(FATAL) << "Unimplemented register type " << instruction->GetType(); - UNREACHABLE(); + case Primitive::kPrimFloat: { + uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); + Location out = locations->Out(); + DCHECK(out.IsFpuRegister()); + if (index.IsConstant()) { + size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; + __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset); + } else { + __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); + __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset); + } + break; + } + + case Primitive::kPrimDouble: { + uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); + Location out = locations->Out(); + DCHECK(out.IsFpuRegisterPair()); + if (index.IsConstant()) { + size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; + __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset); + } else { + __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8)); + __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset); + } + break; + } + case Primitive::kPrimVoid: LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); @@ -3114,12 +3151,36 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: - LOG(FATAL) << "Unimplemented register type " << instruction->GetType(); - UNREACHABLE(); + case Primitive::kPrimFloat: { + uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); + Location value = locations->InAt(2); + DCHECK(value.IsFpuRegister()); + if (index.IsConstant()) { + size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; + __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset); + } else { + __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); + __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset); + } + break; + } + + case Primitive::kPrimDouble: { + uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); + Location value = locations->InAt(2); + DCHECK(value.IsFpuRegisterPair()); + if (index.IsConstant()) { + size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; + __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset); + } else { + __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8)); + __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset); + } + break; + } + case Primitive::kPrimVoid: - LOG(FATAL) << "Unreachable type " << instruction->GetType(); + LOG(FATAL) << "Unreachable type " << value_type; UNREACHABLE(); } } @@ -3247,21 +3308,62 @@ void ParallelMoveResolverARM::EmitMove(size_t index) { if (destination.IsRegister()) { __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex()); + } else if (destination.IsFpuRegister()) { + __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex()); } else { DCHECK(destination.IsStackSlot()); __ LoadFromOffset(kLoadWord, IP, SP, 
source.GetStackIndex()); __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); } - } else { - DCHECK(source.IsConstant()); - DCHECK(source.GetConstant()->IsIntConstant()); - int32_t value = source.GetConstant()->AsIntConstant()->GetValue(); - if (destination.IsRegister()) { - __ LoadImmediate(destination.AsRegister<Register>(), value); + } else if (source.IsFpuRegister()) { + if (destination.IsFpuRegister()) { + __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>()); } else { DCHECK(destination.IsStackSlot()); - __ LoadImmediate(IP, value); + __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex()); + } + } else if (source.IsFpuRegisterPair()) { + if (destination.IsFpuRegisterPair()) { + __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), + FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())); + } else { + DCHECK(destination.IsDoubleStackSlot()) << destination; + __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()), + SP, destination.GetStackIndex()); + } + } else if (source.IsDoubleStackSlot()) { + if (destination.IsFpuRegisterPair()) { + __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), + SP, source.GetStackIndex()); + } else { + DCHECK(destination.IsDoubleStackSlot()) << destination; + __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex()); __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); + __ LoadFromOffset(kLoadWord, IP, SP, source.GetHighStackIndex(kArmWordSize)); + __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize)); + } + } else { + DCHECK(source.IsConstant()) << source; + HInstruction* constant = source.GetConstant(); + if (constant->IsIntConstant()) { + int32_t value = constant->AsIntConstant()->GetValue(); + if (destination.IsRegister()) { + __ LoadImmediate(destination.AsRegister<Register>(), value); + } else { + DCHECK(destination.IsStackSlot()); + __ LoadImmediate(IP, value); + __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); + } + } else { + DCHECK(constant->IsFloatConstant()); + float value = constant->AsFloatConstant()->GetValue(); + if (destination.IsFpuRegister()) { + __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value); + } else { + DCHECK(destination.IsStackSlot()); + __ LoadImmediate(IP, bit_cast<int32_t, float>(value)); + __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); + } } } } @@ -3300,6 +3402,20 @@ void ParallelMoveResolverARM::EmitSwap(size_t index) { Exchange(destination.AsRegister<Register>(), source.GetStackIndex()); } else if (source.IsStackSlot() && destination.IsStackSlot()) { Exchange(source.GetStackIndex(), destination.GetStackIndex()); + } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { + __ vmovrs(IP, source.AsFpuRegister<SRegister>()); + __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>()); + __ vmovsr(destination.AsFpuRegister<SRegister>(), IP); + } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { + SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>() + : destination.AsFpuRegister<SRegister>(); + int mem = source.IsFpuRegister() + ? 
destination.GetStackIndex() + : source.GetStackIndex(); + + __ vmovrs(IP, reg); + __ LoadSFromOffset(reg, SP, mem); + __ StoreToOffset(kStoreWord, IP, SP, mem); } else { LOG(FATAL) << "Unimplemented"; } diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h index 40f4edc4eb..c1b4eda3a4 100644 --- a/compiler/optimizing/code_generator_arm.h +++ b/compiler/optimizing/code_generator_arm.h @@ -168,6 +168,8 @@ class CodeGeneratorARM : public CodeGenerator { void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE; size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; size_t GetWordSize() const OVERRIDE { return kArmWordSize; @@ -237,6 +239,10 @@ class CodeGeneratorARM : public CodeGenerator { return isa_features_; } + bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE { + return type == Primitive::kPrimDouble || type == Primitive::kPrimLong; + } + private: // Labels for each block that will be compiled. GrowableArray<Label> block_labels_; diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index 19488a4ba2..e4da07be43 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -267,6 +267,10 @@ class CodeGeneratorARM64 : public CodeGenerator { ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; } + bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + return false; + } + private: // Labels for each block that will be compiled. vixl::Label* block_labels_; diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index 636f8845e5..acde122917 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -222,6 +222,10 @@ class CodeGeneratorX86 : public CodeGenerator { block_labels_.SetSize(GetGraph()->GetBlocks().Size()); } + bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE { + return type == Primitive::kPrimLong; + } + private: // Labels for each block that will be compiled. GrowableArray<Label> block_labels_; diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index 070886460b..87f6b0f779 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -218,6 +218,10 @@ class CodeGeneratorX86_64 : public CodeGenerator { block_labels_.SetSize(GetGraph()->GetBlocks().Size()); } + bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + return false; + } + private: // Labels for each block that will be compiled. 
GrowableArray<Label> block_labels_; diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index 9ed1e4528c..9e0a5b89e9 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -30,10 +30,12 @@ class HGraphVisualizerPrinter : public HGraphVisitor { HGraphVisualizerPrinter(HGraph* graph, std::ostream& output, const char* pass_name, + bool is_after_pass, const CodeGenerator& codegen) : HGraphVisitor(graph), output_(output), pass_name_(pass_name), + is_after_pass_(is_after_pass), codegen_(codegen), indent_(0) {} @@ -136,6 +138,10 @@ class HGraphVisualizerPrinter : public HGraphVisitor { output_ << "invalid"; } else if (location.IsStackSlot()) { output_ << location.GetStackIndex() << "(sp)"; + } else if (location.IsFpuRegisterPair()) { + codegen_.DumpFloatingPointRegister(output_, location.low()); + output_ << " and "; + codegen_.DumpFloatingPointRegister(output_, location.high()); } else { DCHECK(location.IsDoubleStackSlot()); output_ << "2x" << location.GetStackIndex() << "(sp)"; @@ -157,19 +163,19 @@ class HGraphVisualizerPrinter : public HGraphVisitor { output_ << " (liveness: " << instruction->GetLifetimePosition() << ")"; } - void VisitIntConstant(HIntConstant *instruction) OVERRIDE { + void VisitIntConstant(HIntConstant* instruction) OVERRIDE { output_ << " " << instruction->GetValue(); } - void VisitLongConstant(HLongConstant *instruction) OVERRIDE { + void VisitLongConstant(HLongConstant* instruction) OVERRIDE { output_ << " " << instruction->GetValue(); } - void VisitFloatConstant(HFloatConstant *instruction) OVERRIDE { + void VisitFloatConstant(HFloatConstant* instruction) OVERRIDE { output_ << " " << instruction->GetValue(); } - void VisitDoubleConstant(HDoubleConstant *instruction) OVERRIDE { + void VisitDoubleConstant(HDoubleConstant* instruction) OVERRIDE { output_ << " " << instruction->GetValue(); } @@ -224,7 +230,8 @@ class HGraphVisualizerPrinter : public HGraphVisitor { void Run() { StartTag("cfg"); - PrintProperty("name", pass_name_); + std::string pass_desc = std::string(pass_name_) + (is_after_pass_ ? " (after)" : " (before)"); + PrintProperty("name", pass_desc.c_str()); VisitInsertionOrder(); EndTag("cfg"); } @@ -275,6 +282,7 @@ class HGraphVisualizerPrinter : public HGraphVisitor { private: std::ostream& output_; const char* pass_name_; + const bool is_after_pass_; const CodeGenerator& codegen_; size_t indent_; @@ -295,7 +303,7 @@ HGraphVisualizer::HGraphVisualizer(std::ostream* output, } is_enabled_ = true; - HGraphVisualizerPrinter printer(graph_, *output_, "", codegen_); + HGraphVisualizerPrinter printer(graph_, *output_, "", true, codegen_); printer.StartTag("compilation"); printer.PrintProperty("name", method_name); printer.PrintProperty("method", method_name); @@ -305,8 +313,7 @@ HGraphVisualizer::HGraphVisualizer(std::ostream* output, void HGraphVisualizer::DumpGraph(const char* pass_name, bool is_after_pass) const { if (is_enabled_) { - std::string pass_desc = std::string(pass_name) + (is_after_pass ? 
" (after)" : " (before)"); - HGraphVisualizerPrinter printer(graph_, *output_, pass_desc.c_str(), codegen_); + HGraphVisualizerPrinter printer(graph_, *output_, pass_name, is_after_pass, codegen_); printer.Run(); } } diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h index 1ff26d914c..7df99d4b6f 100644 --- a/compiler/optimizing/locations.h +++ b/compiler/optimizing/locations.h @@ -160,6 +160,16 @@ class Location : public ValueObject { return GetPayload(); } + int low() const { + DCHECK(IsPair()); + return GetPayload() >> 16; + } + + int high() const { + DCHECK(IsPair()); + return GetPayload() & 0xFFFF; + } + template <typename T> T AsRegister() const { DCHECK(IsRegister()); @@ -175,25 +185,41 @@ class Location : public ValueObject { template <typename T> T AsRegisterPairLow() const { DCHECK(IsRegisterPair()); - return static_cast<T>(GetPayload() >> 16); + return static_cast<T>(low()); } template <typename T> T AsRegisterPairHigh() const { DCHECK(IsRegisterPair()); - return static_cast<T>(GetPayload() & 0xFFFF); + return static_cast<T>(high()); } template <typename T> T AsFpuRegisterPairLow() const { DCHECK(IsFpuRegisterPair()); - return static_cast<T>(GetPayload() >> 16); + return static_cast<T>(low()); } template <typename T> T AsFpuRegisterPairHigh() const { DCHECK(IsFpuRegisterPair()); - return static_cast<T>(GetPayload() & 0xFFFF); + return static_cast<T>(high()); + } + + bool IsPair() const { + return IsRegisterPair() || IsFpuRegisterPair(); + } + + Location ToLow() const { + return IsRegisterPair() + ? Location::RegisterLocation(low()) + : Location::FpuRegisterLocation(low()); + } + + Location ToHigh() const { + return IsRegisterPair() + ? Location::RegisterLocation(high()) + : Location::FpuRegisterLocation(high()); } static uintptr_t EncodeStackIndex(intptr_t stack_index) { @@ -264,6 +290,18 @@ class Location : public ValueObject { return value_ == other.value_; } + // Returns whether this location contains `other`. + bool Contains(Location other) const { + if (Equals(other)) return true; + if (IsRegisterPair() && other.IsRegister()) { + return low() == other.reg() || high() == other.reg(); + } + if (IsFpuRegisterPair() && other.IsFpuRegister()) { + return low() == other.reg() || high() == other.reg(); + } + return false; + } + const char* DebugString() const { switch (GetKind()) { case kInvalid: return "I"; @@ -525,7 +563,8 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> { && (output_.GetPolicy() == Location::kSameAsFirstInput)) { return false; } - if (inputs_.Get(input_index).IsRegister() || inputs_.Get(input_index).IsFpuRegister()) { + Location input = inputs_.Get(input_index); + if (input.IsRegister() || input.IsFpuRegister() || input.IsPair()) { return false; } return true; diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 0fc1fd8663..b98bc70a9f 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -2734,7 +2734,7 @@ class MoveOperands : public ArenaObject<kArenaAllocMisc> { // True if this blocks a move from the given location. 
bool Blocks(Location loc) const { - return !IsEliminated() && source_.Equals(loc); + return !IsEliminated() && source_.Contains(loc); } // A move is redundant if it's been eliminated, if its source and diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index c1c805dc56..1efc52b9ec 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -27,6 +27,12 @@ namespace art { static constexpr size_t kMaxLifetimePosition = -1; static constexpr size_t kDefaultNumberOfSpillSlots = 4; +// For simplicity, we implement register pairs as (reg, reg + 1). +// Note that this is a requirement for double registers on ARM, since we +// allocate SRegister. +static int GetHighForLowRegister(int reg) { return reg + 1; } +static bool IsLowRegister(int reg) { return (reg & 1) == 0; } + RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& liveness) @@ -72,10 +78,16 @@ bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph, !it.Done(); it.Advance()) { HInstruction* current = it.Current(); - if (current->GetType() == Primitive::kPrimLong || - current->GetType() == Primitive::kPrimFloat || - current->GetType() == Primitive::kPrimDouble) { - return false; + if (instruction_set == kX86) { + if (current->GetType() == Primitive::kPrimLong || + current->GetType() == Primitive::kPrimFloat || + current->GetType() == Primitive::kPrimDouble) { + return false; + } + } else if (instruction_set == kArm || instruction_set == kThumb2) { + if (current->GetType() == Primitive::kPrimLong) { + return false; + } } } } @@ -130,7 +142,7 @@ void RegisterAllocator::BlockRegister(Location location, : physical_fp_register_intervals_.Get(reg); Primitive::Type type = location.IsRegister() ? Primitive::kPrimInt - : Primitive::kPrimDouble; + : Primitive::kPrimFloat; if (interval == nullptr) { interval = LiveInterval::MakeFixedInterval(allocator_, reg, type); if (location.IsRegister()) { @@ -226,6 +238,12 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) { LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble); temp_intervals_.Add(interval); interval->AddRange(position, position + 1); + if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) { + interval->AddHighInterval(true); + LiveInterval* high = interval->GetHighInterval(); + temp_intervals_.Add(high); + unhandled_fp_intervals_.Add(high); + } unhandled_fp_intervals_.Add(interval); break; } @@ -279,6 +297,9 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) { Location input = locations->InAt(i); if (input.IsRegister() || input.IsFpuRegister()) { BlockRegister(input, position, position + 1); + } else if (input.IsPair()) { + BlockRegister(input.ToLow(), position, position + 1); + BlockRegister(input.ToHigh(), position, position + 1); } } @@ -291,6 +312,10 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) { DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek())); + if (codegen_->NeedsTwoRegisters(current->GetType())) { + current->AddHighInterval(); + } + // Some instructions define their output in fixed register/stack slot. We need // to ensure we know these locations before doing register allocation. For a // given register, we create an interval that covers these locations. 
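The `low()`/`high()` accessors introduced above pin down the pair encoding: one payload word holds the low register's index in its upper 16 bits and the high register's index in its lower 16 bits, and the new `Contains()` lets a pair location match either half. A minimal standalone sketch of that packing (the names below are illustrative, not ART's API):

    #include <cassert>
    #include <cstdint>

    // Two 16-bit register indices packed into one 32-bit payload, mirroring
    // Location::low() (payload >> 16) and Location::high() (payload & 0xFFFF).
    static uint32_t EncodePair(int low_reg, int high_reg) {
      return (static_cast<uint32_t>(low_reg) << 16) | static_cast<uint32_t>(high_reg);
    }
    static int LowOf(uint32_t payload) { return payload >> 16; }
    static int HighOf(uint32_t payload) { return payload & 0xFFFF; }
    // A pair "contains" a single register if either half matches, as in Contains().
    static bool PairContains(uint32_t payload, int reg) {
      return LowOf(payload) == reg || HighOf(payload) == reg;
    }

    int main() {
      uint32_t d2 = EncodePair(4, 5);  // e.g. ARM D2 built from S4/S5.
      assert(LowOf(d2) == 4 && HighOf(d2) == 5);
      assert(PairContains(d2, 5) && !PairContains(d2, 6));
      return 0;
    }
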
The register @@ -304,14 +329,30 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) { if (first.IsRegister() || first.IsFpuRegister()) { current->SetFrom(position + 1); current->SetRegister(first.reg()); + } else if (first.IsPair()) { + current->SetFrom(position + 1); + current->SetRegister(first.low()); + LiveInterval* high = current->GetHighInterval(); + high->SetRegister(first.high()); + high->SetFrom(position + 1); } } else if (output.IsRegister() || output.IsFpuRegister()) { // Shift the interval's start by one to account for the blocked register. current->SetFrom(position + 1); current->SetRegister(output.reg()); BlockRegister(output, position, position + 1); + } else if (output.IsPair()) { + current->SetFrom(position + 1); + current->SetRegister(output.low()); + LiveInterval* high = current->GetHighInterval(); + high->SetRegister(output.high()); + high->SetFrom(position + 1); + BlockRegister(output.ToLow(), position, position + 1); + BlockRegister(output.ToHigh(), position, position + 1); } else if (output.IsStackSlot() || output.IsDoubleStackSlot()) { current->SetSpillSlot(output.GetStackIndex()); + } else { + DCHECK(output.IsUnallocated() || output.IsConstant()); } // If needed, add interval to the list of unhandled intervals. @@ -516,6 +557,7 @@ void RegisterAllocator::LinearScan() { LiveInterval* current = unhandled_->Pop(); DCHECK(!current->IsFixed() && !current->HasSpillSlot()); DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart()); + DCHECK(!current->IsLowInterval() || unhandled_->Peek()->IsHighInterval()); size_t position = current->GetStart(); @@ -566,6 +608,13 @@ continue; } + if (current->IsHighInterval() && !current->GetLowInterval()->HasRegister()) { + DCHECK(!current->HasRegister()); + // Allocating the low part was unsuccessful. The split interval for the high part + // will be handled next (it is in the `unhandled_` list). + continue; + } + // (4) Try to find an available register. bool success = TryAllocateFreeReg(current); @@ -578,6 +627,9 @@ // intervals. if (success) { active_.Add(current); + if (current->HasHighInterval() && !current->GetHighInterval()->HasRegister()) { + current->GetHighInterval()->SetRegister(GetHighForLowRegister(current->GetRegister())); + } } } } @@ -630,26 +682,31 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) { if (current->HasRegister()) { // Some instructions have a fixed register output. reg = current->GetRegister(); - DCHECK_NE(free_until[reg], 0u); + if (free_until[reg] == 0) { + DCHECK(current->IsHighInterval()); + // AllocateBlockedReg will spill the holder of the register. + return false; + } } else { + DCHECK(!current->IsHighInterval()); int hint = current->FindFirstRegisterHint(free_until); if (hint != kNoRegister) { DCHECK(!IsBlocked(hint)); reg = hint; + } else if (current->IsLowInterval()) { + reg = FindAvailableRegisterPair(free_until); } else { - // Pick the register that is free the longest. - for (size_t i = 0; i < number_of_registers_; ++i) { - if (IsBlocked(i)) continue; - if (reg == -1 || free_until[i] > free_until[reg]) { - reg = i; - if (free_until[i] == kMaxLifetimePosition) break; - } - } + reg = FindAvailableRegister(free_until); } } + DCHECK_NE(reg, -1); // If we could not find a register, we need to spill.
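Per the comment at the top of the register_allocator.cc hunk, pairs are laid out as (reg, reg + 1) with an even low half, matching how an ARM DRegister overlays two adjacent SRegisters. A simplified, hedged model of the pair search that `FindAvailableRegisterPair` (in the next hunk) performs, ignoring blocked registers:

    #include <cstdio>

    // Simplified model of choosing a register pair: scan even "low" indices,
    // require both halves to be usable now (free-until != 0), and prefer the
    // pair whose halves stay free the longest. Returns the low index or -1.
    static int FindPair(const unsigned* free_until, int count) {
      int best = -1;
      for (int low = 0; low + 1 < count; low += 2) {
        int high = low + 1;
        if (free_until[low] == 0 || free_until[high] == 0) continue;  // busy now
        if (best == -1 ||
            (free_until[low] >= free_until[best] &&
             free_until[high] >= free_until[best + 1])) {
          best = low;
        }
      }
      return best;
    }

    int main() {
      unsigned free_until[6] = {10, 0, 40, 50, 30, 60};
      // (S0,S1) is out: S1 is taken. (S2,S3) beats (S4,S5) on the low half.
      std::printf("low register of best pair: %d\n", FindPair(free_until, 6));
      return 0;
    }
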
- if (reg == -1 || free_until[reg] == 0) { + if (free_until[reg] == 0) { + return false; + } + + if (current->IsLowInterval() && free_until[GetHighForLowRegister(reg)] == 0) { return false; } @@ -671,6 +728,40 @@ bool RegisterAllocator::IsBlocked(int reg) const { : blocked_fp_registers_[reg]; } +int RegisterAllocator::FindAvailableRegisterPair(size_t* next_use) const { + int reg = -1; + // Pick the register pair that is used the last. + for (size_t i = 0; i < number_of_registers_; ++i) { + if (IsBlocked(i)) continue; + if (!IsLowRegister(i)) continue; + int high_register = GetHighForLowRegister(i); + if (IsBlocked(high_register)) continue; + int existing_high_register = GetHighForLowRegister(reg); + if ((reg == -1) || (next_use[i] >= next_use[reg] + && next_use[high_register] >= next_use[existing_high_register])) { + reg = i; + if (next_use[i] == kMaxLifetimePosition + && next_use[high_register] == kMaxLifetimePosition) { + break; + } + } + } + return reg; +} + +int RegisterAllocator::FindAvailableRegister(size_t* next_use) const { + int reg = -1; + // Pick the register that is used the last. + for (size_t i = 0; i < number_of_registers_; ++i) { + if (IsBlocked(i)) continue; + if (reg == -1 || next_use[i] > next_use[reg]) { + reg = i; + if (next_use[i] == kMaxLifetimePosition) break; + } + } + return reg; +} + // Find the register that is used the last, and spill the interval // that holds it. If the first use of `current` is after that register // we spill `current` instead. @@ -731,17 +822,20 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) { } } - // Pick the register that is used the last. int reg = -1; - for (size_t i = 0; i < number_of_registers_; ++i) { - if (IsBlocked(i)) continue; - if (reg == -1 || next_use[i] > next_use[reg]) { - reg = i; - if (next_use[i] == kMaxLifetimePosition) break; - } + if (current->HasRegister()) { + DCHECK(current->IsHighInterval()); + reg = current->GetRegister(); + } else if (current->IsLowInterval()) { + reg = FindAvailableRegisterPair(next_use); + } else { + DCHECK(!current->IsHighInterval()); + reg = FindAvailableRegister(next_use); } - if (first_register_use >= next_use[reg]) { + if ((first_register_use >= next_use[reg]) + || (current->IsLowInterval() && first_register_use >= next_use[GetHighForLowRegister(reg)])) { + DCHECK(!current->IsHighInterval()); // If the first use of that instruction is after the last use of the found // register, we split this interval just before its first register use. AllocateSpillSlotFor(current); @@ -815,23 +909,49 @@ void RegisterAllocator::AddSorted(GrowableArray<LiveInterval*>* array, LiveInter break; } } + array->InsertAt(insert_at, interval); + // Insert the high interval before the low, to ensure the low is processed before. + if (interval->HasHighInterval()) { + array->InsertAt(insert_at, interval->GetHighInterval()); + } else if (interval->HasLowInterval()) { + array->InsertAt(insert_at + 1, interval->GetLowInterval()); + } } LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) { - DCHECK(position >= interval->GetStart()); + DCHECK_GE(position, interval->GetStart()); DCHECK(!interval->IsDeadAt(position)); if (position == interval->GetStart()) { // Spill slot will be allocated when handling `interval` again. 
interval->ClearRegister(); + if (interval->HasHighInterval()) { + interval->GetHighInterval()->ClearRegister(); + } else if (interval->HasLowInterval()) { + interval->GetLowInterval()->ClearRegister(); + } return interval; } else { LiveInterval* new_interval = interval->SplitAt(position); + if (interval->HasHighInterval()) { + LiveInterval* high = interval->GetHighInterval()->SplitAt(position); + new_interval->SetHighInterval(high); + high->SetLowInterval(new_interval); + } else if (interval->HasLowInterval()) { + LiveInterval* low = interval->GetLowInterval()->SplitAt(position); + new_interval->SetLowInterval(low); + low->SetHighInterval(new_interval); + } return new_interval; } } void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) { + if (interval->IsHighInterval()) { + // The low interval will contain the spill slot. + return; + } + LiveInterval* parent = interval->GetParent(); // An instruction gets a spill slot for its entire lifetime. If the parent @@ -898,6 +1018,7 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) { static bool IsValidDestination(Location destination) { return destination.IsRegister() || destination.IsFpuRegister() + || destination.IsFpuRegisterPair() || destination.IsStackSlot() || destination.IsDoubleStackSlot(); } @@ -905,7 +1026,6 @@ static bool IsValidDestination(Location destination) { void RegisterAllocator::AddInputMoveFor(HInstruction* user, Location source, Location destination) const { - DCHECK(IsValidDestination(destination)); if (source.Equals(destination)) return; DCHECK(!user->IsPhi()); @@ -1075,9 +1195,7 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) { if (current->HasSpillSlot() && current->HasRegister()) { // We spill eagerly, so move must be at definition. InsertMoveAfter(interval->GetDefinedBy(), - interval->IsFloatingPoint() - ? Location::FpuRegisterLocation(interval->GetRegister()) - : Location::RegisterLocation(interval->GetRegister()), + interval->ToLocation(), interval->NeedsTwoSpillSlots() ? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot()) : Location::StackSlot(interval->GetParent()->GetSpillSlot())); @@ -1148,6 +1266,11 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) { locations->AddLiveRegister(source); break; } + case Location::kFpuRegisterPair: { + locations->AddLiveRegister(source.ToLow()); + locations->AddLiveRegister(source.ToHigh()); + break; + } case Location::kStackSlot: // Fall-through case Location::kDoubleStackSlot: // Fall-through case Location::kConstant: { @@ -1307,6 +1430,10 @@ void RegisterAllocator::Resolve() { size_t temp_index = 0; for (size_t i = 0; i < temp_intervals_.Size(); ++i) { LiveInterval* temp = temp_intervals_.Get(i); + if (temp->IsHighInterval()) { + // High intervals can be skipped, they are already handled by the low interval. + continue; + } HInstruction* at = liveness_.GetTempUser(temp); if (at != current) { temp_index = 0; @@ -1320,14 +1447,14 @@ void RegisterAllocator::Resolve() { break; case Primitive::kPrimDouble: - // TODO: Support the case of ARM, where a double value - // requires an FPU register pair (note that the ARM back end - // does not yet use this register allocator when a method uses - // floats or doubles). 
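On the paired `Split` logic above: when a two-register interval is split, its twin must be split at the same position and the two new siblings cross-linked, otherwise the low and high halves would describe different lifetimes. A minimal model of that invariant (illustrative types, not ART's LiveInterval):

    #include <cassert>
    #include <cstddef>

    // Each interval that needs two registers carries a twin (its high or low
    // half). A split must happen on both chains at the same position, with
    // the two tails re-linked, mirroring Split()/SplitAt() above.
    struct Interval {
      size_t start;
      size_t end;
      Interval* twin = nullptr;

      Interval* SplitAt(size_t pos) {  // [start,end) -> [start,pos) + [pos,end)
        Interval* rest = new Interval{pos, end, nullptr};
        end = pos;
        return rest;
      }
    };

    static Interval* SplitPair(Interval* low, size_t pos) {
      Interval* new_low = low->SplitAt(pos);
      Interval* new_high = low->twin->SplitAt(pos);
      new_low->twin = new_high;
      new_high->twin = new_low;
      return new_low;
    }

    int main() {
      Interval low{0, 100};
      Interval high{0, 100};
      low.twin = &high;
      high.twin = &low;
      Interval* rest = SplitPair(&low, 40);
      assert(low.end == 40 && high.end == 40);
      assert(rest->start == 40 && rest->twin->start == 40);
      delete rest->twin;
      delete rest;
      return 0;
    }
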
- DCHECK(codegen_->GetInstructionSet() != kArm - && codegen_->GetInstructionSet() != kThumb2); - locations->SetTempAt( - temp_index++, Location::FpuRegisterLocation(temp->GetRegister())); + if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) { + Location location = Location::FpuRegisterPairLocation( + temp->GetRegister(), temp->GetHighInterval()->GetRegister()); + locations->SetTempAt(temp_index++, location); + } else { + locations->SetTempAt( + temp_index++, Location::FpuRegisterLocation(temp->GetRegister())); + } break; default: diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h index cbe741c2b3..c152a8bf67 100644 --- a/compiler/optimizing/register_allocator.h +++ b/compiler/optimizing/register_allocator.h @@ -128,6 +128,8 @@ class RegisterAllocator { bool ValidateInternal(bool log_fatal_on_failure) const; void DumpInterval(std::ostream& stream, LiveInterval* interval) const; void DumpAllIntervals(std::ostream& stream) const; + int FindAvailableRegisterPair(size_t* next_use) const; + int FindAvailableRegister(size_t* next_use) const; ArenaAllocator* const allocator_; CodeGenerator* const codegen_; diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 660a5c5f60..d41157b8d8 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -419,10 +419,21 @@ bool LiveInterval::NeedsTwoSpillSlots() const { } Location LiveInterval::ToLocation() const { + DCHECK(!IsHighInterval()); if (HasRegister()) { - return IsFloatingPoint() - ? Location::FpuRegisterLocation(GetRegister()) - : Location::RegisterLocation(GetRegister()); + if (IsFloatingPoint()) { + if (HasHighInterval()) { + return Location::FpuRegisterPairLocation(GetRegister(), GetHighInterval()->GetRegister()); + } else { + return Location::FpuRegisterLocation(GetRegister()); + } + } else { + if (HasHighInterval()) { + return Location::RegisterPairLocation(GetRegister(), GetHighInterval()->GetRegister()); + } else { + return Location::RegisterLocation(GetRegister()); + } + } } else { HInstruction* defined_by = GetParent()->GetDefinedBy(); if (defined_by->IsConstant()) { diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index 23123891ef..74611e1cbb 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -77,6 +77,15 @@ class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> { stream << "[" << start_ << ", " << end_ << ")"; } + LiveRange* Dup(ArenaAllocator* allocator) const { + return new (allocator) LiveRange( + start_, end_, next_ == nullptr ? nullptr : next_->Dup(allocator)); + } + + LiveRange* GetLastRange() { + return next_ == nullptr ? this : next_->GetLastRange(); + } + private: size_t start_; size_t end_; @@ -123,6 +132,12 @@ class UsePosition : public ArenaObject<kArenaAllocMisc> { stream << position_; } + UsePosition* Dup(ArenaAllocator* allocator) const { + return new (allocator) UsePosition( + user_, input_index_, is_environment_, position_, + next_ == nullptr ? 
nullptr : next_->Dup(allocator)); + } + private: HInstruction* const user_; const size_t input_index_; @@ -478,6 +493,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> { } stream << "}"; stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit(); + stream << " is_high: " << IsHighInterval(); + stream << " is_low: " << IsLowInterval(); } LiveInterval* GetNextSibling() const { return next_sibling_; } @@ -512,6 +529,58 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> { // Returns whether `other` and `this` share the same kind of register. bool SameRegisterKind(Location other) const; + bool HasHighInterval() const { + return !IsHighInterval() && (GetParent()->high_or_low_interval_ != nullptr); + } + + bool HasLowInterval() const { + return IsHighInterval(); + } + + LiveInterval* GetLowInterval() const { + DCHECK(HasLowInterval()); + return high_or_low_interval_; + } + + LiveInterval* GetHighInterval() const { + DCHECK(HasHighInterval()); + return high_or_low_interval_; + } + + bool IsHighInterval() const { + return GetParent()->is_high_interval_; + } + + bool IsLowInterval() const { + return !IsHighInterval() && (GetParent()->high_or_low_interval_ != nullptr); + } + + void SetLowInterval(LiveInterval* low) { + DCHECK(IsHighInterval()); + high_or_low_interval_ = low; + } + + void SetHighInterval(LiveInterval* high) { + DCHECK(IsLowInterval()); + high_or_low_interval_ = high; + } + + void AddHighInterval(bool is_temp = false) { + DCHECK_EQ(GetParent(), this); + DCHECK(!HasHighInterval()); + DCHECK(!HasLowInterval()); + high_or_low_interval_ = new (allocator_) LiveInterval( + allocator_, type_, defined_by_, false, kNoRegister, is_temp, false, true); + high_or_low_interval_->high_or_low_interval_ = this; + if (first_range_ != nullptr) { + high_or_low_interval_->first_range_ = first_range_->Dup(allocator_); + high_or_low_interval_->last_range_ = first_range_->GetLastRange(); + } + if (first_use_ != nullptr) { + high_or_low_interval_->first_use_ = first_use_->Dup(allocator_); + } + } + private: LiveInterval(ArenaAllocator* allocator, Primitive::Type type, @@ -519,7 +588,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> { bool is_fixed = false, int reg = kNoRegister, bool is_temp = false, - bool is_slow_path_safepoint = false) + bool is_slow_path_safepoint = false, + bool is_high_interval = false) : allocator_(allocator), first_range_(nullptr), last_range_(nullptr), @@ -532,6 +602,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> { is_fixed_(is_fixed), is_temp_(is_temp), is_slow_path_safepoint_(is_slow_path_safepoint), + is_high_interval_(is_high_interval), + high_or_low_interval_(nullptr), defined_by_(defined_by) {} ArenaAllocator* const allocator_; @@ -568,6 +640,13 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> { // Whether the interval is for a safepoint that calls on slow path. const bool is_slow_path_safepoint_; + // Whether this interval is a synthesized interval for register pair. + const bool is_high_interval_; + + // If this interval needs a register pair, the high or low equivalent. + // `is_high_interval_` tells whether this holds the low or the high. + LiveInterval* high_or_low_interval_; + // The instruction represented by this interval. 
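`AddHighInterval` above seeds the synthesized high interval with deep copies of the low interval's ranges and use positions, so the twin starts out covering exactly the same lifetime. The recursive `Dup` pattern it relies on, modeled standalone (plain `new` here instead of ART's arena allocator):

    #include <cassert>
    #include <cstddef>

    // Recursive deep copy of a singly linked range list, as in LiveRange::Dup.
    struct Range {
      size_t start;
      size_t end;
      Range* next;

      Range* Dup() const {
        return new Range{start, end, next == nullptr ? nullptr : next->Dup()};
      }
      Range* Last() { return next == nullptr ? this : next->Last(); }
    };

    int main() {
      Range* ranges = new Range{0, 4, new Range{8, 12, nullptr}};
      Range* copy = ranges->Dup();
      copy->start = 2;  // The copy is independent of the original list.
      assert(ranges->start == 0 && copy->start == 2);
      assert(copy->Last()->end == 12 && copy->next != ranges->next);
      return 0;  // Leaks are fine for a throwaway demo; ART uses an arena.
    }
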
HInstruction* const defined_by_; diff --git a/compiler/optimizing/test/ConstantFolding.java b/compiler/optimizing/test/ConstantFolding.java index 92f2a775b9..d08006b4d5 100644 --- a/compiler/optimizing/test/ConstantFolding.java +++ b/compiler/optimizing/test/ConstantFolding.java @@ -22,13 +22,13 @@ public class ConstantFolding { */ // CHECK-START: int ConstantFolding.IntNegation() constant_folding (before) - // CHECK-DAG: [[Const42:i[0-9]+]] IntConstant 42 - // CHECK-DAG: [[Neg:i[0-9]+]] Neg [ [[Const42]] ] - // CHECK-DAG: Return [ [[Neg]] ] + // CHECK-DAG: [[Const42:i\d+]] IntConstant 42 + // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Const42]] ] + // CHECK-DAG: Return [ [[Neg]] ] // CHECK-START: int ConstantFolding.IntNegation() constant_folding (after) - // CHECK-DAG: [[ConstN42:i[0-9]+]] IntConstant -42 - // CHECK-DAG: Return [ [[ConstN42]] ] + // CHECK-DAG: [[ConstN42:i\d+]] IntConstant -42 + // CHECK-DAG: Return [ [[ConstN42]] ] public static int IntNegation() { int x, y; @@ -43,14 +43,14 @@ public class ConstantFolding { */ // CHECK-START: int ConstantFolding.IntAddition1() constant_folding (before) - // CHECK-DAG: [[Const1:i[0-9]+]] IntConstant 1 - // CHECK-DAG: [[Const2:i[0-9]+]] IntConstant 2 - // CHECK-DAG: [[Add:i[0-9]+]] Add [ [[Const1]] [[Const2]] ] - // CHECK-DAG: Return [ [[Add]] ] + // CHECK-DAG: [[Const1:i\d+]] IntConstant 1 + // CHECK-DAG: [[Const2:i\d+]] IntConstant 2 + // CHECK-DAG: [[Add:i\d+]] Add [ [[Const1]] [[Const2]] ] + // CHECK-DAG: Return [ [[Add]] ] // CHECK-START: int ConstantFolding.IntAddition1() constant_folding (after) - // CHECK-DAG: [[Const3:i[0-9]+]] IntConstant 3 - // CHECK-DAG: Return [ [[Const3]] ] + // CHECK-DAG: [[Const3:i\d+]] IntConstant 3 + // CHECK-DAG: Return [ [[Const3]] ] public static int IntAddition1() { int a, b, c; @@ -66,18 +66,18 @@ public class ConstantFolding { */ // CHECK-START: int ConstantFolding.IntAddition2() constant_folding (before) - // CHECK-DAG: [[Const1:i[0-9]+]] IntConstant 1 - // CHECK-DAG: [[Const2:i[0-9]+]] IntConstant 2 - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Const6:i[0-9]+]] IntConstant 6 - // CHECK-DAG: [[Add1:i[0-9]+]] Add [ [[Const1]] [[Const2]] ] - // CHECK-DAG: [[Add2:i[0-9]+]] Add [ [[Const5]] [[Const6]] ] - // CHECK-DAG: [[Add3:i[0-9]+]] Add [ [[Add1]] [[Add2]] ] - // CHECK-DAG: Return [ [[Add3]] ] + // CHECK-DAG: [[Const1:i\d+]] IntConstant 1 + // CHECK-DAG: [[Const2:i\d+]] IntConstant 2 + // CHECK-DAG: [[Const5:i\d+]] IntConstant 5 + // CHECK-DAG: [[Const6:i\d+]] IntConstant 6 + // CHECK-DAG: [[Add1:i\d+]] Add [ [[Const1]] [[Const2]] ] + // CHECK-DAG: [[Add2:i\d+]] Add [ [[Const5]] [[Const6]] ] + // CHECK-DAG: [[Add3:i\d+]] Add [ [[Add1]] [[Add2]] ] + // CHECK-DAG: Return [ [[Add3]] ] // CHECK-START: int ConstantFolding.IntAddition2() constant_folding (after) - // CHECK-DAG: [[Const14:i[0-9]+]] IntConstant 14 - // CHECK-DAG: Return [ [[Const14]] ] + // CHECK-DAG: [[Const14:i\d+]] IntConstant 14 + // CHECK-DAG: Return [ [[Const14]] ] public static int IntAddition2() { int a, b, c; @@ -97,14 +97,14 @@ public class ConstantFolding { */ // CHECK-START: int ConstantFolding.IntSubtraction() constant_folding (before) - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Const2:i[0-9]+]] IntConstant 2 - // CHECK-DAG: [[Sub:i[0-9]+]] Sub [ [[Const5]] [[Const2]] ] - // CHECK-DAG: Return [ [[Sub]] ] + // CHECK-DAG: [[Const5:i\d+]] IntConstant 5 + // CHECK-DAG: [[Const2:i\d+]] IntConstant 2 + // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const5]] [[Const2]] ] + // CHECK-DAG: Return [ [[Sub]] ] // 
CHECK-START: int ConstantFolding.IntSubtraction() constant_folding (after) - // CHECK-DAG: [[Const3:i[0-9]+]] IntConstant 3 - // CHECK-DAG: Return [ [[Const3]] ] + // CHECK-DAG: [[Const3:i\d+]] IntConstant 3 + // CHECK-DAG: Return [ [[Const3]] ] public static int IntSubtraction() { int a, b, c; @@ -120,14 +120,14 @@ public class ConstantFolding { */ // CHECK-START: long ConstantFolding.LongAddition() constant_folding (before) - // CHECK-DAG: [[Const1:j[0-9]+]] LongConstant 1 - // CHECK-DAG: [[Const2:j[0-9]+]] LongConstant 2 - // CHECK-DAG: [[Add:j[0-9]+]] Add [ [[Const1]] [[Const2]] ] - // CHECK-DAG: Return [ [[Add]] ] + // CHECK-DAG: [[Const1:j\d+]] LongConstant 1 + // CHECK-DAG: [[Const2:j\d+]] LongConstant 2 + // CHECK-DAG: [[Add:j\d+]] Add [ [[Const1]] [[Const2]] ] + // CHECK-DAG: Return [ [[Add]] ] // CHECK-START: long ConstantFolding.LongAddition() constant_folding (after) - // CHECK-DAG: [[Const3:j[0-9]+]] LongConstant 3 - // CHECK-DAG: Return [ [[Const3]] ] + // CHECK-DAG: [[Const3:j\d+]] LongConstant 3 + // CHECK-DAG: Return [ [[Const3]] ] public static long LongAddition() { long a, b, c; @@ -143,14 +143,14 @@ public class ConstantFolding { */ // CHECK-START: long ConstantFolding.LongSubtraction() constant_folding (before) - // CHECK-DAG: [[Const5:j[0-9]+]] LongConstant 5 - // CHECK-DAG: [[Const2:j[0-9]+]] LongConstant 2 - // CHECK-DAG: [[Sub:j[0-9]+]] Sub [ [[Const5]] [[Const2]] ] - // CHECK-DAG: Return [ [[Sub]] ] + // CHECK-DAG: [[Const5:j\d+]] LongConstant 5 + // CHECK-DAG: [[Const2:j\d+]] LongConstant 2 + // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Const5]] [[Const2]] ] + // CHECK-DAG: Return [ [[Sub]] ] // CHECK-START: long ConstantFolding.LongSubtraction() constant_folding (after) - // CHECK-DAG: [[Const3:j[0-9]+]] LongConstant 3 - // CHECK-DAG: Return [ [[Const3]] ] + // CHECK-DAG: [[Const3:j\d+]] LongConstant 3 + // CHECK-DAG: Return [ [[Const3]] ] public static long LongSubtraction() { long a, b, c; @@ -165,14 +165,14 @@ public class ConstantFolding { */ // CHECK-START: int ConstantFolding.StaticCondition() constant_folding (before) - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Const2:i[0-9]+]] IntConstant 2 - // CHECK-DAG: [[Cond:z[0-9]+]] GreaterThanOrEqual [ [[Const5]] [[Const2]] ] - // CHECK-DAG: If [ [[Cond]] ] + // CHECK-DAG: [[Const5:i\d+]] IntConstant 5 + // CHECK-DAG: [[Const2:i\d+]] IntConstant 2 + // CHECK-DAG: [[Cond:z\d+]] GreaterThanOrEqual [ [[Const5]] [[Const2]] ] + // CHECK-DAG: If [ [[Cond]] ] // CHECK-START: int ConstantFolding.StaticCondition() constant_folding (after) - // CHECK-DAG: [[Const1:i[0-9]+]] IntConstant 1 - // CHECK-DAG: If [ [[Const1]] ] + // CHECK-DAG: [[Const1:i\d+]] IntConstant 1 + // CHECK-DAG: If [ [[Const1]] ] public static int StaticCondition() { int a, b, c; @@ -195,18 +195,18 @@ public class ConstantFolding { */ // CHECK-START: int ConstantFolding.JumpsAndConditionals(boolean) constant_folding (before) - // CHECK-DAG: [[Const2:i[0-9]+]] IntConstant 2 - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Add:i[0-9]+]] Add [ [[Const5]] [[Const2]] ] - // CHECK-DAG: [[Sub:i[0-9]+]] Sub [ [[Const5]] [[Const2]] ] - // CHECK-DAG: [[Phi:i[0-9]+]] Phi [ [[Add]] [[Sub]] ] - // CHECK-DAG: Return [ [[Phi]] ] + // CHECK-DAG: [[Const2:i\d+]] IntConstant 2 + // CHECK-DAG: [[Const5:i\d+]] IntConstant 5 + // CHECK-DAG: [[Add:i\d+]] Add [ [[Const5]] [[Const2]] ] + // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const5]] [[Const2]] ] + // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ] + // CHECK-DAG: Return [ [[Phi]] ] // CHECK-START: 
int ConstantFolding.JumpsAndConditionals(boolean) constant_folding (after) - // CHECK-DAG: [[Const3:i[0-9]+]] IntConstant 3 - // CHECK-DAG: [[Const7:i[0-9]+]] IntConstant 7 - // CHECK-DAG: [[Phi:i[0-9]+]] Phi [ [[Const7]] [[Const3]] ] - // CHECK-DAG: Return [ [[Phi]] ] + // CHECK-DAG: [[Const3:i\d+]] IntConstant 3 + // CHECK-DAG: [[Const7:i\d+]] IntConstant 7 + // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const7]] [[Const3]] ] + // CHECK-DAG: Return [ [[Phi]] ] public static int JumpsAndConditionals(boolean cond) { int a, b, c; diff --git a/compiler/optimizing/test/Inliner.java b/compiler/optimizing/test/Inliner.java index ce7409c958..54cce62a57 100644 --- a/compiler/optimizing/test/Inliner.java +++ b/compiler/optimizing/test/Inliner.java @@ -17,12 +17,12 @@ public class Inliner { // CHECK-START: void Inliner.InlineVoid() inliner (before) - // CHECK-DAG: [[Const42:i[0-9]+]] IntConstant 42 - // CHECK-DAG: InvokeStaticOrDirect - // CHECK-DAG: InvokeStaticOrDirect [ [[Const42]] ] + // CHECK-DAG: [[Const42:i\d+]] IntConstant 42 + // CHECK-DAG: InvokeStaticOrDirect + // CHECK-DAG: InvokeStaticOrDirect [ [[Const42]] ] // CHECK-START: void Inliner.InlineVoid() inliner (after) - // CHECK-NOT: InvokeStaticOrDirect + // CHECK-NOT: InvokeStaticOrDirect public static void InlineVoid() { returnVoid(); @@ -30,119 +30,119 @@ public class Inliner { } // CHECK-START: int Inliner.InlineParameter(int) inliner (before) - // CHECK-DAG: [[Param:i[0-9]+]] ParameterValue - // CHECK-DAG: [[Result:i[0-9]+]] InvokeStaticOrDirect [ [[Param]] ] - // CHECK-DAG: Return [ [[Result]] ] + // CHECK-DAG: [[Param:i\d+]] ParameterValue + // CHECK-DAG: [[Result:i\d+]] InvokeStaticOrDirect [ [[Param]] ] + // CHECK-DAG: Return [ [[Result]] ] // CHECK-START: int Inliner.InlineParameter(int) inliner (after) - // CHECK-DAG: [[Param:i[0-9]+]] ParameterValue - // CHECK-DAG: Return [ [[Param]] ] + // CHECK-DAG: [[Param:i\d+]] ParameterValue + // CHECK-DAG: Return [ [[Param]] ] public static int InlineParameter(int a) { return returnParameter(a); } // CHECK-START: long Inliner.InlineWideParameter(long) inliner (before) - // CHECK-DAG: [[Param:j[0-9]+]] ParameterValue - // CHECK-DAG: [[Result:j[0-9]+]] InvokeStaticOrDirect [ [[Param]] ] - // CHECK-DAG: Return [ [[Result]] ] + // CHECK-DAG: [[Param:j\d+]] ParameterValue + // CHECK-DAG: [[Result:j\d+]] InvokeStaticOrDirect [ [[Param]] ] + // CHECK-DAG: Return [ [[Result]] ] // CHECK-START: long Inliner.InlineWideParameter(long) inliner (after) - // CHECK-DAG: [[Param:j[0-9]+]] ParameterValue - // CHECK-DAG: Return [ [[Param]] ] + // CHECK-DAG: [[Param:j\d+]] ParameterValue + // CHECK-DAG: Return [ [[Param]] ] public static long InlineWideParameter(long a) { return returnWideParameter(a); } // CHECK-START: java.lang.Object Inliner.InlineReferenceParameter(java.lang.Object) inliner (before) - // CHECK-DAG: [[Param:l[0-9]+]] ParameterValue - // CHECK-DAG: [[Result:l[0-9]+]] InvokeStaticOrDirect [ [[Param]] ] - // CHECK-DAG: Return [ [[Result]] ] + // CHECK-DAG: [[Param:l\d+]] ParameterValue + // CHECK-DAG: [[Result:l\d+]] InvokeStaticOrDirect [ [[Param]] ] + // CHECK-DAG: Return [ [[Result]] ] // CHECK-START: java.lang.Object Inliner.InlineReferenceParameter(java.lang.Object) inliner (after) - // CHECK-DAG: [[Param:l[0-9]+]] ParameterValue - // CHECK-DAG: Return [ [[Param]] ] + // CHECK-DAG: [[Param:l\d+]] ParameterValue + // CHECK-DAG: Return [ [[Param]] ] public static Object InlineReferenceParameter(Object o) { return returnReferenceParameter(o); } // CHECK-START: int Inliner.InlineInt() inliner 
(before) - // CHECK-DAG: [[Result:i[0-9]+]] InvokeStaticOrDirect - // CHECK-DAG: Return [ [[Result]] ] + // CHECK-DAG: [[Result:i\d+]] InvokeStaticOrDirect + // CHECK-DAG: Return [ [[Result]] ] // CHECK-START: int Inliner.InlineInt() inliner (after) - // CHECK-DAG: [[Const4:i[0-9]+]] IntConstant 4 - // CHECK-DAG: Return [ [[Const4]] ] + // CHECK-DAG: [[Const4:i\d+]] IntConstant 4 + // CHECK-DAG: Return [ [[Const4]] ] public static int InlineInt() { return returnInt(); } // CHECK-START: long Inliner.InlineWide() inliner (before) - // CHECK-DAG: [[Result:j[0-9]+]] InvokeStaticOrDirect - // CHECK-DAG: Return [ [[Result]] ] + // CHECK-DAG: [[Result:j\d+]] InvokeStaticOrDirect + // CHECK-DAG: Return [ [[Result]] ] // CHECK-START: long Inliner.InlineWide() inliner (after) - // CHECK-DAG: [[Const8:j[0-9]+]] LongConstant 8 - // CHECK-DAG: Return [ [[Const8]] ] + // CHECK-DAG: [[Const8:j\d+]] LongConstant 8 + // CHECK-DAG: Return [ [[Const8]] ] public static long InlineWide() { return returnWide(); } // CHECK-START: int Inliner.InlineAdd() inliner (before) - // CHECK-DAG: [[Const3:i[0-9]+]] IntConstant 3 - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Result:i[0-9]+]] InvokeStaticOrDirect - // CHECK-DAG: Return [ [[Result]] ] + // CHECK-DAG: [[Const3:i\d+]] IntConstant 3 + // CHECK-DAG: [[Const5:i\d+]] IntConstant 5 + // CHECK-DAG: [[Result:i\d+]] InvokeStaticOrDirect + // CHECK-DAG: Return [ [[Result]] ] // CHECK-START: int Inliner.InlineAdd() inliner (after) - // CHECK-DAG: [[Const3:i[0-9]+]] IntConstant 3 - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Add:i[0-9]+]] Add [ [[Const3]] [[Const5]] ] - // CHECK-DAG: Return [ [[Add]] ] + // CHECK-DAG: [[Const3:i\d+]] IntConstant 3 + // CHECK-DAG: [[Const5:i\d+]] IntConstant 5 + // CHECK-DAG: [[Add:i\d+]] Add [ [[Const3]] [[Const5]] ] + // CHECK-DAG: Return [ [[Add]] ] public static int InlineAdd() { return returnAdd(3, 5); } // CHECK-START: int Inliner.InlineFieldAccess() inliner (before) - // CHECK-DAG: [[After:i[0-9]+]] InvokeStaticOrDirect - // CHECK-DAG: Return [ [[After]] ] + // CHECK-DAG: [[After:i\d+]] InvokeStaticOrDirect + // CHECK-DAG: Return [ [[After]] ] // CHECK-START: int Inliner.InlineFieldAccess() inliner (after) - // CHECK-DAG: [[Const1:i[0-9]+]] IntConstant 1 - // CHECK-DAG: [[Before:i[0-9]+]] StaticFieldGet - // CHECK-DAG: [[After:i[0-9]+]] Add [ [[Before]] [[Const1]] ] - // CHECK-DAG: StaticFieldSet [ {{l[0-9]+}} [[After]] ] - // CHECK-DAG: Return [ [[After]] ] + // CHECK-DAG: [[Const1:i\d+]] IntConstant 1 + // CHECK-DAG: [[Before:i\d+]] StaticFieldGet + // CHECK-DAG: [[After:i\d+]] Add [ [[Before]] [[Const1]] ] + // CHECK-DAG: StaticFieldSet [ {{l\d+}} [[After]] ] + // CHECK-DAG: Return [ [[After]] ] // CHECK-START: int Inliner.InlineFieldAccess() inliner (after) - // CHECK-NOT: InvokeStaticOrDirect + // CHECK-NOT: InvokeStaticOrDirect public static int InlineFieldAccess() { return incCounter(); } // CHECK-START: int Inliner.InlineWithControlFlow(boolean) inliner (before) - // CHECK-DAG: [[Const1:i[0-9]+]] IntConstant 1 - // CHECK-DAG: [[Const3:i[0-9]+]] IntConstant 3 - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Add:i[0-9]+]] InvokeStaticOrDirect [ [[Const1]] [[Const3]] ] - // CHECK-DAG: [[Sub:i[0-9]+]] InvokeStaticOrDirect [ [[Const5]] [[Const3]] ] - // CHECK-DAG: [[Phi:i[0-9]+]] Phi [ [[Add]] [[Sub]] ] - // CHECK-DAG: Return [ [[Phi]] ] + // CHECK-DAG: [[Const1:i\d+]] IntConstant 1 + // CHECK-DAG: [[Const3:i\d+]] IntConstant 3 + // CHECK-DAG: [[Const5:i\d+]] IntConstant 
5 + // CHECK-DAG: [[Add:i\d+]] InvokeStaticOrDirect [ [[Const1]] [[Const3]] ] + // CHECK-DAG: [[Sub:i\d+]] InvokeStaticOrDirect [ [[Const5]] [[Const3]] ] + // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ] + // CHECK-DAG: Return [ [[Phi]] ] // CHECK-START: int Inliner.InlineWithControlFlow(boolean) inliner (after) - // CHECK-DAG: [[Const1:i[0-9]+]] IntConstant 1 - // CHECK-DAG: [[Const3:i[0-9]+]] IntConstant 3 - // CHECK-DAG: [[Const5:i[0-9]+]] IntConstant 5 - // CHECK-DAG: [[Add:i[0-9]+]] Add [ [[Const1]] [[Const3]] ] - // CHECK-DAG: [[Sub:i[0-9]+]] Sub [ [[Const5]] [[Const3]] ] - // CHECK-DAG: [[Phi:i[0-9]+]] Phi [ [[Add]] [[Sub]] ] - // CHECK-DAG: Return [ [[Phi]] ] + // CHECK-DAG: [[Const1:i\d+]] IntConstant 1 + // CHECK-DAG: [[Const3:i\d+]] IntConstant 3 + // CHECK-DAG: [[Const5:i\d+]] IntConstant 5 + // CHECK-DAG: [[Add:i\d+]] Add [ [[Const1]] [[Const3]] ] + // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const5]] [[Const3]] ] + // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ] + // CHECK-DAG: Return [ [[Phi]] ] public static int InlineWithControlFlow(boolean cond) { int x, const1, const3, const5; diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h index 87b38133fb..d9122764d0 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -534,6 +534,13 @@ class ArmAssembler : public Assembler { // Load and Store. May clobber IP. virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0; + void LoadSImmediate(SRegister sd, float value, Condition cond = AL) { + if (!vmovs(sd, value, cond)) { + LoadImmediate(IP, bit_cast<int32_t, float>(value), cond); + vmovsr(sd, IP, cond); + } + } + virtual void MarkExceptionHandler(Label* label) = 0; virtual void LoadFromOffset(LoadOperandType type, Register reg, diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 63009bf25e..4f279f23d4 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -1014,17 +1014,16 @@ class Dex2Oat FINAL { bool Setup() { TimingLogger::ScopedTiming t("dex2oat Setup", timings_); RuntimeOptions runtime_options; - std::vector<const DexFile*> boot_class_path; art::MemMap::Init(); // For ZipEntry::ExtractToMemMap. 
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 63009bf25e..4f279f23d4 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1014,17 +1014,16 @@ class Dex2Oat FINAL {
   bool Setup() {
     TimingLogger::ScopedTiming t("dex2oat Setup", timings_);
     RuntimeOptions runtime_options;
-    std::vector<const DexFile*> boot_class_path;
     art::MemMap::Init();  // For ZipEntry::ExtractToMemMap.
     if (boot_image_option_.empty()) {
-      size_t failure_count = OpenDexFiles(dex_filenames_, dex_locations_, boot_class_path);
-      if (failure_count > 0) {
-        LOG(ERROR) << "Failed to open some dex files: " << failure_count;
-        return false;
-      }
-      runtime_options.push_back(std::make_pair("bootclasspath", &boot_class_path));
+      std::string boot_class_path = "-Xbootclasspath:";
+      boot_class_path += Join(dex_filenames_, ':');
+      runtime_options.push_back(std::make_pair(boot_class_path, nullptr));
+      std::string boot_class_path_locations = "-Xbootclasspath-locations:";
+      boot_class_path_locations += Join(dex_locations_, ':');
+      runtime_options.push_back(std::make_pair(boot_class_path_locations, nullptr));
     } else {
-      runtime_options.push_back(std::make_pair(boot_image_option_.c_str(), nullptr));
+      runtime_options.push_back(std::make_pair(boot_image_option_, nullptr));
     }
     for (size_t i = 0; i < runtime_args_.size(); i++) {
       runtime_options.push_back(std::make_pair(runtime_args_[i], nullptr));
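The effect of this Setup() change: instead of handing the runtime a pre-opened vector of DexFile pointers, dex2oat now passes the boot class path as two plain string options and lets the runtime do the opening. The files flag says where the jars physically live; the locations flag says what they should be called. A hedged sketch of the pairing, with invented paths and a local reimplementation of art::Join:

    #include <cstddef>
    #include <string>
    #include <vector>

    // Local stand-in for art::Join: concatenate elements with a separator.
    static std::string Join(const std::vector<std::string>& v, char sep) {
      std::string out;
      for (size_t i = 0; i < v.size(); ++i) {
        if (i != 0) out += sep;
        out += v[i];
      }
      return out;
    }

    int main() {
      // Hypothetical host-side file paths vs. on-device locations.
      std::vector<std::string> files = {"/tmp/out/core-libart.jar", "/tmp/out/conscrypt.jar"};
      std::vector<std::string> locations = {"/system/framework/core-libart.jar",
                                            "/system/framework/conscrypt.jar"};
      std::string f = "-Xbootclasspath:" + Join(files, ':');
      std::string l = "-Xbootclasspath-locations:" + Join(locations, ':');
      // The two lists must have the same number of entries, or option parsing fails.
      return files.size() == locations.size() ? 0 : 1;
    }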
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 98fe0798ae..75ba9dd969 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -201,22 +201,17 @@ void CommonRuntimeTest::SetUp() {
   int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
   ASSERT_EQ(mkdir_result, 0);
-  MemMap::Init();  // For LoadExpectSingleDexFile
-
-  std::string error_msg;
-  java_lang_dex_file_ = LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str());
-  boot_class_path_.push_back(java_lang_dex_file_);
-
   std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
   std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));

   callbacks_.reset(new NoopCompilerCallbacks());

   RuntimeOptions options;
-  options.push_back(std::make_pair("bootclasspath", &boot_class_path_));
+  std::string boot_class_path_string = "-Xbootclasspath:" + GetLibCoreDexFileName();
+  options.push_back(std::make_pair(boot_class_path_string, nullptr));
   options.push_back(std::make_pair("-Xcheck:jni", nullptr));
-  options.push_back(std::make_pair(min_heap_string.c_str(), nullptr));
-  options.push_back(std::make_pair(max_heap_string.c_str(), nullptr));
+  options.push_back(std::make_pair(min_heap_string, nullptr));
+  options.push_back(std::make_pair(max_heap_string, nullptr));
   options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
   SetUpRuntimeOptions(&options);
   if (!Runtime::Create(options, false)) {
@@ -239,6 +234,11 @@ void CommonRuntimeTest::SetUp() {
   // pool is created by the runtime.
   runtime_->GetHeap()->CreateThreadPool();
   runtime_->GetHeap()->VerifyHeap();  // Check for heap corruption before the test
+
+  // Get the boot class path from the runtime so it can be used in tests.
+  boot_class_path_ = class_linker_->GetBootClassPath();
+  ASSERT_FALSE(boot_class_path_.empty());
+  java_lang_dex_file_ = boot_class_path_[0];
 }

 void CommonRuntimeTest::ClearDirectory(const char* dirpath) {

diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 8851185ce1..35dc30fb1f 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -116,7 +116,7 @@ class CommonRuntimeTest : public testing::Test {
   std::string android_data_;
   std::string dalvik_cache_;
   const DexFile* java_lang_dex_file_;  // owned by runtime_
-  std::vector<const DexFile*> boot_class_path_;
+  std::vector<const DexFile*> boot_class_path_;  // owned by runtime_
   std::unique_ptr<Runtime> runtime_;

   // Owned by the runtime
   ClassLinker* class_linker_;

diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index e73166b091..7bc83effd3 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -160,6 +160,12 @@ class SpaceBitmap {
     return IndexToOffset<uint64_t>(Size() / sizeof(intptr_t));
   }

+  void SetHeapSize(size_t bytes) {
+    // TODO: Un-map the end of the mem map.
+    bitmap_size_ = OffsetToIndex(bytes) * sizeof(intptr_t);
+    CHECK_EQ(HeapSize(), bytes);
+  }
+
   uintptr_t HeapBegin() const {
     return heap_begin_;
   }

diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2575676bc8..db287d3436 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2978,6 +2978,20 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
   }
 }

+void Heap::ClampGrowthLimit() {
+  capacity_ = growth_limit_;
+  for (const auto& space : continuous_spaces_) {
+    if (space->IsMallocSpace()) {
+      gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
+      malloc_space->ClampGrowthLimit();
+    }
+  }
+  // This space isn't added for performance reasons.
+  if (main_space_backup_.get() != nullptr) {
+    main_space_backup_->ClampGrowthLimit();
+  }
+}
+
 void Heap::ClearGrowthLimit() {
   growth_limit_ = capacity_;
   for (const auto& space : continuous_spaces_) {

diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1738124c0c..fc61fc57d5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -302,6 +302,10 @@ class Heap {
   // implement dalvik.system.VMRuntime.clearGrowthLimit.
   void ClearGrowthLimit();

+  // Make the current growth limit the new maximum capacity, unmapping pages at the end of
+  // spaces that will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
+  void ClampGrowthLimit();
+
   // Target ideal heap utilization ratio, implements
   // dalvik.system.VMRuntime.getTargetHeapUtilization.
   double GetTargetHeapUtilization() const {
@@ -902,7 +906,7 @@ class Heap {
   collector::GcType next_gc_type_;

   // Maximum size that the heap can reach.
-  const size_t capacity_;
+  size_t capacity_;

   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
   // programs it is "cleared" making it the same as capacity.
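What clamping buys: clearGrowthLimit lets a large-heap app raise its soft cap to the full capacity, while clampGrowthLimit goes the other way, handing the never-to-be-used tail back to the OS, after which the cap can no longer be raised. A toy model of the two fields (sizes invented; real values come from -Xmx and the per-device growth limit):

    #include <cassert>
    #include <cstddef>

    // Toy model of Heap::ClampGrowthLimit: growth_limit_ is the soft cap,
    // capacity_ the hard one. Clamping makes them equal and releases the rest.
    struct ToyHeap {
      size_t capacity_ = 256 * 1024 * 1024;      // invented: hard cap, e.g. from -Xmx
      size_t growth_limit_ = 128 * 1024 * 1024;  // invented: soft cap (device growth limit)
      size_t ReclaimableBytes() const { return capacity_ - growth_limit_; }
      void ClampGrowthLimit() { capacity_ = growth_limit_; }
      void ClearGrowthLimit() { growth_limit_ = capacity_; }
    };

    int main() {
      ToyHeap heap;
      assert(heap.ReclaimableBytes() == 128u * 1024 * 1024);
      heap.ClampGrowthLimit();  // tail pages get unmapped; capacity_ == growth_limit_
      heap.ClearGrowthLimit();  // now a no-op: there is nothing left to clear to
      assert(heap.capacity_ == heap.growth_limit_);
      return 0;
    }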
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 7905bb4854..9bbbb3cbdf 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -248,6 +248,16 @@ void MallocSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* ar
   context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
 }

+void MallocSpace::ClampGrowthLimit() {
+  size_t new_capacity = Capacity();
+  CHECK_LE(new_capacity, NonGrowthLimitCapacity());
+  GetLiveBitmap()->SetHeapSize(new_capacity);
+  GetMarkBitmap()->SetHeapSize(new_capacity);
+  GetMemMap()->SetSize(new_capacity);
+  limit_ = Begin() + new_capacity;
+  CHECK(temp_bitmap_.get() == nullptr);
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art

diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 2fbd5f0238..06239e5e73 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -110,6 +110,10 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
     return GetMemMap()->Size();
   }

+  // Change the non-growth-limit capacity by shrinking or expanding the map. Currently, only
+  // shrinking is supported.
+  void ClampGrowthLimit();
+
   void Dump(std::ostream& os) const;

   void SetGrowthLimit(size_t growth_limit);
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 9ddaf61c9f..222083b39f 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -21,12 +21,14 @@

 #include "handle.h"
 #include "thread.h"
+#include "verify_object-inl.h"

 namespace art {

 template<size_t kNumReferences>
 inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
     : HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
+  DCHECK_EQ(self, Thread::Current());
   static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference");
   // TODO: Figure out how to use a compile assert.
   CHECK_EQ(&storage_[0], GetReferences());
@@ -42,6 +44,71 @@ inline StackHandleScope<kNumReferences>::~StackHandleScope() {
   DCHECK_EQ(top_handle_scope, this);
 }

+inline size_t HandleScope::SizeOf(uint32_t num_references) {
+  size_t header_size = sizeof(HandleScope);
+  size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
+  return header_size + data_size;
+}
+
+inline size_t HandleScope::SizeOf(size_t pointer_size, uint32_t num_references) {
+  // Assume that the layout is packed.
+  size_t header_size = pointer_size + sizeof(number_of_references_);
+  size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
+  return header_size + data_size;
+}
+
+inline mirror::Object* HandleScope::GetReference(size_t i) const {
+  DCHECK_LT(i, number_of_references_);
+  return GetReferences()[i].AsMirrorPtr();
+}
+
+inline Handle<mirror::Object> HandleScope::GetHandle(size_t i) {
+  DCHECK_LT(i, number_of_references_);
+  return Handle<mirror::Object>(&GetReferences()[i]);
+}
+
+inline MutableHandle<mirror::Object> HandleScope::GetMutableHandle(size_t i) {
+  DCHECK_LT(i, number_of_references_);
+  return MutableHandle<mirror::Object>(&GetReferences()[i]);
+}
+
+inline void HandleScope::SetReference(size_t i, mirror::Object* object) {
+  DCHECK_LT(i, number_of_references_);
+  GetReferences()[i].Assign(object);
+}
+
+inline bool HandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry) const {
+  // A HandleScope should always contain something. One created by the
+  // jni_compiler should have a jobject/jclass as a native method is
+  // passed in a this pointer or a class
+  DCHECK_GT(number_of_references_, 0U);
+  return &GetReferences()[0] <= handle_scope_entry &&
+      handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
+}
+
+template<size_t kNumReferences> template<class T>
+inline MutableHandle<T> StackHandleScope<kNumReferences>::NewHandle(T* object) {
+  SetReference(pos_, object);
+  MutableHandle<T> h(GetHandle<T>(pos_));
+  pos_++;
+  return h;
+}
+
+template<size_t kNumReferences> template<class T>
+inline HandleWrapper<T> StackHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
+  SetReference(pos_, *object);
+  MutableHandle<T> h(GetHandle<T>(pos_));
+  pos_++;
+  return HandleWrapper<T>(object, h);
+}
+
+template<size_t kNumReferences>
+inline void StackHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
+  DCHECK_LT(i, kNumReferences);
+  VerifyObject(object);
+  GetReferences()[i].Assign(object);
+}
+
 }  // namespace art

 #endif  // ART_RUNTIME_HANDLE_SCOPE_INL_H_
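Moving these bodies out of the header leaves handle_scope.h as pure declarations, and the two SizeOf overloads encode the same packed layout: a link pointer, a count field, then num_references compressed reference slots. A small sketch of that arithmetic (assuming a uint32_t count and 4-byte StackReference slots, which the packed, cross-compilation-aware overload implies):

    #include <cstdint>
    #include <cstdio>

    // Mirrors HandleScope::SizeOf(pointer_size, num_references) under the
    // packed-layout assumption: link pointer + uint32_t count + 4-byte slots.
    static size_t HandleScopeSize(size_t pointer_size, uint32_t num_references) {
      const size_t kStackReferenceSize = 4;  // assumed 32-bit StackReference slot
      return pointer_size + sizeof(uint32_t) + kStackReferenceSize * num_references;
    }

    int main() {
      // A 3-reference scope costs 20 bytes on a 32-bit target, 24 on a 64-bit one.
      std::printf("%zu %zu\n", HandleScopeSize(4, 3), HandleScopeSize(8, 3));
      return 0;
    }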
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 2c4f0f915d..782bbeaabc 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -22,6 +22,7 @@
 #include "handle.h"
 #include "stack.h"
 #include "utils.h"
+#include "verify_object.h"

 namespace art {
 namespace mirror {
@@ -47,19 +48,10 @@ class PACKED(4) HandleScope {
   // takes the pointer size explicitly so that at compile time we can cross-compile correctly.

   // Returns the size of a HandleScope containing num_references handles.
-  static size_t SizeOf(uint32_t num_references) {
-    size_t header_size = sizeof(HandleScope);
-    size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
-    return header_size + data_size;
-  }
+  static size_t SizeOf(uint32_t num_references);

   // Returns the size of a HandleScope containing num_references handles.
-  static size_t SizeOf(size_t pointer_size, uint32_t num_references) {
-    // Assume that the layout is packed.
-    size_t header_size = pointer_size + sizeof(number_of_references_);
-    size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
-    return header_size + data_size;
-  }
+  static size_t SizeOf(size_t pointer_size, uint32_t num_references);

   // Link to previous HandleScope or null.
   HandleScope* GetLink() const {
@@ -67,37 +59,18 @@ }

   ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK_LT(i, number_of_references_);
-    return GetReferences()[i].AsMirrorPtr();
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK_LT(i, number_of_references_);
-    return Handle<mirror::Object>(&GetReferences()[i]);
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK_LT(i, number_of_references_);
-    return MutableHandle<mirror::Object>(&GetReferences()[i]);
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK_LT(i, number_of_references_);
-    GetReferences()[i].Assign(object);
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

-  bool Contains(StackReference<mirror::Object>* handle_scope_entry) const {
-    // A HandleScope should always contain something. One created by the
-    // jni_compiler should have a jobject/jclass as a native method is
-    // passed in a this pointer or a class
-    DCHECK_GT(number_of_references_, 0U);
-    return &GetReferences()[0] <= handle_scope_entry &&
-        handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
-  }
+  ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;

   // Offset of link within HandleScope, used by generated code.
   static size_t LinkOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
@@ -174,27 +147,14 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
   ALWAYS_INLINE ~StackHandleScope();

   template<class T>
-  ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetReference(pos_, object);
-    MutableHandle<T> h(GetHandle<T>(pos_));
-    pos_++;
-    return h;
-  }
+  ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   template<class T>
   ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetReference(pos_, *object);
-    MutableHandle<T> h(GetHandle<T>(pos_));
-    pos_++;
-    return HandleWrapper<T>(object, h);
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK_LT(i, kNumReferences);
-    GetReferences()[i].Assign(object);
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  private:
   template<class T>

diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc
index 60c6a5c23a..2cbfa81b91 100644
--- a/runtime/java_vm_ext_test.cc
+++ b/runtime/java_vm_ext_test.cc
@@ -69,7 +69,12 @@ static void* attach_current_thread_callback(void* arg ATTRIBUTE_UNUSED) {
   } else {
     ok = vms_buf[0]->AttachCurrentThreadAsDaemon(&env, nullptr);
   }
-  EXPECT_EQ(gSmallStack ? JNI_ERR : JNI_OK, ok);
+  // TODO: Find a way to test with exact SMALL_STACK value, for which we would bail. The pthreads
+  // spec says that the stack size argument is a lower bound, and bionic currently gives us
+  // a chunk more on arm64.
+  if (!gSmallStack) {
+    EXPECT_EQ(JNI_OK, ok);
+  }
   if (ok == JNI_OK) {
     ok = vms_buf[0]->DetachCurrentThread();
     EXPECT_EQ(JNI_OK, ok);

diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 8303f845a8..a722813867 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -665,6 +665,19 @@ void MemMap::Shutdown() {
   maps_ = nullptr;
 }

+void MemMap::SetSize(size_t new_size) {
+  if (new_size == base_size_) {
+    return;
+  }
+  CHECK_ALIGNED(new_size, kPageSize);
+  CHECK_EQ(base_size_, size_) << "Unsupported";
+  CHECK_LE(new_size, base_size_);
+  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
+                  base_size_ - new_size), 0) << new_size << " " << base_size_;
+  base_size_ = new_size;
+  size_ = new_size;
+}
+
 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
   os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                      mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
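MemMap::SetSize is the primitive the whole clamp chain bottoms out in: it munmaps the tail [BaseBegin() + new_size, BaseBegin() + base_size_) and shrinks the bookkeeping. A standalone Linux sketch of that tail-unmap, assuming 4 KiB pages for the example:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    int main() {
      // What SetSize does under the hood: release only the tail of a mapping.
      const size_t kPage = 4096;  // assumed page size for this sketch
      void* base = mmap(nullptr, 4 * kPage, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(base != MAP_FAILED);
      // Shrink from 4 pages to 1: unmap [base + new_size, base + old_size).
      size_t new_size = kPage;
      int rc = munmap(static_cast<char*>(base) + new_size, 4 * kPage - new_size);
      assert(rc == 0);
      munmap(base, new_size);  // clean up the remaining page
      return 0;
    }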
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 9b003aa66c..dc337e0564 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -107,6 +107,9 @@ class MemMap {
     return size_;
   }

+  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
+  void SetSize(size_t new_size);
+
   uint8_t* End() const {
     return Begin() + Size();
   }

diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 13f881d966..4dddd38b80 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -19,6 +19,7 @@

 #include "array.h"

+#include "base/stringprintf.h"
 #include "class.h"
 #include "gc/heap-inl.h"
 #include "thread.h"

diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index f503b354f7..471aa9c034 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -134,6 +134,10 @@ static void VMRuntime_clearGrowthLimit(JNIEnv*, jobject) {
   Runtime::Current()->GetHeap()->ClearGrowthLimit();
 }

+static void VMRuntime_clampGrowthLimit(JNIEnv*, jobject) {
+  Runtime::Current()->GetHeap()->ClampGrowthLimit();
+}
+
 static jboolean VMRuntime_isDebuggerActive(JNIEnv*, jobject) {
   return Dbg::IsDebuggerActive();
 }
@@ -577,6 +581,7 @@ static jstring VMRuntime_getCurrentInstructionSet(JNIEnv* env, jclass) {
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(VMRuntime, addressOf, "!(Ljava/lang/Object;)J"),
   NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
+  NATIVE_METHOD(VMRuntime, clampGrowthLimit, "()V"),
   NATIVE_METHOD(VMRuntime, classPath, "()Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, clearGrowthLimit, "()V"),
   NATIVE_METHOD(VMRuntime, concurrentGC, "()V"),
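On the Java side this surfaces as dalvik.system.VMRuntime.clampGrowthLimit(), bound through the gMethods table above. A compile-only sketch of roughly what the NATIVE_METHOD entry amounts to in plain JNI terms (note this is an illustration, not the macro's actual expansion; the "!" prefix in ART's table marks a fast-JNI variant that plain JNI has no equivalent for):

    #include <jni.h>

    // The table binds a no-argument, void native method on VMRuntime.
    static void VMRuntime_clampGrowthLimit(JNIEnv*, jobject) {
      // The real body calls Runtime::Current()->GetHeap()->ClampGrowthLimit().
    }

    static JNINativeMethod kClampMethod[] = {
        {const_cast<char*>("clampGrowthLimit"), const_cast<char*>("()V"),
         reinterpret_cast<void*>(VMRuntime_clampGrowthLimit)},
    };

    // Registration would then be:
    //   env->RegisterNatives(vmruntime_class, kClampMethod, 1);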
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 1b992d5159..4ba3cb999d 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -34,7 +34,6 @@ namespace art {

 ParsedOptions::ParsedOptions() :
-    boot_class_path_(nullptr),
     check_jni_(kIsDebugBuild),  // -Xcheck:jni is off by default for regular
                                 // builds but on by default in debug builds.
     force_copy_(false),
@@ -288,6 +287,9 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
     } else if (StartsWith(option, "-Xbootclasspath:")) {
       boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data();
       LOG(INFO) << "setting boot class path to " << boot_class_path_string_;
+    } else if (StartsWith(option, "-Xbootclasspath-locations:")) {
+      boot_class_path_locations_string_ = option.substr(
+          strlen("-Xbootclasspath-locations:")).data();
     } else if (option == "-classpath" || option == "-cp") {
       // TODO: support -Djava.class.path
       i++;
@@ -297,9 +299,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
       }
       const StringPiece& value = options[i].first;
       class_path_string_ = value.data();
-    } else if (option == "bootclasspath") {
-      boot_class_path_
-          = reinterpret_cast<const std::vector<const DexFile*>*>(options[i].second);
     } else if (StartsWith(option, "-Ximage:")) {
       if (!ParseStringAfterChar(option, ':', &image_)) {
         return false;
@@ -720,6 +719,24 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
     boot_class_path_string_.replace(core_jar_pos, core_jar.size(), core_libart_jar);
   }

+  if (!boot_class_path_locations_string_.empty()) {
+    std::vector<std::string> files;
+    Split(boot_class_path_string_, ':', &files);
+
+    std::vector<std::string> locations;
+    Split(boot_class_path_locations_string_, ':', &locations);
+
+    if (files.size() != locations.size()) {
+      Usage("The number of boot class path files does not match"
+            " the number of boot class path locations given\n"
+            "  boot class path files     (%zu): %s\n"
+            "  boot class path locations (%zu): %s\n",
+            files.size(), boot_class_path_string_.c_str(),
+            locations.size(), boot_class_path_locations_string_.c_str());
+      return false;
+    }
+  }
+
   if (compiler_callbacks_ == nullptr && image_.empty()) {
     image_ += GetAndroidRoot();
     image_ += "/framework/boot.art";
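The files/locations agreement is enforced once, at parse time, so downstream code (the CHECK_EQ in Runtime::Init) can rely on a one-to-one zip of the two lists. A self-contained sketch of the check, where Split is a local stand-in for art::Split and the jar paths are invented:

    #include <string>
    #include <vector>

    // Local stand-in for art::Split: break a ':'-separated list into elements.
    static void Split(const std::string& s, char sep, std::vector<std::string>* out) {
      size_t start = 0;
      while (start <= s.size()) {
        size_t end = s.find(sep, start);
        if (end == std::string::npos) end = s.size();
        if (end != start) out->push_back(s.substr(start, end - start));
        start = end + 1;
      }
    }

    int main() {
      std::vector<std::string> files, locations;
      Split("/tmp/core.jar:/tmp/okhttp.jar", ':', &files);
      Split("/system/framework/core.jar", ':', &locations);
      // 2 != 1, so ParsedOptions::Parse would print the Usage error and fail.
      return files.size() == locations.size() ? 0 : 1;
    }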
@@ -804,6 +821,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
   UsageMessage(stream, "  -Xgc:[no]postverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]presweepingverify\n");
   UsageMessage(stream, "  -Ximage:filename\n");
+  UsageMessage(stream, "  -Xbootclasspath-locations:bootclasspath\n"
+               "     (override the dex locations of the -Xbootclasspath files)\n");
   UsageMessage(stream, "  -XX:+DisableExplicitGC\n");
   UsageMessage(stream, "  -XX:ParallelGCThreads=integervalue\n");
   UsageMessage(stream, "  -XX:ConcGCThreads=integervalue\n");

diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 9294868349..c7162b826f 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -40,8 +40,8 @@ class ParsedOptions {
   // returns null if problem parsing and ignore_unrecognized is false
   static ParsedOptions* Create(const RuntimeOptions& options, bool ignore_unrecognized);

-  const std::vector<const DexFile*>* boot_class_path_;
   std::string boot_class_path_string_;
+  std::string boot_class_path_locations_string_;
   std::string class_path_string_;
   std::string image_;
   bool check_jni_;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a2c9f502b9..fb6034dcd3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -683,6 +683,7 @@ static bool OpenDexFilesFromImage(const std::string& image_location,

 static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
+                           const std::vector<std::string>& dex_locations,
                            const std::string& image_location,
                            std::vector<const DexFile*>& dex_files) {
   size_t failure_count = 0;
@@ -692,12 +693,13 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
   failure_count = 0;
   for (size_t i = 0; i < dex_filenames.size(); i++) {
     const char* dex_filename = dex_filenames[i].c_str();
+    const char* dex_location = dex_locations[i].c_str();
     std::string error_msg;
     if (!OS::FileExists(dex_filename)) {
       LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
       continue;
     }
-    if (!DexFile::Open(dex_filename, dex_filename, &error_msg, &dex_files)) {
+    if (!DexFile::Open(dex_filename, dex_location, &error_msg, &dex_files)) {
       LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
       ++failure_count;
     }
@@ -858,17 +860,25 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
   CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
   class_linker_ = new ClassLinker(intern_table_);
-  bool options_class_path_used = false;
   if (GetHeap()->HasImageSpace()) {
     class_linker_->InitFromImage();
     if (kIsDebugBuild) {
       GetHeap()->GetImageSpace()->VerifyImageAllocations();
     }
-  } else if (!IsCompiler() || !image_dex2oat_enabled_) {
+  } else {
     std::vector<std::string> dex_filenames;
     Split(boot_class_path_string_, ':', &dex_filenames);
+
+    std::vector<std::string> dex_locations;
+    if (options->boot_class_path_locations_string_.empty()) {
+      dex_locations = dex_filenames;
+    } else {
+      Split(options->boot_class_path_locations_string_, ':', &dex_locations);
+      CHECK_EQ(dex_filenames.size(), dex_locations.size());
+    }
+
     std::vector<const DexFile*> boot_class_path;
-    OpenDexFiles(dex_filenames, options->image_, boot_class_path);
+    OpenDexFiles(dex_filenames, dex_locations, options->image_, boot_class_path);
     class_linker_->InitWithoutImage(boot_class_path);
     // TODO: Should we move the following to InitWithoutImage?
     SetInstructionSet(kRuntimeISA);
@@ -878,18 +888,6 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
       SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
     }
   }
-  } else {
-    CHECK(options->boot_class_path_ != nullptr);
-    CHECK_NE(options->boot_class_path_->size(), 0U);
-    class_linker_->InitWithoutImage(*options->boot_class_path_);
-    options_class_path_used = true;
-  }
-
-  if (!options_class_path_used) {
-    // If the class linker does not take ownership of the boot class path, wipe it to prevent leaks.
-    auto boot_class_path_vector_ptr =
-        const_cast<std::vector<const DexFile*>*>(options->boot_class_path_);
-    STLDeleteElements(boot_class_path_vector_ptr);
-  }

   CHECK(class_linker_ != nullptr);

diff --git a/runtime/thread.cc b/runtime/thread.cc
index d2d5be7c1e..527d758ab0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -982,8 +982,9 @@ static bool ShouldShowNativeStack(const Thread* thread)

 void Thread::DumpJavaStack(std::ostream& os) const {
   // Dumping the Java stack involves the verifier for locks. The verifier operates under the
   // assumption that there is no exception pending on entry. Thus, stash any pending exception.
-  // TODO: Find a way to avoid const_cast.
-  StackHandleScope<3> scope(const_cast<Thread*>(this));
+  // Use Thread::Current() instead of this, in case a thread is dumping the stack of another,
+  // suspended thread.
+  StackHandleScope<3> scope(Thread::Current());
   Handle<mirror::Throwable> exc;
   Handle<mirror::Object> throw_location_this_object;
   Handle<mirror::ArtMethod> throw_location_method;
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 7234ec0951..ef12d6e127 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -60,6 +60,10 @@

 namespace art {

+#if defined(__linux__)
+static constexpr bool kUseAddr2line = !kIsTargetBuild;
+#endif
+
 pid_t GetTid() {
 #if defined(__APPLE__)
   uint64_t owner;
@@ -1117,6 +1121,66 @@ std::string GetSchedulerGroupName(pid_t tid) {
   return "";
 }

+#if defined(__linux__)
+static bool RunCommand(std::string cmd, std::ostream* os, const char* prefix) {
+  FILE* stream = popen(cmd.c_str(), "r");
+  if (stream) {
+    if (os != nullptr) {
+      bool odd_line = true;               // We indent them differently.
+      constexpr size_t kMaxBuffer = 128;  // Relatively small buffer. Should be OK as we're on an
+                                          // alt stack, but just to be sure...
+      char buffer[kMaxBuffer];
+      while (!feof(stream)) {
+        if (fgets(buffer, kMaxBuffer, stream) != nullptr) {
+          // Split on newlines.
+          char* tmp = buffer;
+          for (;;) {
+            char* new_line = strchr(tmp, '\n');
+            if (new_line == nullptr) {
+              // Print the rest.
+              if (*tmp != 0) {
+                if (prefix != nullptr) {
+                  *os << prefix;
+                }
+                if (!odd_line) {
+                  *os << " ";
+                }
+                *os << tmp;
+              }
+              break;
+            }
+            if (prefix != nullptr) {
+              *os << prefix;
+            }
+            *os << " ";
+            if (!odd_line) {
+              *os << " ";
+            }
+            char saved = *(new_line + 1);
+            *(new_line + 1) = 0;
+            *os << tmp;
+            *(new_line + 1) = saved;
+            tmp = new_line + 1;
+            odd_line = !odd_line;
+          }
+        }
+      }
+    }
+    pclose(stream);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+static void Addr2line(const std::string& map_src, uintptr_t offset, std::ostream& os,
+                      const char* prefix) {
+  std::string cmdline(StringPrintf("addr2line --functions --inlines --demangle -e %s %zx",
+                                   map_src.c_str(), offset));
+  RunCommand(cmdline.c_str(), &os, prefix);
+}
+#endif
+
 void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
     mirror::ArtMethod* current_method, void* ucontext_ptr) {
 #if __linux__
@@ -1142,6 +1206,16 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
     return;
   }

+  // Check whether we have and should use addr2line.
+  bool use_addr2line;
+  if (kUseAddr2line) {
+    // Try to run it to see whether we have it. Push an argument so that it doesn't assume a.out
+    // and print to stderr.
+    use_addr2line = RunCommand("addr2line -h", nullptr, nullptr);
+  } else {
+    use_addr2line = false;
+  }
+
   for (Backtrace::const_iterator it = backtrace->begin();
        it != backtrace->end(); ++it) {
     // We produce output like this:
@@ -1153,6 +1227,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
     // after the <RELATIVE_ADDR>. There can be any prefix data before the
     // #XX. <RELATIVE_ADDR> has to be a hex number but with no 0x prefix.
     os << prefix << StringPrintf("#%02zu pc ", it->num);
+    bool try_addr2line = false;
     if (!it->map) {
       os << StringPrintf("%08" PRIxPTR "  ???", it->pc);
     } else {
@@ -1163,6 +1238,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
       if (it->func_offset != 0) {
         os << "+" << it->func_offset;
       }
+      try_addr2line = true;
     } else if (current_method != nullptr &&
         Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
         current_method->PcIsWithinQuickCode(it->pc)) {
@@ -1175,9 +1251,12 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
       os << ")";
     }
     os << "\n";
+    if (try_addr2line && use_addr2line) {
+      Addr2line(it->map->name, it->pc - it->map->start, os, prefix);
+    }
   }
 #else
-  UNUSED(os, tid, prefix, current_method);
+  UNUSED(os, tid, prefix, current_method, ucontext_ptr);
 #endif
 }
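Two design points worth noting in the utils.cc change: availability is probed once per dump by running addr2line -h (cheap, and it avoids forking per frame when the tool is absent), and symbolization is attempted only for frames that already resolved to a mapped ELF file. The command line the code builds looks like this; library path and offset are invented for the example:

    #include <cstdio>
    #include <string>

    int main() {
      // Mirrors the StringPrintf in Addr2line above, with made-up inputs.
      std::string map_src = "/system/lib/libart.so";  // hypothetical it->map->name
      size_t offset = 0x1234;                         // hypothetical it->pc - it->map->start
      char cmd[256];
      std::snprintf(cmd, sizeof(cmd),
                    "addr2line --functions --inlines --demangle -e %s %zx",
                    map_src.c_str(), offset);
      std::puts(cmd);
      return 0;
    }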
diff --git a/tools/checker.py b/tools/checker.py
index 3d5ca45c80..4561dd6cdd 100755
--- a/tools/checker.py
+++ b/tools/checker.py
@@ -159,6 +159,23 @@ class CheckElement(CommonEqualityMixin):
     """Supported language constructs."""
     Text, Pattern, VarRef, VarDef = range(4)

+  rStartOptional = r"("
+  rEndOptional = r")?"
+
+  rName = r"([a-zA-Z][a-zA-Z0-9]*)"
+  rRegex = r"(.+?)"
+  rPatternStartSym = r"(\{\{)"
+  rPatternEndSym = r"(\}\})"
+  rVariableStartSym = r"(\[\[)"
+  rVariableEndSym = r"(\]\])"
+  rVariableSeparator = r"(:)"
+
+  regexPattern = rPatternStartSym + rRegex + rPatternEndSym
+  regexVariable = rVariableStartSym + \
+                  rName + \
+                  (rStartOptional + rVariableSeparator + rRegex + rEndOptional) + \
+                  rVariableEndSym
+
   def __init__(self, variant, name, pattern):
     self.variant = variant
     self.name = name
@@ -170,22 +187,21 @@ class CheckElement(CommonEqualityMixin):

   @staticmethod
   def parsePattern(patternElem):
-    return CheckElement(CheckElement.Variant.Pattern, None, patternElem[2:len(patternElem)-2])
+    return CheckElement(CheckElement.Variant.Pattern, None, patternElem[2:-2])

   @staticmethod
   def parseVariable(varElem):
     colonPos = varElem.find(":")
     if colonPos == -1:
       # Variable reference
-      name = varElem[2:len(varElem)-2]
+      name = varElem[2:-2]
       return CheckElement(CheckElement.Variant.VarRef, name, None)
     else:
       # Variable definition
       name = varElem[2:colonPos]
-      body = varElem[colonPos+1:len(varElem)-2]
+      body = varElem[colonPos+1:-2]
       return CheckElement(CheckElement.Variant.VarDef, name, body)

-
 class CheckLine(CommonEqualityMixin):
   """Representation of a single assertion in the check file formed of one
      or more regex elements. Matching against an output line is successful only
@@ -226,24 +242,6 @@ class CheckLine(CommonEqualityMixin):
     starts = map(lambda m: len(string) if m is None else m.start(), matches)
     return min(starts)

-  # Returns the regex for finding a regex pattern in the check line.
-  def __getPatternRegex(self):
-    rStartSym = "\{\{"
-    rEndSym = "\}\}"
-    rBody = ".+?"
-    return rStartSym + rBody + rEndSym
-
-  # Returns the regex for finding a variable use in the check line.
-  def __getVariableRegex(self):
-    rStartSym = "\[\["
-    rEndSym = "\]\]"
-    rStartOptional = "("
-    rEndOptional = ")?"
-    rName = "[a-zA-Z][a-zA-Z0-9]*"
-    rSeparator = ":"
-    rBody = ".+?"
-    return rStartSym + rName + rStartOptional + rSeparator + rBody + rEndOptional + rEndSym
-
   # This method parses the content of a check line stripped of the initial
   # comment symbol and the CHECK keyword.
   def __parse(self, line):
@@ -251,9 +249,9 @@ class CheckLine(CommonEqualityMixin):
     # Loop as long as there is something to parse.
     while line:
       # Search for the nearest occurrence of the special markers.
-      matchWhitespace = re.search("\s+", line)
-      matchPattern = re.search(self.__getPatternRegex(), line)
-      matchVariable = re.search(self.__getVariableRegex(), line)
+      matchWhitespace = re.search(r"\s+", line)
+      matchPattern = re.search(CheckElement.regexPattern, line)
+      matchVariable = re.search(CheckElement.regexVariable, line)

       # If one of the above was identified at the current position, extract them
       # from the line, parse them and add to the list of line parts.
@@ -262,7 +260,7 @@ class CheckLine(CommonEqualityMixin):
         # a whitespace, we add a regex pattern for an arbitrary non-zero number
         # of whitespaces.
         line = line[matchWhitespace.end():]
-        lineParts.append(CheckElement.parsePattern("{{\s+}}"))
+        lineParts.append(CheckElement.parsePattern(r"{{\s+}}"))
       elif self.__isMatchAtStart(matchPattern):
         pattern = line[0:matchPattern.end()]
         line = line[matchPattern.end():]
@@ -536,16 +534,16 @@ class CheckFile(FileSplitMixin):
   # followed by the CHECK keyword, given attribute and a colon at the very
   # beginning of the line. Whitespaces are ignored.
   def _extractLine(self, prefix, line):
-    ignoreWhitespace = "\s*"
-    commentSymbols = ["//", "#"]
-    prefixRegex = ignoreWhitespace + \
-                  "(" + "|".join(commentSymbols) + ")" + \
-                  ignoreWhitespace + \
-                  prefix + ":"
+    rIgnoreWhitespace = r"\s*"
+    rCommentSymbols = [r"//", r"#"]
+    regexPrefix = rIgnoreWhitespace + \
+                  r"(" + r"|".join(rCommentSymbols) + r")" + \
+                  rIgnoreWhitespace + \
+                  prefix + r":"

     # The 'match' function succeeds only if the pattern is matched at the
     # beginning of the line.
-    match = re.match(prefixRegex, line)
+    match = re.match(regexPrefix, line)
     if match is not None:
       return line[match.end():].strip()
     else:
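The consolidated class-level constants make the checker's variable syntax easy to restate. As a cross-check in C++ (std::regex uses the ECMAScript grammar, which supports the same lazy .+? the checker relies on), the following matches a variable definition like [[Result:i\d+]] and splits it into name and body; the input string is an invented example:

    #include <iostream>
    #include <regex>
    #include <string>

    int main() {
      // C++ restatement of checker.py's regexVariable: [[Name]] or [[Name:pattern]].
      std::regex variable(R"((\[\[)([a-zA-Z][a-zA-Z0-9]*)((:)(.+?))?(\]\]))");
      std::smatch m;
      std::string def = "[[Result:i\\d+]]";
      if (std::regex_search(def, m, variable)) {
        // Group 2 is the name, group 5 the body regex.
        std::cout << "name=" << m[2] << " body=" << m[5] << "\n";  // name=Result body=i\d+
      }
      return 0;
    }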