Merge "Reduce file descriptor limit for 151-OpenFileLimit."
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 51bf9ea..2f9164c 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -156,7 +156,7 @@
const InstructionSet instruction_set = kRuntimeISA;
// Take the default set of instruction features from the build.
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
runtime_->SetInstructionSet(instruction_set);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
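
The pattern applied throughout this change is the same everywhere: the InstructionSetFeatures factory functions now return std::unique_ptr<const InstructionSetFeatures> instead of an owning raw pointer, so callers move-assign the result and compare the smart pointer against nullptr directly. A minimal caller-side sketch, assuming only the declarations this patch introduces (the include path is illustrative):

    #include <memory>

    #include "arch/instruction_set_features.h"  // include path assumed, as used elsewhere in the tree

    void ConfigureFeatures(std::unique_ptr<const art::InstructionSetFeatures>* out) {
      // Ownership transfers by move; no reset()/release() and no manual delete.
      *out = art::InstructionSetFeatures::FromCppDefines();
      if (*out == nullptr) {  // unique_ptr compares against nullptr without .get()
        // handle the error as several callers in this change do
      }
    }
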
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index c398703..f83d37c 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -124,30 +124,30 @@
if (option.starts_with("--instruction-set-variant=")) {
StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
VLOG(compiler) << "JIT instruction set variant " << str;
- instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
- instruction_set, str.as_string(), &error_msg));
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set, str.as_string(), &error_msg);
if (instruction_set_features_ == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
} else if (option.starts_with("--instruction-set-features=")) {
StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
VLOG(compiler) << "JIT instruction set features " << str;
- if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
- instruction_set, "default", &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set, "default", &error_msg);
if (instruction_set_features_ == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
}
- instruction_set_features_.reset(
- instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+ instruction_set_features_ =
+ instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg);
if (instruction_set_features_ == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
}
}
if (instruction_set_features_ == nullptr) {
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
}
cumulative_logger_.reset(new CumulativeLogger("jit times"));
compiler_driver_.reset(new CompilerDriver(
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index fd1b135..64ee574 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -92,7 +92,7 @@
const std::vector<std::string>& compiler_options,
/*out*/std::string* error_msg) {
ASSERT_TRUE(error_msg != nullptr);
- insn_features_.reset(InstructionSetFeatures::FromVariant(insn_set, "default", error_msg));
+ insn_features_ = InstructionSetFeatures::FromVariant(insn_set, "default", error_msg);
ASSERT_TRUE(insn_features_ != nullptr) << error_msg;
compiler_options_.reset(new CompilerOptions);
for (const std::string& option : compiler_options) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 32287a0..bfade3c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -488,6 +488,15 @@
isa_features_(isa_features) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
+ // Give d14 and d15 as scratch registers to VIXL.
+ // They are removed from the register allocator in `SetupBlockedRegisters()`.
+ // TODO(VIXL): We need two scratch D registers for `EmitSwap` when swapping two double stack
+ // slots. If that is sufficiently rare, and we have pressure on FP registers, we could instead
+ // spill in `EmitSwap`. But if we actually are guaranteed to have 32 D registers, we could give
+ // d30 and d31 to VIXL to avoid removing registers from the allocator. If that is the case, we may
+ // also want to investigate giving those 14 other D registers to the allocator.
+ GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d14);
+ GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d15);
}
#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()->
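
The d14/d15 registers handed to VIXL above become available through the usual UseScratchRegisterScope mechanism; a minimal sketch of the consuming side, using only calls that already appear later in this file (the wrapper method itself is hypothetical):

    void CodeGeneratorARMVIXL::UseScratchDoubleSketch() {  // hypothetical helper, for illustration only
      UseScratchRegisterScope temps(GetVIXLAssembler());
      vixl32::DRegister temp = temps.AcquireD();  // hands out d14 or d15 from the list set up above
      // ... use `temp` for a double-word move or swap without touching allocated registers ...
      // `temp` is returned to the scratch list when `temps` goes out of scope.
    }
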
@@ -509,6 +518,13 @@
// Reserve temp register.
blocked_core_registers_[IP] = true;
+ // Registers s28-s31 (d14-d15) are left to VIXL for scratch registers.
+ // (They are given to the `MacroAssembler` in `CodeGeneratorARMVIXL::CodeGeneratorARMVIXL`.)
+ blocked_fpu_registers_[28] = true;
+ blocked_fpu_registers_[29] = true;
+ blocked_fpu_registers_[30] = true;
+ blocked_fpu_registers_[31] = true;
+
if (GetGraph()->IsDebuggable()) {
// Stubs do not save callee-save floating point registers. If the graph
// is debuggable, we need to deal with these registers differently. For
@@ -2161,6 +2177,92 @@
}
}
+void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ switch (compare->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Output overlaps because it is written before doing the low comparison.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(compare->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
+ LocationSummary* locations = compare->GetLocations();
+ vixl32::Register out = OutputRegister(compare);
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+
+ vixl32::Label less, greater, done;
+ Primitive::Type type = compare->InputAt(0)->GetType();
+ vixl32::Condition less_cond = vixl32::Condition(kNone);
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimInt: {
+ // Emit move to `out` before the `Cmp`, as `Mov` might affect the status flags.
+ __ Mov(out, 0);
+ __ Cmp(RegisterFrom(left), RegisterFrom(right)); // Signed compare.
+ less_cond = lt;
+ break;
+ }
+ case Primitive::kPrimLong: {
+ __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); // Signed compare.
+ __ B(lt, &less);
+ __ B(gt, &greater);
+ // Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags.
+ __ Mov(out, 0);
+ __ Cmp(LowRegisterFrom(left), LowRegisterFrom(right)); // Unsigned compare.
+ less_cond = lo;
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ __ Mov(out, 0);
+ GenerateVcmp(compare);
+ // To branch on the FP compare result we transfer FPSCR to APSR (encoded as PC in VMRS).
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ less_cond = ARMFPCondition(kCondLT, compare->IsGtBias());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected compare type " << type;
+ UNREACHABLE();
+ }
+
+ __ B(eq, &done);
+ __ B(less_cond, &less);
+
+ __ Bind(&greater);
+ __ Mov(out, 1);
+ __ B(&done);
+
+ __ Bind(&less);
+ __ Mov(out, -1);
+
+ __ Bind(&done);
+}
+
void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
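
VisitCompare above emits the standard three-way compare: the result is -1, 0, or 1; for floats, IsGtBias() decides whether an unordered (NaN) comparison yields 1 or -1, and the long case splits into a signed compare of the high words followed by an unsigned compare of the low words. A behavioural sketch of the long case in plain C++ (names hypothetical, for reference only):

    #include <cstdint>

    // Mirrors the lt/gt branches on the high words and the `lo` branch on the low words above.
    int32_t CompareLongSketch(int64_t lhs, int64_t rhs) {
      int32_t lhs_hi = static_cast<int32_t>(lhs >> 32);
      int32_t rhs_hi = static_cast<int32_t>(rhs >> 32);
      if (lhs_hi != rhs_hi) {
        return lhs_hi < rhs_hi ? -1 : 1;   // signed compare, as in the first Cmp
      }
      uint32_t lhs_lo = static_cast<uint32_t>(lhs);
      uint32_t rhs_lo = static_cast<uint32_t>(rhs);
      if (lhs_lo == rhs_lo) {
        return 0;
      }
      return lhs_lo < rhs_lo ? -1 : 1;     // unsigned compare, hence the `lo` condition
    }
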
@@ -2648,6 +2750,16 @@
}
}
+Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* input) {
+ DCHECK(Primitive::IsFloatingPointType(input->GetType())) << input->GetType();
+ if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) ||
+ (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) {
+ return Location::ConstantLocation(input->AsConstant());
+ } else {
+ return Location::RequiresFpuRegister();
+ }
+}
+
void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
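
ArithmeticZeroOrFpuRegister above lets a floating-point compare keep a literal zero as a constant location rather than occupying an FPU register: ARM's VCMP has an immediate "#0.0" form (presumably the form GenerateVcmp() uses for a constant input), and since +0.0 and -0.0 compare equal under IEEE-754 either sign of zero qualifies. A sketch of the underlying check (helper name hypothetical):

    // True when the constant can be folded into the VCMP #0.0 encoding instead of a register.
    bool IsVcmpZeroCandidate(double value) {
      return value == 0.0;  // holds for both +0.0 and -0.0
    }
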
@@ -2991,7 +3103,17 @@
} else if (source.IsFpuRegister()) {
TODO_VIXL32(FATAL);
} else if (source.IsDoubleStackSlot()) {
- TODO_VIXL32(FATAL);
+ if (destination.IsDoubleStackSlot()) {
+ vixl32::DRegister temp = temps.AcquireD();
+ GetAssembler()->LoadDFromOffset(temp, sp, source.GetStackIndex());
+ GetAssembler()->StoreDToOffset(temp, sp, destination.GetStackIndex());
+ } else if (destination.IsRegisterPair()) {
+ DCHECK(ExpectedPairLayout(destination));
+ GetAssembler()->LoadFromOffset(
+ kLoadWordPair, LowRegisterFrom(destination), sp, source.GetStackIndex());
+ } else {
+ TODO_VIXL32(FATAL);
+ }
} else if (source.IsRegisterPair()) {
if (destination.IsRegisterPair()) {
__ Mov(LowRegisterFrom(destination), LowRegisterFrom(source));
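
In the double-stack-slot/register-pair case added above, kLoadWordPair fills both halves of the destination pair with a single load, which is why ExpectedPairLayout() is checked first (the pair must be two consecutive core registers). A behavioural sketch of that one call (hypothetical helper, plain C++):

    #include <cstdint>
    #include <cstring>

    // Equivalent of LoadFromOffset(kLoadWordPair, low, sp, offset): `low` receives the low word,
    // the register that immediately follows it receives the high word.
    void LoadWordPairSketch(const uint8_t* sp, int offset, uint32_t* low, uint32_t* high) {
      std::memcpy(low, sp + offset, sizeof(uint32_t));
      std::memcpy(high, sp + offset + 4, sizeof(uint32_t));
    }
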
@@ -3070,18 +3192,77 @@
}
}
-void ParallelMoveResolverARMVIXL::Exchange(Register reg ATTRIBUTE_UNUSED,
- int mem ATTRIBUTE_UNUSED) {
- TODO_VIXL32(FATAL);
+void ParallelMoveResolverARMVIXL::Exchange(vixl32::Register reg, int mem) {
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Mov(temp, reg);
+ GetAssembler()->LoadFromOffset(kLoadWord, reg, sp, mem);
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, mem);
}
-void ParallelMoveResolverARMVIXL::Exchange(int mem1 ATTRIBUTE_UNUSED,
- int mem2 ATTRIBUTE_UNUSED) {
- TODO_VIXL32(FATAL);
+void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) {
+ // TODO(VIXL32): Double check the performance of this implementation.
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ vixl32::SRegister temp_s = temps.AcquireS();
+
+ __ Ldr(temp, MemOperand(sp, mem1));
+ __ Vldr(temp_s, MemOperand(sp, mem2));
+ __ Str(temp, MemOperand(sp, mem2));
+ __ Vstr(temp_s, MemOperand(sp, mem1));
}
-void ParallelMoveResolverARMVIXL::EmitSwap(size_t index ATTRIBUTE_UNUSED) {
- TODO_VIXL32(FATAL);
+void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) {
+ MoveOperands* move = moves_[index];
+ Location source = move->GetSource();
+ Location destination = move->GetDestination();
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+
+ if (source.IsRegister() && destination.IsRegister()) {
+ vixl32::Register temp = temps.Acquire();
+ DCHECK(!RegisterFrom(source).Is(temp));
+ DCHECK(!RegisterFrom(destination).Is(temp));
+ __ Mov(temp, RegisterFrom(destination));
+ __ Mov(RegisterFrom(destination), RegisterFrom(source));
+ __ Mov(RegisterFrom(source), temp);
+ } else if (source.IsRegister() && destination.IsStackSlot()) {
+ Exchange(RegisterFrom(source), destination.GetStackIndex());
+ } else if (source.IsStackSlot() && destination.IsRegister()) {
+ Exchange(RegisterFrom(destination), source.GetStackIndex());
+ } else if (source.IsStackSlot() && destination.IsStackSlot()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
+ vixl32::DRegister temp = temps.AcquireD();
+ __ Vmov(temp, LowRegisterFrom(source), HighRegisterFrom(source));
+ __ Mov(LowRegisterFrom(source), LowRegisterFrom(destination));
+ __ Mov(HighRegisterFrom(source), HighRegisterFrom(destination));
+ __ Vmov(LowRegisterFrom(destination), HighRegisterFrom(destination), temp);
+ } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
+ vixl32::Register low_reg = LowRegisterFrom(source.IsRegisterPair() ? source : destination);
+ int mem = source.IsRegisterPair() ? destination.GetStackIndex() : source.GetStackIndex();
+ DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
+ vixl32::DRegister temp = temps.AcquireD();
+ __ Vmov(temp, low_reg, vixl32::Register(low_reg.GetCode() + 1));
+ GetAssembler()->LoadFromOffset(kLoadWordPair, low_reg, sp, mem);
+ GetAssembler()->StoreDToOffset(temp, sp, mem);
+ } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
+ vixl32::DRegister temp1 = temps.AcquireD();
+ vixl32::DRegister temp2 = temps.AcquireD();
+ __ Vldr(temp1, MemOperand(sp, source.GetStackIndex()));
+ __ Vldr(temp2, MemOperand(sp, destination.GetStackIndex()));
+ __ Vstr(temp1, MemOperand(sp, destination.GetStackIndex()));
+ __ Vstr(temp2, MemOperand(sp, source.GetStackIndex()));
+ } else {
+ LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
+ }
}
void ParallelMoveResolverARMVIXL::SpillScratch(int reg ATTRIBUTE_UNUSED) {
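
Exchange(int, int) above swaps two stack words through one core and one S scratch register, avoiding an extra stack slot, and EmitSwap builds on the same scratch-register machinery for the other combinations. A behavioural sketch of the memory/memory exchange (plain C++, names hypothetical):

    #include <cstdint>

    // Equivalent of: Ldr temp,[sp,#mem1]; Vldr temp_s,[sp,#mem2]; Str temp,[sp,#mem2]; Vstr temp_s,[sp,#mem1]
    void ExchangeStackSlotsSketch(uint32_t* slot1, uint32_t* slot2) {
      uint32_t temp = *slot1;    // core scratch register holds slot1
      uint32_t temp_s = *slot2;  // S scratch register holds slot2 (raw bits, no FP interpretation)
      *slot2 = temp;
      *slot1 = temp_s;
    }
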
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index c749f86..7815db2 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -110,6 +110,7 @@
M(BelowOrEqual) \
M(ClearException) \
M(ClinitCheck) \
+ M(Compare) \
M(CurrentMethod) \
M(Div) \
M(DivZeroCheck) \
@@ -161,7 +162,6 @@
M(BoundType) \
M(CheckCast) \
M(ClassTableGet) \
- M(Compare) \
M(Deoptimize) \
M(DoubleConstant) \
M(FloatConstant) \
@@ -246,7 +246,7 @@
ArmVIXLAssembler* GetAssembler() const;
private:
- void Exchange(Register reg, int mem);
+ void Exchange(vixl32::Register reg, int mem);
void Exchange(int mem1, int mem2);
CodeGeneratorARMVIXL* const codegen_;
@@ -280,6 +280,8 @@
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ Location ArithmeticZeroOrFpuRegister(HInstruction* input);
+
CodeGeneratorARMVIXL* const codegen_;
InvokeDexCallingConventionVisitorARM parameter_visitor_;
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index bacf994..013e110 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -52,7 +52,7 @@
void SetUpFrame(InstructionSet isa) {
// Setup simple context.
std::string error;
- isa_features_.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
+ isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
graph_ = CreateGraph(&allocator_);
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8bbe685..28d6289 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -637,9 +637,8 @@
void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) {
DCHECK(option.starts_with("--instruction-set-variant="));
StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(
- instruction_set_, str.as_string(), &parser_options->error_msg));
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set_, str.as_string(), &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("%s", parser_options->error_msg.c_str());
}
@@ -648,19 +647,18 @@
void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) {
DCHECK(option.starts_with("--instruction-set-features="));
StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
- if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(
- instruction_set_, "default", &parser_options->error_msg));
+ if (instruction_set_features_ == nullptr) {
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set_, "default", &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("Problem initializing default instruction set features variant: %s",
parser_options->error_msg.c_str());
}
}
- instruction_set_features_.reset(
+ instruction_set_features_ =
instruction_set_features_->AddFeaturesFromString(str.as_string(),
- &parser_options->error_msg));
- if (instruction_set_features_.get() == nullptr) {
+ &parser_options->error_msg);
+ if (instruction_set_features_ == nullptr) {
Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str());
}
}
@@ -828,9 +826,8 @@
// If no instruction set feature was given, use the default one for the target
// instruction set.
if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(
- instruction_set_, "default", &parser_options->error_msg));
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set_, "default", &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("Problem initializing default instruction set features variant: %s",
parser_options->error_msg.c_str());
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 4e81d50..da0db01 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -115,13 +115,13 @@
bool Symbolize() {
const InstructionSet isa = oat_file_->GetOatHeader().GetInstructionSet();
- const InstructionSetFeatures* features = InstructionSetFeatures::FromBitmap(
+ std::unique_ptr<const InstructionSetFeatures> features = InstructionSetFeatures::FromBitmap(
isa, oat_file_->GetOatHeader().GetInstructionSetFeaturesBitmap());
File* elf_file = OS::CreateEmptyFile(output_name_.c_str());
std::unique_ptr<BufferedOutputStream> output_stream(
MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file)));
- builder_.reset(new ElfBuilder<ElfTypes>(isa, features, output_stream.get()));
+ builder_.reset(new ElfBuilder<ElfTypes>(isa, features.get(), output_stream.get()));
builder_->Start();
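
ElfBuilder keeps taking a non-owning pointer, so the owning unique_ptr stays in Symbolize() and features.get() is handed down for the builder's use; a minimal sketch of the borrowing side (borrower name hypothetical):

    namespace art { class InstructionSetFeatures; }  // forward declaration; the borrower never owns

    // The caller's unique_ptr (here: `features` in Symbolize()) must outlive every use of this pointer.
    void BuildWithFeaturesSketch(const art::InstructionSetFeatures* features) {
      // Read-only queries only, e.g. features->GetFeatureString(); no delete here.
      (void)features;
    }
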
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index c3a5829..c81a93c 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -33,7 +33,7 @@
namespace art {
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg) {
// Assume all ARM processors are SMP.
// TODO: set the SMP support based on variant.
@@ -69,7 +69,7 @@
if (FindVariantInArray(unsupported_arm_variants, arraysize(unsupported_arm_variants),
variant)) {
*error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str());
- return nullptr;
+ return ArmFeaturesUniquePtr();
}
// Warn if the variant is unknown.
// TODO: some of the variants below may have feature support, but that support is currently
@@ -97,17 +97,17 @@
<< ") using conservative defaults";
}
}
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool has_div = (bitmap & kDivBitfield) != 0;
bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0;
- return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCppDefines() {
const bool smp = true;
#if defined(__ARM_ARCH_EXT_IDIV__)
const bool has_div = true;
@@ -119,10 +119,10 @@
#else
const bool has_lpae = false;
#endif
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -157,10 +157,10 @@
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromHwcap() {
bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
bool has_div = false;
@@ -180,7 +180,7 @@
}
#endif
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
// A signal handler called by a fault for an illegal instruction. We record the fact in r0
@@ -198,7 +198,7 @@
#endif
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromAssembly() {
const bool smp = true;
// See if have a sdiv instruction. Register a signal handler and try to execute an sdiv
@@ -226,7 +226,7 @@
#else
const bool has_lpae = false;
#endif
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
@@ -265,7 +265,8 @@
return result;
}
-const InstructionSetFeatures* ArmInstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+ArmInstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
bool has_div = has_div_;
@@ -284,7 +285,8 @@
return nullptr;
}
}
- return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+ return std::unique_ptr<const InstructionSetFeatures>(
+ new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd));
}
} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index 221bf1f..204d1d7 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -21,29 +21,31 @@
namespace art {
+class ArmInstructionSetFeatures;
+using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
+
// Instruction set features relevant to the ARM architecture.
class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static ArmFeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const ArmInstructionSetFeatures* FromCppDefines();
+ static ArmFeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromCpuInfo();
+ static ArmFeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromHwcap();
+ static ArmFeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const ArmInstructionSetFeatures* FromAssembly();
+ static ArmFeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -71,7 +73,7 @@
protected:
// Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
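
With the ArmFeaturesUniquePtr alias introduced above, the ARM-specific factories interoperate with the architecture-neutral std::unique_ptr<const InstructionSetFeatures> through unique_ptr's converting move; a minimal usage sketch (include path illustrative, error handling elided):

    #include <memory>
    #include <string>

    #include "arch/arm/instruction_set_features_arm.h"  // include path assumed

    std::unique_ptr<const art::InstructionSetFeatures> MakeKraitFeatures(std::string* error_msg) {
      art::ArmFeaturesUniquePtr arm_features =
          art::ArmInstructionSetFeatures::FromVariant("krait", error_msg);
      // unique_ptr<const Arm...> converts to unique_ptr<const InstructionSetFeatures> on return.
      return arm_features;
    }
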
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index cad13b2..4e7dea3 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -19,12 +19,13 @@
#include <fstream>
#include <sstream>
+#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
namespace art {
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg) {
const bool smp = true; // Conservative default.
@@ -52,22 +53,23 @@
// The variants that need a fix for 843419 are the same that need a fix for 835769.
bool needs_a53_843419_fix = needs_a53_835769_fix;
- return new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix, needs_a53_843419_fix);
+ return Arm64FeaturesUniquePtr(
+ new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix, needs_a53_843419_fix));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool is_a53 = (bitmap & kA53Bitfield) != 0;
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCppDefines() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCppDefines() {
const bool smp = true;
const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCpuInfo() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -89,16 +91,16 @@
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromHwcap() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromHwcap() {
bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromAssembly() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
@@ -130,7 +132,8 @@
return result;
}
-const InstructionSetFeatures* Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
bool is_a53 = fix_cortex_a53_835769_;
for (auto i = features.begin(); i != features.end(); i++) {
@@ -144,7 +147,8 @@
return nullptr;
}
}
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return std::unique_ptr<const InstructionSetFeatures>(
+ new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
} // namespace art
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index abd7e83..e51aa1c 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -21,29 +21,31 @@
namespace art {
+class Arm64InstructionSetFeatures;
+using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
+
// Instruction set features relevant to the ARM64 architecture.
class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static Arm64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const Arm64InstructionSetFeatures* FromCppDefines();
+ static Arm64FeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromCpuInfo();
+ static Arm64FeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromHwcap();
+ static Arm64FeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const Arm64InstructionSetFeatures* FromAssembly();
+ static Arm64FeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -70,7 +72,7 @@
protected:
// Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index 898f83a..b32391f 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -29,29 +29,28 @@
namespace art {
-const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg) {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVariant(
+ InstructionSet isa, const std::string& variant, std::string* error_msg) {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (isa) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(ArmInstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(Arm64InstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(MipsInstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
break;
case kX86:
- result = X86InstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(X86InstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(X86_64InstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
default:
UNIMPLEMENTED(FATAL) << isa;
@@ -61,28 +60,28 @@
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
- uint32_t bitmap) {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromBitmap(InstructionSet isa,
+ uint32_t bitmap) {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (isa) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(ArmInstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(Arm64InstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(MipsInstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
break;
case kX86:
- result = X86InstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(X86InstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(X86_64InstructionSetFeatures::FromBitmap(bitmap).release());
break;
default:
UNIMPLEMENTED(FATAL) << isa;
@@ -92,27 +91,27 @@
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCppDefines() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromCppDefines();
+ result.reset(ArmInstructionSetFeatures::FromCppDefines().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromCppDefines();
+ result.reset(Arm64InstructionSetFeatures::FromCppDefines().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromCppDefines();
+ result.reset(MipsInstructionSetFeatures::FromCppDefines().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromCppDefines();
break;
case kX86:
- result = X86InstructionSetFeatures::FromCppDefines();
+ result.reset(X86InstructionSetFeatures::FromCppDefines().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromCppDefines();
+ result.reset(X86_64InstructionSetFeatures::FromCppDefines().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -122,27 +121,27 @@
}
-const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromCpuInfo();
+ result.reset(ArmInstructionSetFeatures::FromCpuInfo().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromCpuInfo();
+ result.reset(Arm64InstructionSetFeatures::FromCpuInfo().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromCpuInfo();
+ result.reset(MipsInstructionSetFeatures::FromCpuInfo().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromCpuInfo();
break;
case kX86:
- result = X86InstructionSetFeatures::FromCpuInfo();
+ result.reset(X86InstructionSetFeatures::FromCpuInfo().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromCpuInfo();
+ result.reset(X86_64InstructionSetFeatures::FromCpuInfo().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -151,27 +150,27 @@
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromHwcap() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromHwcap();
+ result.reset(ArmInstructionSetFeatures::FromHwcap().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromHwcap();
+ result.reset(Arm64InstructionSetFeatures::FromHwcap().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromHwcap();
+ result.reset(MipsInstructionSetFeatures::FromHwcap().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromHwcap();
break;
case kX86:
- result = X86InstructionSetFeatures::FromHwcap();
+ result.reset(X86InstructionSetFeatures::FromHwcap().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromHwcap();
+ result.reset(X86_64InstructionSetFeatures::FromHwcap().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -180,27 +179,27 @@
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromAssembly() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromAssembly();
+ result.reset(ArmInstructionSetFeatures::FromAssembly().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromAssembly();
+ result.reset(Arm64InstructionSetFeatures::FromAssembly().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromAssembly();
+ result.reset(MipsInstructionSetFeatures::FromAssembly().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromAssembly();
break;
case kX86:
- result = X86InstructionSetFeatures::FromAssembly();
+ result.reset(X86InstructionSetFeatures::FromAssembly().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromAssembly();
+ result.reset(X86_64InstructionSetFeatures::FromAssembly().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -209,11 +208,11 @@
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::AddFeaturesFromString(
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeaturesFromString(
const std::string& feature_list, std::string* error_msg) const {
if (feature_list.empty()) {
*error_msg = "No instruction set features specified";
- return nullptr;
+ return std::unique_ptr<const InstructionSetFeatures>();
}
std::vector<std::string> features;
Split(feature_list, ',', &features);
@@ -223,7 +222,7 @@
for (auto it = features.begin(); it != features.end();) {
if (use_default) {
*error_msg = "Unexpected instruction set features after 'default'";
- return nullptr;
+ return std::unique_ptr<const InstructionSetFeatures>();
}
std::string feature = Trim(*it);
bool erase = false;
@@ -233,7 +232,7 @@
erase = true;
} else {
*error_msg = "Unexpected instruction set features before 'default'";
- return nullptr;
+ return std::unique_ptr<const InstructionSetFeatures>();
}
} else if (feature == "smp") {
smp = true;
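
The reset(...release()) form used above and a direct move assignment are interchangeable here, because std::unique_ptr<const Derived> converts to std::unique_ptr<const Base> by move; this is also why the kMips64 cases, which appear only as context lines, keep compiling unchanged. A self-contained sketch of the equivalence:

    #include <memory>
    #include <utility>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    void AssignSketch(std::unique_ptr<const Base>* result, std::unique_ptr<const Derived> from) {
      // Either line transfers ownership and upcasts the pointee type:
      //   result->reset(from.release());
      *result = std::move(from);
    }
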
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index d10ae21..d84bc02 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
#define ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
+#include <memory>
#include <ostream>
#include <vector>
@@ -36,31 +37,32 @@
class InstructionSetFeatures {
public:
// Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg);
+ static std::unique_ptr<const InstructionSetFeatures> FromVariant(InstructionSet isa,
+ const std::string& variant,
+ std::string* error_msg);
// Parse a bitmap for the given isa and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
+ static std::unique_ptr<const InstructionSetFeatures> FromBitmap(InstructionSet isa,
+ uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
- static const InstructionSetFeatures* FromCppDefines();
+ static std::unique_ptr<const InstructionSetFeatures> FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const InstructionSetFeatures* FromCpuInfo();
+ static std::unique_ptr<const InstructionSetFeatures> FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const InstructionSetFeatures* FromHwcap();
+ static std::unique_ptr<const InstructionSetFeatures> FromHwcap();
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const InstructionSetFeatures* FromAssembly();
+ static std::unique_ptr<const InstructionSetFeatures> FromAssembly();
// Parse a string of the form "div,-atomic_ldrd_strd" adding and removing these features to
// create a new InstructionSetFeatures.
- const InstructionSetFeatures* AddFeaturesFromString(const std::string& feature_list,
- std::string* error_msg) const WARN_UNUSED;
+ std::unique_ptr<const InstructionSetFeatures> AddFeaturesFromString(
+ const std::string& feature_list, std::string* error_msg) const WARN_UNUSED;
// Are these features the same as the other given features?
virtual bool Equals(const InstructionSetFeatures* other) const = 0;
@@ -107,7 +109,7 @@
const std::string& variant);
// Add architecture specific features in sub-classes.
- virtual const InstructionSetFeatures*
+ virtual std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(bool smp, const std::vector<std::string>& features,
std::string* error_msg) const = 0;
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index b3a9866..a95b6f6 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -19,6 +19,7 @@
#include <fstream>
#include <sstream>
+#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
@@ -63,7 +64,7 @@
}
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromVariant(
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
bool smp = true; // Conservative default.
@@ -97,18 +98,19 @@
LOG(WARNING) << "Unexpected CPU variant for Mips32 using defaults: " << variant;
}
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap(
+ uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
bool r6 = (bitmap & kR6) != 0;
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCppDefines() {
// Assume conservative defaults.
const bool smp = true;
@@ -117,10 +119,10 @@
bool r6;
GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCpuInfo() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
// Assume conservative defaults.
@@ -147,15 +149,15 @@
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromHwcap() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromHwcap() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromAssembly() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromAssembly() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
@@ -201,7 +203,8 @@
return result;
}
-const InstructionSetFeatures* MipsInstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+MipsInstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
bool fpu_32bit = fpu_32bit_;
bool mips_isa_gte2 = mips_isa_gte2_;
@@ -225,7 +228,8 @@
return nullptr;
}
}
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return std::unique_ptr<const InstructionSetFeatures>(
+ new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 2d54988..c2a28dc 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -23,29 +23,31 @@
namespace art {
+class MipsInstructionSetFeatures;
+using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
+
// Instruction set features relevant to the MIPS architecture.
class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static MipsFeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const MipsInstructionSetFeatures* FromCppDefines();
+ static MipsFeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromCpuInfo();
+ static MipsFeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromHwcap();
+ static MipsFeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const MipsInstructionSetFeatures* FromAssembly();
+ static MipsFeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -77,7 +79,7 @@
protected:
// Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
- virtual const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 5c0c914..490a8d2 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -24,27 +24,27 @@
namespace art {
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromVariant(
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
if (variant != "default" && variant != "mips64r6") {
LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant;
}
bool smp = true; // Conservative default.
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromCppDefines() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() {
const bool smp = true;
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromCpuInfo() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -65,15 +65,15 @@
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromHwcap() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromAssembly() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromAssembly() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
@@ -99,7 +99,8 @@
return result;
}
-const InstructionSetFeatures* Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
auto i = features.begin();
if (i != features.end()) {
@@ -108,7 +109,7 @@
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
}
- return new Mips64InstructionSetFeatures(smp);
+ return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(smp));
}
} // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index d5d6012..2e66235 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -21,29 +21,32 @@
namespace art {
+class Mips64InstructionSetFeatures;
+using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
+
// Instruction set features relevant to the MIPS64 architecture.
class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
+ std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static Mips64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const Mips64InstructionSetFeatures* FromCppDefines();
+ static Mips64FeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromCpuInfo();
+ static Mips64FeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromHwcap();
+ static Mips64FeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const Mips64InstructionSetFeatures* FromAssembly();
+ static Mips64FeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -59,8 +62,9 @@
protected:
// Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
- virtual const InstructionSetFeatures*
- AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::unique_ptr<const InstructionSetFeatures>
+ AddFeaturesFromSplitString(const bool smp,
+ const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
private:
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 0093e82..90b55a9 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -49,7 +49,34 @@
"silvermont",
};
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
+X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64,
+ bool smp,
+ bool has_SSSE3,
+ bool has_SSE4_1,
+ bool has_SSE4_2,
+ bool has_AVX,
+ bool has_AVX2,
+ bool has_POPCNT) {
+ if (x86_64) {
+ return X86FeaturesUniquePtr(new X86_64InstructionSetFeatures(smp,
+ has_SSSE3,
+ has_SSE4_1,
+ has_SSE4_2,
+ has_AVX,
+ has_AVX2,
+ has_POPCNT));
+ } else {
+ return X86FeaturesUniquePtr(new X86InstructionSetFeatures(smp,
+ has_SSSE3,
+ has_SSE4_1,
+ has_SSE4_2,
+ has_AVX,
+ has_AVX2,
+ has_POPCNT));
+ }
+}
+
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
bool x86_64) {
bool smp = true; // Conservative default.
@@ -75,17 +102,10 @@
LOG(WARNING) << "Unexpected CPU variant for X86 using defaults: " << variant;
}
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromBitmap(uint32_t bitmap,
- bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromBitmap(uint32_t bitmap, bool x86_64) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool has_SSSE3 = (bitmap & kSsse3Bitfield) != 0;
bool has_SSE4_1 = (bitmap & kSse4_1Bitfield) != 0;
@@ -93,16 +113,10 @@
bool has_AVX = (bitmap & kAvxBitfield) != 0;
bool has_AVX2 = (bitmap & kAvxBitfield) != 0;
bool has_POPCNT = (bitmap & kPopCntBitfield) != 0;
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
- has_AVX, has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
- has_AVX, has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
const bool smp = true;
#ifndef __SSSE3__
@@ -141,16 +155,10 @@
const bool has_POPCNT = true;
#endif
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -198,21 +206,15 @@
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromHwcap(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromHwcap(bool x86_64) {
UNIMPLEMENTED(WARNING);
return FromCppDefines(x86_64);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromAssembly(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromAssembly(bool x86_64) {
UNIMPLEMENTED(WARNING);
return FromCppDefines(x86_64);
}
@@ -281,7 +283,7 @@
return result;
}
-const InstructionSetFeatures* X86InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures> X86InstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, bool x86_64,
std::string* error_msg) const {
bool has_SSSE3 = has_SSSE3_;
@@ -321,13 +323,7 @@
return nullptr;
}
}
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
} // namespace art
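
One effect visible in the instruction_set_features_x86.cc hunks above: the if (x86_64) / else blocks that were duplicated across FromVariant, FromBitmap, FromCppDefines, FromCpuInfo and AddFeaturesFromSplitString all collapse into the new Create() helper. A reduced sketch of that consolidation, with hypothetical FeaturesBase/Features64 types standing in for X86InstructionSetFeatures and X86_64InstructionSetFeatures, and only one flag instead of seven:

// Illustrative sketch only; not the real ART classes.
#include <memory>

struct FeaturesBase {
  explicit FeaturesBase(bool smp) : smp_(smp) {}
  virtual ~FeaturesBase() {}
  bool smp_;
};

struct Features64 : FeaturesBase {
  explicit Features64(bool smp) : FeaturesBase(smp) {}
};

using FeaturesBasePtr = std::unique_ptr<const FeaturesBase>;

// Single decision point: every public factory (variant string, bitmap,
// cpp defines, /proc/cpuinfo parse) funnels its gathered flags through here.
FeaturesBasePtr Create(bool is_64, bool smp) {
  if (is_64) {
    return FeaturesBasePtr(new Features64(smp));
  }
  return FeaturesBasePtr(new FeaturesBase(smp));
}

// e.g. FeaturesBasePtr f = Create(/* is_64 */ true, /* smp */ true);

Keeping Create() non-public (it is declared in the protected section of the header diff below) lets the factories share the type selection without widening the public API.
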
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 2aa8ae6..672892e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -21,30 +21,34 @@
namespace art {
+class X86InstructionSetFeatures;
+using X86FeaturesUniquePtr = std::unique_ptr<const X86InstructionSetFeatures>;
+
// Instruction set features relevant to the X86 architecture.
class X86InstructionSetFeatures : public InstructionSetFeatures {
public:
// Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg,
- bool x86_64 = false);
+ static X86FeaturesUniquePtr FromVariant(const std::string& variant,
+ std::string* error_msg,
+ bool x86_64 = false);
// Parse a bitmap and create an InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromBitmap(uint32_t bitmap, bool x86_64 = false);
+ static X86FeaturesUniquePtr FromBitmap(uint32_t bitmap,
+ bool x86_64 = false);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const X86InstructionSetFeatures* FromCppDefines(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromCppDefines(bool x86_64 = false);
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromCpuInfo(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromCpuInfo(bool x86_64 = false);
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromHwcap(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromHwcap(bool x86_64 = false);
// Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const X86InstructionSetFeatures* FromAssembly(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -64,13 +68,13 @@
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
- virtual const InstructionSetFeatures*
+ virtual std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE {
return AddFeaturesFromSplitString(smp, features, false, error_msg);
}
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
bool x86_64, std::string* error_msg) const;
@@ -85,6 +89,15 @@
has_POPCNT_(has_POPCNT) {
}
+ static X86FeaturesUniquePtr Create(bool x86_64,
+ bool smp,
+ bool has_SSSE3,
+ bool has_SSE4_1,
+ bool has_SSE4_2,
+ bool has_AVX,
+ bool has_AVX2,
+ bool has_POPCNT);
+
private:
// Bitmap positions for encoding features as a bitmap.
enum {
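
The paired AddFeaturesFromSplitString declarations in the header above keep the base-class virtual signature intact while routing the work through a wider, x86-only overload that also carries the x86_64 flag; the narrow override just forwards with false. A reduced sketch of that delegation, with made-up names (Feats, BaseFeats, X86LikeFeats, AddFromSplit) rather than the real hierarchy:

// Illustrative sketch only; simplified stand-ins for the real ART types.
#include <memory>
#include <string>
#include <vector>

struct Feats {
  virtual ~Feats() {}
};
using FeatsPtr = std::unique_ptr<const Feats>;

struct BaseFeats : Feats {
  // Base-class extension point, same shape as the OVERRIDE declared above.
  virtual FeatsPtr AddFromSplit(bool smp,
                                const std::vector<std::string>& features,
                                std::string* error_msg) const = 0;
};

struct X86LikeFeats : BaseFeats {
  // The narrow override simply forwards, defaulting the extra flag to false.
  FeatsPtr AddFromSplit(bool smp,
                        const std::vector<std::string>& features,
                        std::string* error_msg) const override {
    return AddFromSplit(smp, features, /* is_64 */ false, error_msg);
  }

  // The wider overload is where the real parsing would happen; this sketch
  // just returns a fresh object.
  FeatsPtr AddFromSplit(bool smp,
                        const std::vector<std::string>& features,
                        bool is_64,
                        std::string* error_msg) const {
    (void)smp; (void)features; (void)is_64; (void)error_msg;
    return FeatsPtr(new X86LikeFeats());
  }
};
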
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index 0840f89..bc0f708 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -21,41 +21,42 @@
namespace art {
+class X86_64InstructionSetFeatures;
+using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
+
// Instruction set features relevant to the X86_64 architecture.
class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
public:
// Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg) {
- return X86InstructionSetFeatures::FromVariant(variant, error_msg, true)
- ->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
+ return Convert(X86InstructionSetFeatures::FromVariant(variant, error_msg, true));
}
// Parse a bitmap and create an InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromBitmap(uint32_t bitmap) {
- return X86InstructionSetFeatures::FromBitmap(bitmap, true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromBitmap(uint32_t bitmap) {
+ return Convert(X86InstructionSetFeatures::FromBitmap(bitmap, true));
}
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const X86_64InstructionSetFeatures* FromCppDefines() {
- return X86InstructionSetFeatures::FromCppDefines(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromCppDefines() {
+ return Convert(X86InstructionSetFeatures::FromCppDefines(true));
}
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromCpuInfo() {
- return X86InstructionSetFeatures::FromCpuInfo(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromCpuInfo() {
+ return Convert(X86InstructionSetFeatures::FromCpuInfo(true));
}
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromHwcap() {
- return X86InstructionSetFeatures::FromHwcap(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromHwcap() {
+ return Convert(X86InstructionSetFeatures::FromHwcap(true));
}
// Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const X86_64InstructionSetFeatures* FromAssembly() {
- return X86InstructionSetFeatures::FromAssembly(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromAssembly() {
+ return Convert(X86InstructionSetFeatures::FromAssembly(true));
}
InstructionSet GetInstructionSet() const OVERRIDE {
@@ -66,7 +67,7 @@
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE {
return X86InstructionSetFeatures::AddFeaturesFromSplitString(smp, features, true, error_msg);
@@ -79,6 +80,10 @@
has_AVX2, has_POPCNT) {
}
+ static X86_64FeaturesUniquePtr Convert(X86FeaturesUniquePtr&& in) {
+ return X86_64FeaturesUniquePtr(in.release()->AsX86_64InstructionSetFeatures());
+ }
+
friend class X86InstructionSetFeatures;
DISALLOW_COPY_AND_ASSIGN(X86_64InstructionSetFeatures);
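
The new Convert() helper above exists because unique_ptr will not implicitly downcast: the x86 factories return a unique_ptr<const X86InstructionSetFeatures>, and re-typing it as the 64-bit subclass means releasing the raw pointer and re-wrapping it, which is safe here because every call site passes x86_64 = true. A generic sketch of that move with placeholder types; the static_cast stands in for the AsX86_64InstructionSetFeatures() accessor used in the real code:

// Illustrative sketch only; placeholder types, not the real ART classes.
#include <memory>

struct BaseIsaFeatures {
  virtual ~BaseIsaFeatures() {}
};
struct Wide64Features : BaseIsaFeatures {};

using BasePtr = std::unique_ptr<const BaseIsaFeatures>;
using Wide64Ptr = std::unique_ptr<const Wide64Features>;

// release() hands the raw pointer across the type boundary, then it is
// re-wrapped at the derived type; ownership never lapses in between the
// two smart pointers because the return value is constructed immediately.
Wide64Ptr Convert(BasePtr&& in) {
  return Wide64Ptr(static_cast<const Wide64Features*>(in.release()));
}

// Usage: Wide64Ptr w = Convert(BasePtr(new Wide64Features()));
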
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index 0bbcb64..5e1b953 100644
--- a/test/Android.arm_vixl.mk
+++ b/test/Android.arm_vixl.mk
@@ -52,7 +52,6 @@
037-inherit \
041-narrowing \
042-new-instance \
- 043-privates \
044-proxy \
045-reflect-array \
046-reflect \
@@ -62,7 +61,6 @@
050-sync-test \
051-thread \
052-verifier-fun \
- 053-wait-some \
054-uncaught \
058-enum-order \
059-finalizer-throw \
@@ -137,9 +135,6 @@
201-built-in-except-detail-messages \
302-float-conversion \
304-method-tracing \
- 403-optimizing-long \
- 404-optimizing-allocator \
- 405-optimizing-long-allocator \
406-fields \
407-arrays \
410-floats \
@@ -153,7 +148,6 @@
417-optimizing-arith-div \
419-long-parameter \
421-exceptions \
- 421-large-frame \
422-instanceof \
422-type-conversion \
423-invoke-interface \
@@ -180,7 +174,6 @@
442-checker-constant-folding \
444-checker-nce \
445-checker-licm \
- 446-checker-inliner2 \
447-checker-inliner3 \
448-multiple-returns \
449-checker-bce \
@@ -197,33 +190,24 @@
459-dead-phi \
460-multiple-returns3 \
461-get-reference-vreg \
- 462-checker-inlining-dex-files \
463-checker-boolean-simplifier \
- 464-checker-inline-sharpen-calls \
466-get-live-vreg \
467-regalloc-pair \
468-checker-bool-simplif-regression \
469-condition-materialization \
- 470-huge-method \
471-deopt-environment \
472-type-propagation \
- 473-checker-inliner-constants \
474-checker-boolean-input \
474-fp-sub-neg \
475-regression-inliner-ids \
- 476-checker-ctor-memory-barrier \
477-checker-bound-type \
477-long-2-float-convers-precision \
478-checker-clinit-check-pruning \
- 478-checker-inliner-nested-loop \
- 480-checker-dead-blocks \
- 482-checker-loop-back-edge-use \
483-dce-block \
484-checker-register-hints \
485-checker-dce-loop-update \
485-checker-dce-switch \
486-checker-must-do-null-check \
- 487-checker-inline-calls \
488-checker-inline-recursive-calls \
490-checker-inline \
491-current-method \
@@ -273,7 +257,6 @@
535-regression-const-val \
536-checker-intrinsic-optimization \
536-checker-needs-access-check \
- 537-checker-arraycopy \
537-checker-inline-and-unverified \
537-checker-jump-over-jump \
538-checker-embed-constants \
@@ -285,10 +268,8 @@
543-env-long-ref \
545-tracing-and-jit \
546-regression-simplify-catch \
- 548-checker-inlining-and-dce \
550-checker-multiply-accumulate \
550-checker-regression-wide-store \
- 551-checker-clinit \
551-checker-shifter-operand \
551-implicit-null-checks \
551-invoke-super \
@@ -311,17 +292,13 @@
562-no-intermediate \
563-checker-fakestring \
564-checker-bitcount \
- 564-checker-inline-loop \
564-checker-irreducible-loop \
564-checker-negbitwise \
565-checker-condition-liveness \
565-checker-doublenegbitwise \
565-checker-irreducible-loop \
565-checker-rotate \
- 566-checker-codegen-select \
- 566-checker-signum \
566-polymorphic-inlining \
- 567-checker-compare \
568-checker-onebit \
570-checker-osr \
570-checker-select \
@@ -346,7 +323,6 @@
590-checker-arr-set-null-regression \
591-new-instance-string \
592-checker-regression-bool-input \
- 593-checker-boolean-2-integral-conv \
593-checker-long-2-float-regression \
593-checker-shift-and-simplifier \
594-checker-array-alias \
diff --git a/test/VerifierDeps/MyClassWithNoSuper.smali b/test/VerifierDeps/MyClassWithNoSuper.smali
new file mode 100644
index 0000000..d8509bc
--- /dev/null
+++ b/test/VerifierDeps/MyClassWithNoSuper.smali
@@ -0,0 +1,16 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyClassWithNoSuper;
+.super LNoSuper;
diff --git a/test/VerifierDeps/MyClassWithNoSuperButFailures.smali b/test/VerifierDeps/MyClassWithNoSuperButFailures.smali
new file mode 100644
index 0000000..1dbe9d1
--- /dev/null
+++ b/test/VerifierDeps/MyClassWithNoSuperButFailures.smali
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyClassWithNoSuperButFailures;
+.super LNoSuper;
+
+.method public final foo()I
+ .registers 1
+ return-void
+.end method
diff --git a/test/VerifierDeps/MyVerificationFailure.smali b/test/VerifierDeps/MyVerificationFailure.smali
new file mode 100644
index 0000000..187b1ad
--- /dev/null
+++ b/test/VerifierDeps/MyVerificationFailure.smali
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyVerificationFailure;
+.super Ljava/lang/Object;
+
+.method public final foo()I
+ .registers 1
+ return-void
+.end method