Merge "ART-Optimizing: Fix the type of HDivZeroCheck"
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 044989e..d29b865 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -74,7 +74,7 @@
GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
: num_vregs_(num_vregs),
vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
- vreg_high_words_(num_vregs, false, Allocator::GetNoopAllocator(),
+ vreg_high_words_(false, Allocator::GetNoopAllocator(),
BitVector::BitsToWords(num_vregs),
alloc->AllocArray<uint32_t>(BitVector::BitsToWords(num_vregs))),
mir_data_(alloc->Adapter()) {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 17b19dd..d2a90ec 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -54,6 +54,10 @@
static const size_t kDefaultInlineDepthLimit = 3;
static const size_t kDefaultInlineMaxCodeUnits = 18;
+ // Default inlining settings when the space filter is used.
+ static constexpr size_t kSpaceFilterInlineDepthLimit = 3;
+ static constexpr size_t kSpaceFilterInlineMaxCodeUnits = 10;
+
CompilerOptions();
~CompilerOptions();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index b85a129..4ce3129 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1372,6 +1372,11 @@
<< "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method);
dest->SetArtMethod(
reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
+ } else if (!klass->IsArrayClass() && klass->IsSubClass(down_cast<mirror::Class*>(
+ Thread::Current()->DecodeJObject(WellKnownClasses::java_lang_ClassLoader)))) {
+ // If src is a ClassLoader, set the class table to null so that it gets recreated by the
+ // ClassLoader.
+ down_cast<mirror::ClassLoader*>(copy)->SetClassTable(nullptr);
}
FixupVisitor visitor(this, copy);
orig->VisitReferences<true /*visit class*/>(visitor, visitor);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index eb63b49..540da1c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -98,6 +98,8 @@
return saved_fpu_stack_offsets_[reg];
}
+ virtual bool IsFatal() const { return false; }
+
virtual const char* GetDescription() const = 0;
protected:
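
The virtual IsFatal() hook added above marks slow paths that end by throwing and therefore never return to the compiled code; the per-architecture InvokeRuntime helpers introduced later in this change use it to check that a runtime call made from a slow path is consistent with the call kind the register allocator was told about. A minimal sketch of that pattern, with hypothetical names rather than the ART classes:

    #include <cassert>

    struct SlowPath {
      virtual ~SlowPath() = default;
      // Default: the slow path falls through back to the fast path.
      virtual bool IsFatal() const { return false; }
    };

    struct ThrowDivZeroSlowPath : SlowPath {
      // A throwing path never returns to the compiled code, so it is "fatal".
      bool IsFatal() const override { return true; }
    };

    enum class CallKind { kNoCall, kCallOnSlowPath, kCall };

    // Mirrors the coherence DCHECK added to each InvokeRuntime in this change.
    void CheckRuntimeCall(CallKind kind, const SlowPath* slow_path) {
      if (slow_path == nullptr) {
        assert(kind == CallKind::kCall);  // main-path runtime call
      } else {
        assert(kind == CallKind::kCallOnSlowPath || slow_path->IsFatal());
      }
    }
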
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e832871..6c0292c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -69,6 +69,8 @@
QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }
private:
@@ -87,6 +89,8 @@
QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }
private:
@@ -161,6 +165,8 @@
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }
private:
@@ -947,6 +953,14 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
__ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
__ blx(LR);
RecordPcInfo(instruction, dex_pc, slow_path);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 8a9669a..b44c5ba 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -212,6 +212,8 @@
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
private:
@@ -234,6 +236,8 @@
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
private:
@@ -344,6 +348,8 @@
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
private:
@@ -1097,6 +1103,14 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
BlockPoolsScope block_pools(GetVIXLAssembler());
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 523b236..b6ebeb4 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -138,6 +138,8 @@
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
private:
@@ -162,6 +164,8 @@
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
private:
@@ -278,6 +282,8 @@
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
private:
@@ -971,6 +977,14 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
// TODO: anything related to T9/GP/GOT/PIC/.so's?
__ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
__ Jalr(T9);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e89ea85..4efdbb9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -45,17 +45,23 @@
static constexpr int kFakeReturnRegister = Register(8);
#define __ down_cast<X86Assembler*>(codegen->GetAssembler())->
+#define QUICK_ENTRY_POINT(x) Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x))
class NullCheckSlowPathX86 : public SlowPathCodeX86 {
public:
explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowNullPointer)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }
private:
@@ -68,11 +74,16 @@
explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }
private:
@@ -124,10 +135,14 @@
length_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }
private:
@@ -147,8 +162,10 @@
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
@@ -190,8 +207,10 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
RestoreLiveRegisters(codegen, locations);
@@ -224,10 +243,9 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
- __ fs()->call(Address::Absolute(do_clinit_
- ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
- : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
- RecordPcInfo(codegen, at_, dex_pc_);
+ x86_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType),
+ at_, dex_pc_, this);
// Move the class to the desired location.
Location out = locations->Out();
@@ -291,11 +309,16 @@
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize,
- pInstanceofNonTrivial)));
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
} else {
DCHECK(instruction_->IsCheckCast());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
RecordPcInfo(codegen, instruction_, dex_pc_);
@@ -324,9 +347,13 @@
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeoptimize)));
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
// No need to restore live registers.
DCHECK(instruction_->IsDeoptimize());
HDeoptimize* deoptimize = instruction_->AsDeoptimize();
@@ -398,6 +425,27 @@
return GetFloatingPointSpillSlotSize();
}
+void CodeGeneratorX86::InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
+ __ fs()->call(entry_point);
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
+
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
const X86InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options)
@@ -2015,14 +2063,18 @@
case Primitive::kPrimFloat:
// Processing a Dex `float-to-long' instruction.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pF2l)));
- codegen_->RecordPcInfo(conversion, conversion->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
+ conversion,
+ conversion->GetDexPc(),
+ nullptr);
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-long' instruction.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pD2l)));
- codegen_->RecordPcInfo(conversion, conversion->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
+ conversion,
+ conversion->GetDexPc(),
+ nullptr);
break;
default:
@@ -2779,9 +2831,15 @@
DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());
if (is_div) {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv)));
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
} else {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLmod)));
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
uint32_t dex_pc = is_div
? instruction->AsDiv()->GetDexPc()
@@ -3239,9 +3297,11 @@
__ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())));
-
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
}
@@ -3261,9 +3321,11 @@
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())));
-
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
}
@@ -4166,8 +4228,10 @@
DCHECK(!codegen_->IsLeafMethod());
// Note: if heap poisoning is enabled, pAputObject takes cares
// of poisoning the reference.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject)));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
break;
}
@@ -4729,8 +4793,10 @@
}
void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeliverException)));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4841,10 +4907,11 @@
}
void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) {
- __ fs()->call(Address::Absolute(instruction->IsEnter()
- ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject)
- : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject)));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
+ : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 65d6e0a..2e3d4d4 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -232,6 +232,12 @@
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path);
+
size_t GetWordSize() const OVERRIDE {
return kX86WordSize;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2c1392b..6991414 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -47,18 +47,23 @@
static constexpr int kC2ConditionMask = 0x400;
#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->
+#define QUICK_ENTRY_POINT(x) Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x), true)
class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }
private:
@@ -71,12 +76,16 @@
explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }
private:
@@ -127,8 +136,10 @@
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
@@ -166,6 +177,7 @@
length_location_(length_location) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -177,11 +189,12 @@
length_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- __ gs()->call(Address::Absolute(
- QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }
private:
@@ -211,10 +224,9 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
- __ gs()->call(Address::Absolute((do_clinit_
- ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
- : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
- RecordPcInfo(codegen, at_, dex_pc_);
+ x64_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType),
+ at_, dex_pc_, this);
Location out = locations->Out();
// Move the class to the desired location.
@@ -261,9 +273,10 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
Immediate(instruction_->GetStringIndex()));
- __ gs()->call(Address::Absolute(
- QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
@@ -309,14 +322,17 @@
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ dex_pc_,
+ this);
} else {
DCHECK(instruction_->IsCheckCast());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ dex_pc_,
+ this);
}
- RecordPcInfo(codegen, instruction_, dex_pc_);
if (instruction_->IsInstanceOf()) {
x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
@@ -343,14 +359,15 @@
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeoptimize), true));
DCHECK(instruction_->IsDeoptimize());
HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- codegen->RecordPcInfo(instruction_, dex_pc, this);
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ deoptimize,
+ deoptimize->GetDexPc(),
+ this);
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
@@ -463,6 +480,27 @@
return kX86_64WordSize;
}
+void CodeGeneratorX86_64::InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
+ __ gs()->call(entry_point);
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
+
static constexpr int kNumberOfCpuRegisterPairs = 0;
// Use a fake return address register to mimic Quick.
static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1);
@@ -3295,11 +3333,14 @@
instruction->GetTypeIndex());
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- __ gs()->call(
- Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
+
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
@@ -3319,11 +3360,13 @@
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- __ gs()->call(
- Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
@@ -4010,10 +4053,11 @@
DCHECK_EQ(value_type, Primitive::kPrimNot);
// Note: if heap poisoning is enabled, pAputObject takes cares
// of poisoning the reference.
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject),
- true));
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
break;
}
@@ -4560,9 +4604,10 @@
}
void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeliverException), true));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4672,11 +4717,11 @@
}
void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
- __ gs()->call(Address::Absolute(instruction->IsEnter()
- ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pLockObject)
- : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pUnlockObject),
- true));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
+ : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 4b90381..3b3915f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -232,6 +232,12 @@
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path);
+
size_t GetWordSize() const OVERRIDE {
return kX86_64WordSize;
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d391145..931a1c3 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -438,9 +438,14 @@
// ADD dst, src, 0
// with
// src
- instruction->ReplaceWith(input_other);
- instruction->GetBlock()->RemoveInstruction(instruction);
- return;
+ // Note that we cannot optimize `x + 0.0` to `x` for floating-point. When
+ // `x` is `-0.0`, the former expression yields `0.0`, while the latter
+ // yields `-0.0`.
+ if (Primitive::IsIntegralType(instruction->GetType())) {
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
}
HInstruction* left = instruction->GetLeft();
@@ -800,21 +805,24 @@
HConstant* input_cst = instruction->GetConstantRight();
HInstruction* input_other = instruction->GetLeastConstantLeft();
+ Primitive::Type type = instruction->GetType();
+ if (Primitive::IsFloatingPointType(type)) {
+ return;
+ }
+
if ((input_cst != nullptr) && input_cst->IsZero()) {
// Replace code looking like
// SUB dst, src, 0
// with
// src
+ // Note that we cannot optimize `x - 0.0` to `x` for floating-point. When
+ // `x` is `-0.0`, the former expression yields `0.0`, while the latter
+ // yields `-0.0`.
instruction->ReplaceWith(input_other);
instruction->GetBlock()->RemoveInstruction(instruction);
return;
}
- Primitive::Type type = instruction->GetType();
- if (!Primitive::IsIntegralType(type)) {
- return;
- }
-
HBasicBlock* block = instruction->GetBlock();
ArenaAllocator* allocator = GetGraph()->GetArena();
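
The comments added above capture an IEEE-754 subtlety: folding `x + 0.0` into `x` is only valid for integral types, because addition of signed zeros loses the sign of `-0.0`. A small standalone illustration (plain C++, not compiler code):

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = -0.0;
      double folded = x;        // what the (invalid) simplification would produce
      double actual = x + 0.0;  // (-0.0) + (+0.0) == +0.0 under round-to-nearest
      std::printf("signbit(folded) = %d\n", std::signbit(folded));  // prints 1
      std::printf("signbit(actual) = %d\n", std::signbit(actual));  // prints 0
      // The two values are also distinguishable through division:
      std::printf("1.0/folded = %f, 1.0/actual = %f\n", 1.0 / folded, 1.0 / actual);
      return 0;
    }
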
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 72ddabe..60f5ab2 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -248,7 +248,7 @@
bool core_register = (instruction->GetType() != Primitive::kPrimDouble)
&& (instruction->GetType() != Primitive::kPrimFloat);
- if (locations->CanCall()) {
+ if (locations->NeedsSafepoint()) {
if (codegen_->IsLeafMethod()) {
// TODO: We do this here because we do not want the suspend check to artificially
// create live registers. We should find another place, but this is currently the
@@ -782,7 +782,10 @@
} else {
DCHECK(!current->IsHighInterval());
int hint = current->FindFirstRegisterHint(free_until, liveness_);
- if (hint != kNoRegister) {
+ if ((hint != kNoRegister)
+ // For simplicity, if the hint we are getting for a pair cannot be used,
+ // we are just going to allocate a new pair.
+ && !(current->IsLowInterval() && IsBlocked(GetHighForLowRegister(hint)))) {
DCHECK(!IsBlocked(hint));
reg = hint;
} else if (current->IsLowInterval()) {
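
The register allocator change above declines a hinted register when the interval needs a pair and the paired high register is blocked, falling back to allocating a fresh pair. A self-contained sketch of that rule, under an assumed low/high pairing scheme (none of these names are ART's):

    #include <vector>

    // Assumed pairing scheme: the high half of a pair is the next register.
    int GetHighForLowRegister(int low) { return low + 1; }

    // Returns the low register to use, or -1 if none is free.
    int PickLowRegister(int hint, bool needs_pair, const std::vector<bool>& blocked) {
      const int num_regs = static_cast<int>(blocked.size());
      const int high = GetHighForLowRegister(hint);
      if (hint >= 0 && !blocked[hint] &&
          !(needs_pair && (high >= num_regs || blocked[high]))) {
        return hint;  // The hint (and, for a pair, its high half) is usable.
      }
      // Otherwise simply allocate a new register / pair.
      for (int reg = 0; reg + (needs_pair ? 1 : 0) < num_regs; reg += needs_pair ? 2 : 1) {
        if (!blocked[reg] && (!needs_pair || !blocked[reg + 1])) {
          return reg;
        }
      }
      return -1;  // Caller would have to spill.
    }
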
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 75d6137..976c002 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -282,12 +282,15 @@
UsageError("");
UsageError(" --inline-depth-limit=<depth-limit>: the depth limit of inlining for fine tuning");
UsageError(" the compiler. A zero value will disable inlining. Honored only by Optimizing.");
+ UsageError(" Has priority over the --compiler-filter option. Intended for ");
+ UsageError(" development/experimental use.");
UsageError(" Example: --inline-depth-limit=%d", CompilerOptions::kDefaultInlineDepthLimit);
UsageError(" Default: %d", CompilerOptions::kDefaultInlineDepthLimit);
UsageError("");
UsageError(" --inline-max-code-units=<code-units-count>: the maximum code units that a method");
UsageError(" can have to be considered for inlining. A zero value will disable inlining.");
- UsageError(" Honored only by Optimizing.");
+ UsageError(" Honored only by Optimizing. Has priority over the --compiler-filter option.");
+ UsageError(" Intended for development/experimental use.");
UsageError(" Example: --inline-max-code-units=%d",
CompilerOptions::kDefaultInlineMaxCodeUnits);
UsageError(" Default: %d", CompilerOptions::kDefaultInlineMaxCodeUnits);
@@ -562,8 +565,10 @@
int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold;
int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold;
int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold;
- int inline_depth_limit = CompilerOptions::kDefaultInlineDepthLimit;
- int inline_max_code_units = CompilerOptions::kDefaultInlineMaxCodeUnits;
+ static constexpr int kUnsetInlineDepthLimit = -1;
+ int inline_depth_limit = kUnsetInlineDepthLimit;
+ static constexpr int kUnsetInlineMaxCodeUnits = -1;
+ int inline_max_code_units = kUnsetInlineMaxCodeUnits;
// Profile file to use
double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
@@ -994,6 +999,22 @@
Usage("Unknown --compiler-filter value %s", compiler_filter_string);
}
+ // If they are not set, use default values for the inlining settings.
+ // TODO: We should rethink the compiler filter. We mostly save
+ // time here, which is orthogonal to space.
+ if (inline_depth_limit == kUnsetInlineDepthLimit) {
+ inline_depth_limit = (compiler_filter == CompilerOptions::kSpace)
+ // Implementation of the space filter: limit inlining depth.
+ ? CompilerOptions::kSpaceFilterInlineDepthLimit
+ : CompilerOptions::kDefaultInlineDepthLimit;
+ }
+ if (inline_max_code_units == kUnsetInlineMaxCodeUnits) {
+ inline_max_code_units = (compiler_filter == CompilerOptions::kSpace)
+ // Implementation of the space filter: limit inlining max code units.
+ ? CompilerOptions::kSpaceFilterInlineMaxCodeUnits
+ : CompilerOptions::kDefaultInlineMaxCodeUnits;
+ }
+
// Checks are all explicit until we know the architecture.
bool implicit_null_checks = false;
bool implicit_so_checks = false;
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 348b2a5..5f88714 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -94,7 +94,7 @@
int64_t offset = instr->ImmLSUnsigned() << instr->SizeLS();
std::ostringstream tmp_stream;
Thread::DumpThreadOffset<8>(tmp_stream, static_cast<uint32_t>(offset));
- AppendToOutput(" (%s)", tmp_stream.str().c_str());
+ AppendToOutput(" ; %s", tmp_stream.str().c_str());
}
}
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 39ce0d2..cfd3d24 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -24,11 +24,7 @@
namespace art {
-// TODO: replace excessive argument defaulting when we are at gcc 4.7
-// or later on host with delegating constructor support. Specifically,
-// starts_bits and storage_size/storage are mutually exclusive.
-BitVector::BitVector(uint32_t start_bits,
- bool expandable,
+BitVector::BitVector(bool expandable,
Allocator* allocator,
uint32_t storage_size,
uint32_t* storage)
@@ -36,12 +32,31 @@
storage_size_(storage_size),
allocator_(allocator),
expandable_(expandable) {
+ DCHECK(storage_ != nullptr);
+
static_assert(sizeof(*storage_) == kWordBytes, "word bytes");
static_assert(sizeof(*storage_) * 8u == kWordBits, "word bits");
- if (storage_ == nullptr) {
- storage_size_ = BitsToWords(start_bits);
- storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * kWordBytes));
- }
+}
+
+BitVector::BitVector(uint32_t start_bits,
+ bool expandable,
+ Allocator* allocator)
+ : BitVector(expandable,
+ allocator,
+ BitsToWords(start_bits),
+ static_cast<uint32_t*>(allocator->Alloc(BitsToWords(start_bits) * kWordBytes))) {
+}
+
+
+BitVector::BitVector(const BitVector& src,
+ bool expandable,
+ Allocator* allocator)
+ : BitVector(expandable,
+ allocator,
+ src.storage_size_,
+ static_cast<uint32_t*>(allocator->Alloc(src.storage_size_ * kWordBytes))) {
+ // Direct memcpy would be faster, but this should be fine too and is cleaner.
+ Copy(&src);
}
BitVector::~BitVector() {
@@ -357,4 +372,8 @@
}
}
+Allocator* BitVector::GetAllocator() const {
+ return allocator_;
+}
+
} // namespace art
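
The BitVector rework above replaces the old defaulted-argument constructor with C++11 delegating constructors: one constructor adopts caller-provided storage, and the allocating constructors delegate to it. A minimal sketch of the same pattern on a hypothetical class (ownership and cleanup omitted for brevity):

    #include <cstdint>
    #include <cstdlib>

    class Bits {
     public:
      // Adopts pre-allocated storage; this is the delegation target.
      Bits(uint32_t* storage, uint32_t storage_size)
          : storage_(storage), storage_size_(storage_size) {}

      // Allocates its own storage, then delegates to the adopting constructor.
      explicit Bits(uint32_t num_bits)
          : Bits(static_cast<uint32_t*>(std::calloc((num_bits + 31) / 32, sizeof(uint32_t))),
                 (num_bits + 31) / 32) {}

     private:
      uint32_t* storage_;
      uint32_t storage_size_;
    };
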
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index afa8dc1..9b55e70 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -113,9 +113,16 @@
BitVector(uint32_t start_bits,
bool expandable,
+ Allocator* allocator);
+
+ BitVector(bool expandable,
Allocator* allocator,
- uint32_t storage_size = 0,
- uint32_t* storage = nullptr);
+ uint32_t storage_size,
+ uint32_t* storage);
+
+ BitVector(const BitVector& src,
+ bool expandable,
+ Allocator* allocator);
virtual ~BitVector();
@@ -245,6 +252,8 @@
void Dump(std::ostream& os, const char* prefix) const;
+ Allocator* GetAllocator() const;
+
private:
/**
* @brief Dump the bitvector into buffer in a 00101..01 format.
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index 19c01f2..0e3df76 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -71,7 +71,7 @@
uint32_t bits[kWords];
memset(bits, 0, sizeof(bits));
- BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits);
+ BitVector bv(false, Allocator::GetNoopAllocator(), kWords, bits);
EXPECT_EQ(kWords, bv.GetStorageSize());
EXPECT_EQ(kWords * sizeof(uint32_t), bv.GetSizeOf());
EXPECT_EQ(bits, bv.GetRawStorage());
@@ -128,7 +128,7 @@
uint32_t bits[kWords];
memset(bits, 0, sizeof(bits));
- BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits);
+ BitVector bv(false, Allocator::GetNoopAllocator(), kWords, bits);
bv.SetInitialBits(0u);
EXPECT_EQ(0u, bv.NumSetBits());
bv.SetInitialBits(1u);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 62ba907..48dc88d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1097,6 +1097,28 @@
}
}
+// Set image methods' entry point to interpreter.
+class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor {
+ public:
+ explicit SetInterpreterEntrypointArtMethodVisitor(size_t image_pointer_size)
+ : image_pointer_size_(image_pointer_size) {}
+
+ void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kIsDebugBuild && !method->IsRuntimeMethod()) {
+ CHECK(method->GetDeclaringClass() != nullptr);
+ }
+ if (!method->IsNative() && !method->IsRuntimeMethod() && !method->IsResolutionMethod()) {
+ method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
+ image_pointer_size_);
+ }
+ }
+
+ private:
+ const size_t image_pointer_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(SetInterpreterEntrypointArtMethodVisitor);
+};
+
void ClassLinker::InitFromImage() {
VLOG(startup) << "ClassLinker::InitFromImage entering";
CHECK(!init_done_);
@@ -1187,19 +1209,11 @@
// Set entry point to interpreter if in InterpretOnly mode.
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
- const auto& header = space->GetImageHeader();
- const auto& methods = header.GetMethodsSection();
- const auto art_method_size = ArtMethod::ObjectSize(image_pointer_size_);
- for (uintptr_t pos = 0; pos < methods.Size(); pos += art_method_size) {
- auto* method = reinterpret_cast<ArtMethod*>(space->Begin() + pos + methods.Offset());
- if (kIsDebugBuild && !method->IsRuntimeMethod()) {
- CHECK(method->GetDeclaringClass() != nullptr);
- }
- if (!method->IsNative() && !method->IsRuntimeMethod() && !method->IsResolutionMethod()) {
- method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
- image_pointer_size_);
- }
- }
+ const ImageHeader& header = space->GetImageHeader();
+ const ImageSection& methods = header.GetMethodsSection();
+ const size_t art_method_size = ArtMethod::ObjectSize(image_pointer_size_);
+ SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
+ methods.VisitPackedArtMethods(&visitor, space->Begin(), art_method_size);
}
// reinit class_roots_
@@ -1259,23 +1273,16 @@
// Moving concurrent:
// Need to make sure to not copy ArtMethods without doing read barriers since the roots are
// marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
- std::vector<std::pair<GcRoot<mirror::ClassLoader>, ClassTable*>> reinsert;
- for (auto it = classes_.begin(); it != classes_.end(); ) {
- it->second->VisitRoots(visitor, flags);
- const GcRoot<mirror::ClassLoader>& root = it->first;
- mirror::ClassLoader* old_ref = root.Read<kWithoutReadBarrier>();
- root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- mirror::ClassLoader* new_ref = root.Read<kWithoutReadBarrier>();
- if (new_ref != old_ref) {
- reinsert.push_back(*it);
- it = classes_.erase(it);
- } else {
- ++it;
+ boot_class_table_.VisitRoots(visitor, flags);
+ for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
+ // May be null for boot ClassLoader.
+ root.VisitRoot(visitor, RootInfo(kRootVMInternal));
+ ClassTable* const class_table = root.Read()->GetClassTable();
+ if (class_table != nullptr) {
+ // May be null if we have no classes.
+ class_table->VisitRoots(visitor, flags);
}
}
- for (auto& pair : reinsert) {
- classes_.Put(pair.first, pair.second);
- }
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
@@ -1332,10 +1339,12 @@
}
void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) {
- for (auto& pair : classes_) {
- ClassTable* const class_table = pair.second;
- if (!class_table->Visit(visitor)) {
- return;
+ if (boot_class_table_.Visit(visitor)) {
+ for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
+ ClassTable* const class_table = root.Read()->GetClassTable();
+ if (class_table != nullptr && !class_table->Visit(visitor)) {
+ return;
+ }
}
}
}
@@ -1455,7 +1464,10 @@
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
STLDeleteElements(&oat_files_);
- STLDeleteValues(&classes_);
+ for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
+ ClassTable* const class_table = root.Read()->GetClassTable();
+ delete class_table;
+ }
}
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
@@ -2893,8 +2905,12 @@
void ClassLinker::MoveClassTableToPreZygote() {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (auto& class_table : classes_) {
- class_table.second->FreezeSnapshot();
+ boot_class_table_.FreezeSnapshot();
+ for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
+ ClassTable* const class_table = root.Read()->GetClassTable();
+ if (class_table != nullptr) {
+ class_table->FreezeSnapshot();
+ }
}
}
@@ -2927,10 +2943,15 @@
MoveImageClassesToClassTable();
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (auto& pair : classes_) {
+ const size_t hash = ComputeModifiedUtf8Hash(descriptor);
+ mirror::Class* klass = boot_class_table_.Lookup(descriptor, hash);
+ if (klass != nullptr) {
+ result.push_back(klass);
+ }
+ for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
// There can only be one class with the same descriptor per class loader.
- ClassTable* const class_table = pair.second;
- mirror::Class* klass = class_table->Lookup(descriptor, ComputeModifiedUtf8Hash(descriptor));
+ ClassTable* const class_table = root.Read()->GetClassTable();
+ klass = class_table->Lookup(descriptor, hash);
if (klass != nullptr) {
result.push_back(klass);
}
@@ -3984,22 +4005,21 @@
}
ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) {
- auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader));
- if (it != classes_.end()) {
- return it->second;
+ if (class_loader == nullptr) {
+ return &boot_class_table_;
}
- // Class table for loader not found, add it to the table.
- auto* const class_table = new ClassTable;
- classes_.Put(GcRoot<mirror::ClassLoader>(class_loader), class_table);
+ ClassTable* class_table = class_loader->GetClassTable();
+ if (class_table == nullptr) {
+ class_table = new ClassTable;
+ class_loaders_.push_back(class_loader);
+ // The class loader does not have a class table yet, so attach the new one to it.
+ class_loader->SetClassTable(class_table);
+ }
return class_table;
}
ClassTable* ClassLinker::ClassTableForClassLoader(mirror::ClassLoader* class_loader) {
- auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader));
- if (it != classes_.end()) {
- return it->second;
- }
- return nullptr;
+ return class_loader == nullptr ? &boot_class_table_ : class_loader->GetClassTable();
}
bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
@@ -4043,10 +4063,10 @@
// Retire the temporary class and create the correctly sized resolved class.
StackHandleScope<1> hs(self);
auto h_new_class = hs.NewHandle(klass->CopyOf(self, class_size, imt, image_pointer_size_));
- // Set array lengths to 0 since we don't want the GC to visit two different classes with the
- // same ArtFields with the same If this occurs, it causes bugs in remembered sets since the GC
- // may not see any references to the from space and clean the card. Though there was references
- // to the from space that got marked by the first class.
+ // Set arrays to null since we don't want to have multiple classes with the same ArtField or
+ // ArtMethod array pointers. If this occurs, it causes bugs in remembered sets since the GC
+ // may not see any references to the target space and clean the card for a class if another
+ // class had the same array pointer.
klass->SetDirectMethodsPtrUnchecked(nullptr);
klass->SetVirtualMethodsPtr(nullptr);
klass->SetSFieldsPtrUnchecked(nullptr);
@@ -5635,28 +5655,33 @@
}
void ClassLinker::DumpForSigQuit(std::ostream& os) {
- Thread* self = Thread::Current();
+ ScopedObjectAccess soa(Thread::Current());
if (dex_cache_image_class_lookup_required_) {
- ScopedObjectAccess soa(self);
MoveImageClassesToClassTable();
}
- ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
<< NumNonZygoteClasses() << "\n";
}
size_t ClassLinker::NumZygoteClasses() const {
- size_t sum = 0;
- for (auto& pair : classes_) {
- sum += pair.second->NumZygoteClasses();
+ size_t sum = boot_class_table_.NumZygoteClasses();
+ for (const GcRoot<mirror::ClassLoader>& root : class_loaders_) {
+ ClassTable* const class_table = root.Read()->GetClassTable();
+ if (class_table != nullptr) {
+ sum += class_table->NumZygoteClasses();
+ }
}
return sum;
}
size_t ClassLinker::NumNonZygoteClasses() const {
- size_t sum = 0;
- for (auto& pair : classes_) {
- sum += pair.second->NumNonZygoteClasses();
+ size_t sum = boot_class_table_.NumNonZygoteClasses();
+ for (const GcRoot<mirror::ClassLoader>& root : class_loaders_) {
+ ClassTable* const class_table = root.Read()->GetClassTable();
+ if (class_table != nullptr) {
+ sum += class_table->NumNonZygoteClasses();
+ }
}
return sum;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 17d6be6..7243a25 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -475,25 +475,18 @@
void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
private:
- class CompareClassLoaderGcRoot {
- public:
- bool operator()(const GcRoot<mirror::ClassLoader>& a, const GcRoot<mirror::ClassLoader>& b)
- const SHARED_REQUIRES(Locks::mutator_lock_) {
- return a.Read() < b.Read();
- }
- };
-
- typedef SafeMap<GcRoot<mirror::ClassLoader>, ClassTable*, CompareClassLoaderGcRoot>
- ClassLoaderClassTable;
-
void VisitClassesInternal(ClassVisitor* visitor)
REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of zygote and image classes.
- size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+ size_t NumZygoteClasses() const
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of non zygote nor image classes.
- size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+ size_t NumNonZygoteClasses() const
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -707,8 +700,13 @@
std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
- // This contains strong roots. To enable concurrent root scanning of the class table.
- ClassLoaderClassTable classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ // This contains the class loaders which have class tables. It is populated by
+ // InsertClassTableForClassLoader.
+ std::vector<GcRoot<mirror::ClassLoader>> class_loaders_
+ GUARDED_BY(Locks::classlinker_classes_lock_);
+
+ // Boot class path table; its classes have a null class loader.
+ ClassTable boot_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
// New class roots, only used by CMS since the GC needs to mark these in the pause.
std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 4212dda..3c84d8f 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -546,6 +546,7 @@
struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> {
ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, class_table_), "classTable");
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages");
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent");
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache");
diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
index f892f98..1ae2b1b 100644
--- a/runtime/dex_instruction_utils.h
+++ b/runtime/dex_instruction_utils.h
@@ -144,49 +144,49 @@
constexpr DexMemAccessType IGetMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionIGet(opcode));
+ DCHECK(IsInstructionIGet(code));
#endif
return static_cast<DexMemAccessType>(code - Instruction::IGET);
}
constexpr DexMemAccessType IPutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionIPut(opcode));
+ DCHECK(IsInstructionIPut(code));
#endif
return static_cast<DexMemAccessType>(code - Instruction::IPUT);
}
constexpr DexMemAccessType SGetMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionSGet(opcode));
+ DCHECK(IsInstructionSGet(code));
#endif
return static_cast<DexMemAccessType>(code - Instruction::SGET);
}
constexpr DexMemAccessType SPutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionSPut(opcode));
+ DCHECK(IsInstructionSPut(code));
#endif
return static_cast<DexMemAccessType>(code - Instruction::SPUT);
}
constexpr DexMemAccessType AGetMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionAGet(opcode));
+ DCHECK(IsInstructionAGet(code));
#endif
return static_cast<DexMemAccessType>(code - Instruction::AGET);
}
constexpr DexMemAccessType APutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionAPut(opcode));
+ DCHECK(IsInstructionAPut(code));
#endif
return static_cast<DexMemAccessType>(code - Instruction::APUT);
}
constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionIGetOrIPut(opcode));
+ DCHECK(IsInstructionIGetOrIPut(code));
#endif
return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
}
@@ -216,14 +216,14 @@
constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionSGetOrSPut(opcode));
+ DCHECK(IsInstructionSGetOrSPut(code));
#endif
return (code >= Instruction::SPUT) ? SPutMemAccessType(code) : SGetMemAccessType(code);
}
constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
- DCHECK(IsInstructionAGetOrAPut(opcode));
+ DCHECK(IsInstructionAGetOrAPut(code));
#endif
return (code >= Instruction::APUT) ? APutMemAccessType(code) : AGetMemAccessType(code);
}
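
The fixes above rename the identifier used inside the DCHECKs from the stale `opcode` to the actual parameter `code`; because those blocks are compiled only under C++14 (which allows statements in constexpr functions), the wrong name previously went unnoticed. A standalone sketch of the same guard pattern:

    #include <cassert>

    constexpr int DoubledIfEven(int code) {
    #if __cplusplus >= 201402L  // C++14 allows statements in constexpr functions.
      assert(code % 2 == 0);    // Must reference the real parameter name.
    #endif
      return code * 2;
    }

    static_assert(DoubledIfEven(4) == 8, "sanity check");
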
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 3e15cc5..66e88ba 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -426,15 +426,15 @@
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_object,
- ArtMethod** referrer, Thread* self) {
+ ArtMethod* referrer, Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
+ ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, referrer);
if (resolved_method == nullptr) {
StackHandleScope<1> hs(self);
mirror::Object* null_this = nullptr;
HandleWrapper<mirror::Object> h_this(
hs.NewHandleWrapper(type == kStatic ? &null_this : this_object));
- resolved_method = class_linker->ResolveMethod(self, method_idx, *referrer, type);
+ resolved_method = class_linker->ResolveMethod(self, method_idx, referrer, type);
}
if (UNLIKELY(resolved_method == nullptr)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
@@ -448,11 +448,11 @@
// Incompatible class change should have been handled in resolve method.
if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method,
- *referrer);
+ referrer);
return nullptr; // Failure.
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- mirror::Class* referring_class = (*referrer)->GetDeclaringClass();
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
bool can_access_resolved_method =
referring_class->CheckResolvedMethodAccess<type>(methods_class, resolved_method,
method_idx);
@@ -480,7 +480,7 @@
return klass->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
}
case kSuper: {
- mirror::Class* super_class = (*referrer)->GetDeclaringClass()->GetSuperClass();
+ mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass();
uint16_t vtable_index = resolved_method->GetMethodIndex();
if (access_check) {
// Check existence of super class.
@@ -517,7 +517,7 @@
resolved_method, class_linker->GetImagePointerSize());
if (UNLIKELY(interface_method == nullptr)) {
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
- *this_object, *referrer);
+ *this_object, referrer);
return nullptr; // Failure.
}
return interface_method;
@@ -534,7 +534,7 @@
template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
mirror::Object** this_object, \
- ArtMethod** referrer, \
+ ArtMethod* referrer, \
Thread* self)
#define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index dc04c0a..53f2677 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -138,7 +138,7 @@
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(
- uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self)
+ uint32_t method_idx, mirror::Object** this_object, ArtMethod* referrer, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 6fe2bb6..da4b82c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1989,7 +1989,7 @@
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
visitor.VisitArguments();
- method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
+ method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method,
self);
visitor.FixupReferences();
}
@@ -2112,7 +2112,7 @@
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
visitor.VisitArguments();
- method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
+ method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method,
self);
visitor.FixupReferences();
}
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 2486a98..6468659 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -169,7 +169,7 @@
mirror::Object* receiver = nullptr; // Always static. (see 'kStatic')
ArtMethod* sf_method = shadow_frame.GetMethod();
ArtMethod* const called_method = FindMethodFromCode<kStatic, do_access_check>(
- method_idx, &receiver, &sf_method, self);
+ method_idx, &receiver, sf_method, self);
uint32_t vregA = inst->VRegA_21c();
@@ -254,7 +254,7 @@
Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
ArtMethod* sf_method = shadow_frame.GetMethod();
ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
- method_idx, &receiver, &sf_method, self);
+ method_idx, &receiver, sf_method, self);
// The shadow frame should already be pushed, so we don't need to update it.
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index f449406..7776f8f 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1391,7 +1391,7 @@
// heap requirements is probably more valuable than the efficiency.
CHECK_GT(replyLen, 0);
memcpy(expandBufAddSpace(pReply, replyLen), replyBuf, replyLen);
- free(replyBuf);
+ delete[] replyBuf;
}
return ERR_NONE;
}
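
The free()-to-delete[] change above matters because replyBuf is presumably allocated with new[] elsewhere in the JDWP code, and releasing new[]-allocated storage with free() is undefined behavior. A minimal stand-alone sketch of the pairing rule, with a hypothetical buffer rather than the actual JDWP allocation site:

    #include <cstring>

    int main() {
      const char reply[] = "example";
      size_t len = sizeof(reply);
      char* buf = new char[len];      // allocated with new[] ...
      std::memcpy(buf, reply, len);
      // ... so it must be released with delete[], never free(buf).
      delete[] buf;
      return 0;
    }
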
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 701ba4a..6af90bb 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -824,6 +824,34 @@
}
}
+class ReadBarrierOnNativeRootsVisitor {
+ public:
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED,
+ MemberOffset offset ATTRIBUTE_UNUSED,
+ bool is_static ATTRIBUTE_UNUSED) const {}
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* old_ref = root->AsMirrorPtr();
+ mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root);
+ if (old_ref != new_ref) {
+ // Update the field atomically. This may fail if mutator updates before us, but it's ok.
+ auto* atomic_root =
+ reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
+ atomic_root->CompareExchangeStrongSequentiallyConsistent(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(old_ref),
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(new_ref));
+ }
+ }
+};
+
// The pre-fence visitor for Class::CopyOf().
class CopyClassVisitor {
public:
@@ -842,6 +870,10 @@
mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_, pointer_size_);
h_new_class_obj->SetClassSize(new_length_);
+ // Visit all of the references to make sure there are no from-space references in the native
+ // roots.
+ h_new_class_obj->VisitReferences<true>(h_new_class_obj->GetClass(),
+ ReadBarrierOnNativeRootsVisitor());
}
private:
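
The visitor added above publishes to-space references with a compare-and-swap and deliberately ignores CAS failures: if the exchange loses a race, a mutator has already stored a reference that is at least as current. A minimal stand-alone sketch of that lossy-publish pattern, using std::atomic instead of ART's Atomic<> wrapper:

    #include <atomic>

    struct Obj {};
    Obj to_space_obj;  // pretend this is the forwarded ("to-space") copy

    // Publish the to-space reference; a failed CAS is fine because a concurrent
    // mutator can only have written a reference that is at least as up to date.
    void PublishForwardedRef(std::atomic<Obj*>* slot) {
      Obj* old_ref = slot->load(std::memory_order_relaxed);
      Obj* new_ref = &to_space_obj;  // what a read barrier would hand back
      if (old_ref != new_ref) {
        slot->compare_exchange_strong(old_ref, new_ref);  // lossy on purpose
      }
    }
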
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 134f1cd..940aaa6 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -22,6 +22,7 @@
namespace art {
struct ClassLoaderOffsets;
+class ClassTable;
namespace mirror {
@@ -35,12 +36,23 @@
ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
}
+ ClassTable* GetClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return reinterpret_cast<ClassTable*>(
+ GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
+ }
+ void SetClassTable(ClassTable* class_table) SHARED_REQUIRES(Locks::mutator_lock_) {
+ SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_),
+ reinterpret_cast<uint64_t>(class_table));
+ }
private:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
HeapReference<Object> packages_;
HeapReference<ClassLoader> parent_;
HeapReference<Object> proxyCache_;
+ // Native pointer to the class table; this needs to be zeroed out when writing the image.
+ uint32_t padding_ ATTRIBUTE_UNUSED;
+ uint64_t class_table_;
friend struct art::ClassLoaderOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ClassLoader);
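
Keeping the ClassTable* in a uint64_t field gives mirror::ClassLoader the same layout on 32-bit and 64-bit targets, and the accessors above simply reinterpret_cast in both directions; the padding_ word presumably keeps the 64-bit field naturally aligned after the three 32-bit heap references. A small sketch of the same packing idea, with a hypothetical native type standing in for art::ClassTable:

    #include <stdint.h>

    struct NativeThing { int payload; };  // hypothetical native-side object

    // Pack a native pointer into a fixed-width 64-bit slot, as the accessors above do.
    uint64_t EncodePointer(NativeThing* ptr) {
      return reinterpret_cast<uint64_t>(ptr);  // upper bits stay zero on 32-bit targets
    }

    NativeThing* DecodePointer(uint64_t field) {
      return reinterpret_cast<NativeThing*>(field);
    }
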
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 7fe8bb9..b86a4c8 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -16,6 +16,7 @@
#include "reg_type-inl.h"
+#include "base/bit_vector-inl.h"
#include "base/casts.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -309,13 +310,17 @@
std::string UnresolvedMergedType::Dump() const {
std::stringstream result;
- std::set<uint16_t> types = GetMergedTypes();
- result << "UnresolvedMergedReferences(";
- auto it = types.begin();
- result << reg_type_cache_->GetFromId(*it).Dump();
- for (++it; it != types.end(); ++it) {
- result << ", ";
- result << reg_type_cache_->GetFromId(*it).Dump();
+ result << "UnresolvedMergedReferences(" << GetResolvedPart().Dump() << " | ";
+ const BitVector& types = GetUnresolvedTypes();
+
+ bool first = true;
+ for (uint32_t idx : types.Indexes()) {
+ if (!first) {
+ result << ", ";
+ } else {
+ first = false;
+ }
+ result << reg_type_cache_->GetFromId(idx).Dump();
}
result << ")";
return result.str();
@@ -492,32 +497,6 @@
return true;
}
-std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
- std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
- const RegType& left = reg_type_cache_->GetFromId(refs.first);
- const RegType& right = reg_type_cache_->GetFromId(refs.second);
-
- std::set<uint16_t> types;
- if (left.IsUnresolvedMergedReference()) {
- types = down_cast<const UnresolvedMergedType*>(&left)->GetMergedTypes();
- } else {
- types.insert(refs.first);
- }
- if (right.IsUnresolvedMergedReference()) {
- std::set<uint16_t> right_types =
- down_cast<const UnresolvedMergedType*>(&right)->GetMergedTypes();
- types.insert(right_types.begin(), right_types.end());
- } else {
- types.insert(refs.second);
- }
- if (kIsDebugBuild) {
- for (const auto& type : types) {
- CHECK(!reg_type_cache_->GetFromId(type).IsUnresolvedMergedReference());
- }
- }
- return types;
-}
-
const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
if (!IsUnresolvedTypes()) {
mirror::Class* super_klass = GetClass()->GetSuperClass();
@@ -803,12 +782,24 @@
CHECK(klass_.IsNull()) << *this;
}
+UnresolvedMergedType::UnresolvedMergedType(const RegType& resolved,
+ const BitVector& unresolved,
+ const RegTypeCache* reg_type_cache,
+ uint16_t cache_id)
+ : UnresolvedType("", cache_id),
+ reg_type_cache_(reg_type_cache),
+ resolved_part_(resolved),
+ unresolved_types_(unresolved, false, unresolved.GetAllocator()) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+}
void UnresolvedMergedType::CheckInvariants() const {
// Unresolved merged types: merged types should be defined.
CHECK(descriptor_.empty()) << *this;
CHECK(klass_.IsNull()) << *this;
- CHECK_NE(merged_types_.first, 0U) << *this;
- CHECK_NE(merged_types_.second, 0U) << *this;
+ CHECK(resolved_part_.IsReferenceTypes());
+ CHECK(!resolved_part_.IsUnresolvedTypes());
}
void UnresolvedReferenceType::CheckInvariants() const {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 4893088..2834a9a 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -22,6 +22,7 @@
#include <set>
#include <string>
+#include "base/bit_vector.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
@@ -230,6 +231,14 @@
// from another.
const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Same as above, but also handles the case where incoming_type == this.
+ const RegType& SafeMerge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (Equals(incoming_type)) {
+ return *this;
+ }
+ return Merge(incoming_type, reg_types);
+ }
/*
* A basic Join operation on classes. For a pair of types S and T the Join,
@@ -868,30 +877,23 @@
const RegTypeCache* const reg_type_cache_;
};
-// A merge of two unresolved types. If the types were resolved this may be
-// Conflict or another
-// known ReferenceType.
+// A merge of unresolved (and resolved) types. If the types were resolved this may be
+// Conflict or another known ReferenceType.
class UnresolvedMergedType FINAL : public UnresolvedType {
public:
- UnresolvedMergedType(uint16_t left_id, uint16_t right_id,
+ // Note: the constructor will copy the unresolved BitVector, not use it directly.
+ UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved,
const RegTypeCache* reg_type_cache, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
- : UnresolvedType("", cache_id),
- reg_type_cache_(reg_type_cache),
- merged_types_(left_id, right_id) {
- if (kIsDebugBuild) {
- CheckInvariants();
- }
- }
+ SHARED_REQUIRES(Locks::mutator_lock_);
- // The top of a tree of merged types.
- std::pair<uint16_t, uint16_t> GetTopMergedTypes() const {
- DCHECK(IsUnresolvedMergedReference());
- return merged_types_;
+ // The resolved part. See description below.
+ const RegType& GetResolvedPart() const {
+ return resolved_part_;
}
-
- // The complete set of merged types.
- std::set<uint16_t> GetMergedTypes() const;
+ // The unresolved part.
+ const BitVector& GetUnresolvedTypes() const {
+ return unresolved_types_;
+ }
bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
@@ -903,7 +905,16 @@
void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
const RegTypeCache* const reg_type_cache_;
- const std::pair<uint16_t, uint16_t> merged_types_;
+
+ // The original implementation of merged types was a binary tree. Collection of the flattened
+ // types ("leaves") can be expensive, so we store the expanded list now, as two components:
+ // 1) A resolved component. We use Zero when there is no resolved component, as that will be
+ // an identity merge.
+ // 2) A bitvector of the unresolved reference types. A bitvector was chosen with the assumption
+ // that there should not be too many types in flight in practice. (We also bias the index
+ // against the index of Zero, which is one of the later default entries in any cache.)
+ const RegType& resolved_part_;
+ const BitVector unresolved_types_;
};
std::ostream& operator<<(std::ostream& os, const RegType& rhs)
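
The comment above describes the flattened representation: one resolved component (Zero when absent, since a merge with Zero is an identity for reference types) plus a bit vector keyed by the cache ids of the unresolved components, so combining two merged types is a join of the resolved parts and a union of the bits. A rough, stand-alone illustration of that shape, with std::vector<bool> in place of ART's BitVector and strings in place of RegType:

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Simplified stand-ins for the real types; not ART's API.
    struct FlatMergedType {
      std::string resolved_part;         // "Zero" when there is no resolved component.
      std::vector<bool> unresolved_ids;  // bit i set <=> cache id i is an unresolved member.
    };

    FlatMergedType Combine(const FlatMergedType& a, const FlatMergedType& b) {
      FlatMergedType out;
      // The real code joins the resolved parts via SafeMerge(); preferring the
      // non-"Zero" side here is only a placeholder for that join.
      out.resolved_part = (a.resolved_part == "Zero") ? b.resolved_part : a.resolved_part;
      const std::size_t n = std::max(a.unresolved_ids.size(), b.unresolved_ids.size());
      out.unresolved_ids.resize(n);
      for (std::size_t i = 0; i < n; ++i) {
        bool in_a = i < a.unresolved_ids.size() && a.unresolved_ids[i];
        bool in_b = i < b.unresolved_ids.size() && b.unresolved_ids[i];
        out.unresolved_ids[i] = in_a || in_b;  // set union, bit by bit
      }
      return out;
    }
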
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 4469e64..e14306c 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -317,39 +317,62 @@
}
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
- std::set<uint16_t> types;
+ BitVector types(1, // Allocate at least a word.
+ true, // Is expandable.
+ Allocator::GetMallocAllocator()); // TODO: Arenas in the verifier.
+ const RegType* left_resolved;
if (left.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(left));
- types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
+ types.Copy(&left_merge->GetUnresolvedTypes());
+ left_resolved = &left_merge->GetResolvedPart();
+ } else if (left.IsUnresolvedTypes()) {
+ types.SetBit(left.GetId());
+ left_resolved = &Zero();
} else {
- types.insert(left.GetId());
+ left_resolved = &left;
}
+
+ const RegType* right_resolved;
if (right.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(right));
- std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
- types.insert(right_types.begin(), right_types.end());
+ const UnresolvedMergedType* right_merge = down_cast<const UnresolvedMergedType*>(&right);
+ types.Union(&right_merge->GetUnresolvedTypes());
+ right_resolved = &right_merge->GetResolvedPart();
+ } else if (right.IsUnresolvedTypes()) {
+ types.SetBit(right.GetId());
+ right_resolved = &Zero();
} else {
- types.insert(right.GetId());
+ right_resolved = &right;
}
+
+ // Merge the resolved parts. Left and right might be equal, so use SafeMerge.
+ const RegType& resolved_parts_merged = left_resolved->SafeMerge(*right_resolved, this);
+ // If we get a conflict here, the merge result is a conflict, not an unresolved merge type.
+ if (resolved_parts_merged.IsConflict()) {
+ return Conflict();
+ }
+
// Check if entry already exists.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedMergedReference()) {
- std::set<uint16_t> cur_entry_types =
- (down_cast<const UnresolvedMergedType*>(cur_entry))->GetMergedTypes();
- if (cur_entry_types == types) {
+ const UnresolvedMergedType* cmp_type = down_cast<const UnresolvedMergedType*>(cur_entry);
+ const RegType& resolved_part = cmp_type->GetResolvedPart();
+ const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes();
+ // Use SameBitsSet. "types" is expandable to allow merging in the components, but the
+ // BitVector in the final RegType will be made non-expandable.
+ if (&resolved_part == &resolved_parts_merged &&
+ types.SameBitsSet(&unresolved_part)) {
return *cur_entry;
}
}
}
+
// Create entry.
- RegType* entry = new UnresolvedMergedType(left.GetId(), right.GetId(), this, entries_.size());
+ RegType* entry = new UnresolvedMergedType(resolved_parts_merged,
+ types,
+ this,
+ entries_.size());
AddEntry(entry);
- if (kIsDebugBuild) {
- UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry);
- std::set<uint16_t> check_types = tmp_entry->GetMergedTypes();
- CHECK(check_types == types);
- }
return *entry;
}
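
FromUnresolvedMerge above deduplicates against the cache by comparing the resolved component by identity and the unresolved bits with SameBitsSet, and only allocates a new UnresolvedMergedType when nothing matches. A toy find-or-create sketch of that lookup, with std::vector<bool> standing in for BitVector and pointer identity standing in for interned RegTypes:

    #include <memory>
    #include <vector>

    // Toy stand-in for a cache entry; pointer identity models interned RegTypes.
    struct ToyEntry {
      const char* resolved;          // compared by identity, like &resolved_part
      std::vector<bool> unresolved;  // set bits = cache ids of unresolved members
    };

    // Reuse an entry with identical components, or append a new one.
    const ToyEntry& FindOrCreate(std::vector<std::unique_ptr<ToyEntry>>& cache,
                                 const char* resolved,
                                 const std::vector<bool>& unresolved) {
      for (const auto& entry : cache) {
        if (entry->resolved == resolved && entry->unresolved == unresolved) {
          return *entry;  // same resolved part and same bit set: reuse
        }
      }
      cache.push_back(std::unique_ptr<ToyEntry>(new ToyEntry{resolved, unresolved}));
      return *cache.back();
    }
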
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 2fecc8b..971b1f5 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -18,6 +18,7 @@
#include <set>
+#include "base/bit_vector.h"
#include "base/casts.h"
#include "common_runtime_test.h"
#include "reg_type_cache-inl.h"
@@ -421,7 +422,7 @@
EXPECT_EQ(expected, resolved_unintialiesd.Dump());
expected = "Unresolved And Uninitialized Reference: java.lang.DoesNotExist Allocation PC: 12";
EXPECT_EQ(expected, unresolved_unintialized.Dump());
- expected = "UnresolvedMergedReferences(Unresolved Reference: java.lang.DoesNotExist, Unresolved Reference: java.lang.DoesNotExistEither)";
+ expected = "UnresolvedMergedReferences(Zero/null | Unresolved Reference: java.lang.DoesNotExist, Unresolved Reference: java.lang.DoesNotExistEither)";
EXPECT_EQ(expected, unresolved_merged.Dump());
}
@@ -477,9 +478,10 @@
EXPECT_TRUE(merged.IsUnresolvedMergedReference());
RegType& merged_nonconst = const_cast<RegType&>(merged);
- std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
- EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
- EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
+ const BitVector& unresolved_parts =
+ down_cast<UnresolvedMergedType*>(&merged_nonconst)->GetUnresolvedTypes();
+ EXPECT_TRUE(unresolved_parts.IsBitSet(ref_type_0.GetId()));
+ EXPECT_TRUE(unresolved_parts.IsBitSet(ref_type_1.GetId()));
}
TEST_F(RegTypeTest, MergingFloat) {
diff --git a/test/474-fp-sub-neg/expected.txt b/test/474-fp-sub-neg/expected.txt
index e6ffe0d..1c15abb 100644
--- a/test/474-fp-sub-neg/expected.txt
+++ b/test/474-fp-sub-neg/expected.txt
@@ -1,2 +1,6 @@
-0.0
+0.0
+0.0
-0.0
+0.0
+0.0
diff --git a/test/474-fp-sub-neg/info.txt b/test/474-fp-sub-neg/info.txt
index eced93f..82effdb 100644
--- a/test/474-fp-sub-neg/info.txt
+++ b/test/474-fp-sub-neg/info.txt
@@ -1,5 +1,11 @@
Regression check for optimizing simplify instruction pass.
+
A pair (sub, neg) should not be transformed to (sub) for
fp calculation because we can lose the sign of zero for
the following expression:
- ( A - B ) != B - A ; if B == A
+
+Addition or subtraction with fp zero should not be eliminated
+because:
+ -0.0 + 0.0 = 0.0
+ -0.0 - -0.0 = 0.0
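
The rules above are plain IEEE 754 signed-zero behavior: adding +0.0 or subtracting -0.0 is not an identity when the operand is -0.0, and -(A - B) differs in sign from B - A when A == B. A quick check in C++, which follows the same semantics as the Java test below:

    #include <cstdio>

    int main() {
      float nz = -0.0f;
      std::printf("%f\n", nz);             // -0.000000
      std::printf("%f\n", nz + 0.0f);      //  0.000000: adding +0.0 loses the sign
      std::printf("%f\n", nz - (-0.0f));   //  0.000000: subtracting -0.0 loses the sign
      float a = 1.0f, b = 1.0f;
      std::printf("%f vs %f\n", -(a - b), b - a);  // -0.000000 vs 0.000000
      return 0;
    }
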
diff --git a/test/474-fp-sub-neg/src/Main.java b/test/474-fp-sub-neg/src/Main.java
index e6bce67..c190e8e 100644
--- a/test/474-fp-sub-neg/src/Main.java
+++ b/test/474-fp-sub-neg/src/Main.java
@@ -24,6 +24,8 @@
}
System.out.println(f);
+ System.out.println(f + 0f);
+ System.out.println(f - (-0f));
}
public static void doubleTest() {
@@ -35,6 +37,8 @@
}
System.out.println(d);
+ System.out.println(d + 0f);
+ System.out.println(d - (-0f));
}
public static void main(String[] args) {
diff --git a/test/526-checker-caller-callee-regs/expected.txt b/test/526-checker-caller-callee-regs/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/526-checker-caller-callee-regs/expected.txt
diff --git a/test/526-checker-caller-callee-regs/info.txt b/test/526-checker-caller-callee-regs/info.txt
new file mode 100644
index 0000000..0e0373a
--- /dev/null
+++ b/test/526-checker-caller-callee-regs/info.txt
@@ -0,0 +1 @@
+Test allocation of caller and callee saved registers.
diff --git a/test/526-checker-caller-callee-regs/src/Main.java b/test/526-checker-caller-callee-regs/src/Main.java
new file mode 100644
index 0000000..a1f3301
--- /dev/null
+++ b/test/526-checker-caller-callee-regs/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ static boolean doThrow = false;
+
+ // This function always returns 1.
+ // We use 'throw' to prevent the function from being inlined.
+ public static int $opt$noinline$function_call(int arg) {
+ if (doThrow) throw new Error();
+ return 1 % arg;
+ }
+
+ // | registers available to | regexp
+ // | the register allocator |
+ // ------------------------------|------------------------|-----------------
+ // ARM64 callee-saved registers | [x20-x29] | x2[0-9]
+ // ARM callee-saved registers | [r5-r8,r10,r11] | r([5-8]|10|11)
+
+ /**
+ * Check that a value live across a function call is allocated in a callee
+ * saved register.
+ */
+
+ /// CHECK-START-ARM: int Main.$opt$LiveInCall(int) register (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<t1:i\d+>> Add [<<Arg>>,<<Const1>>] {{.*->r([5-8]|10|11)}}
+ /// CHECK: <<t2:i\d+>> InvokeStaticOrDirect
+ /// CHECK: Sub [<<t1>>,<<t2>>]
+ /// CHECK: Return
+
+ /// CHECK-START-ARM64: int Main.$opt$LiveInCall(int) register (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: <<t1:i\d+>> Add [<<Arg>>,<<Const1>>] {{.*->x2[0-9]}}
+ /// CHECK: <<t2:i\d+>> InvokeStaticOrDirect
+ /// CHECK: Sub [<<t1>>,<<t2>>]
+ /// CHECK: Return
+
+ // TODO: Add tests for other architectures.
+
+ public static int $opt$LiveInCall(int arg) {
+ int t1 = arg + 1;
+ int t2 = $opt$noinline$function_call(arg);
+ return t1 - t2;
+ }
+
+ public static void main(String[] args) {
+ int arg = 123;
+ assertIntEquals($opt$LiveInCall(arg), arg);
+ }
+}
diff --git a/test/528-long-hint/expected.txt b/test/528-long-hint/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/528-long-hint/expected.txt
diff --git a/test/528-long-hint/info.txt b/test/528-long-hint/info.txt
new file mode 100644
index 0000000..6a9cfae
--- /dev/null
+++ b/test/528-long-hint/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing that used to crash on x86 when
+allocating a wrong register pair.
diff --git a/test/528-long-hint/src/Main.java b/test/528-long-hint/src/Main.java
new file mode 100644
index 0000000..ca1a114
--- /dev/null
+++ b/test/528-long-hint/src/Main.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import sun.misc.Unsafe;
+import java.lang.reflect.Field;
+
+public class Main {
+
+ long instanceField;
+ static long myLongField1;
+ static long myLongField2;
+
+ public static void main(String[] args) throws Exception {
+ Unsafe unsafe = getUnsafe();
+ Main f = new Main();
+ long offset = unsafe.objectFieldOffset(Main.class.getDeclaredField("instanceField"));
+ getUnsafe(); // spill offset
+ long a = myLongField1;
+ // We used the hinted register for the low part of b, which is EBX, as requested
+ // by the intrinsic below. Allocating EBX for the low part would put ESP as the high
+ // part, and we did not check that ESP was blocked.
+ long b = myLongField2;
+ unsafe.compareAndSwapLong(f, offset, a, b);
+ }
+
+
+ private static Unsafe getUnsafe() throws Exception {
+ Field f = Unsafe.class.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (Unsafe) f.get(null);
+ }
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 728ccea..884f280 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -37,4 +37,5 @@
b/22411633 (4)
b/22411633 (5)
b/22777307
+b/22881413
Done!
diff --git a/test/800-smali/smali/b_22881413.smali b/test/800-smali/smali/b_22881413.smali
new file mode 100644
index 0000000..29dd82a
--- /dev/null
+++ b/test/800-smali/smali/b_22881413.smali
@@ -0,0 +1,295 @@
+.class public LB22881413;
+.super Ljava/lang/Object;
+
+# A couple of fields to allow "loading" resolved and unresolved types. Use non-final classes to
+# avoid automatically getting precise reference types.
+.field private static res1:Ljava/lang/Number;
+.field private static res2:Ljava/lang/ClassLoader;
+.field private static res3:Ljava/lang/Package;
+.field private static res4:Ljava/lang/RuntimeException;
+.field private static res5:Ljava/lang/Exception;
+.field private static res6:Ljava/util/ArrayList;
+.field private static res7:Ljava/util/LinkedList;
+.field private static res8:Ljava/lang/Thread;
+.field private static res9:Ljava/lang/ThreadGroup;
+.field private static res10:Ljava/lang/Runtime;
+
+.field private static unres1:La/b/c/d1;
+.field private static unres2:La/b/c/d2;
+.field private static unres3:La/b/c/d3;
+.field private static unres4:La/b/c/d4;
+.field private static unres5:La/b/c/d5;
+.field private static unres6:La/b/c/d6;
+.field private static unres7:La/b/c/d7;
+.field private static unres8:La/b/c/d8;
+.field private static unres9:La/b/c/d9;
+.field private static unres10:La/b/c/d10;
+
+.field private static unresBase0:La/b/c/dBase0;
+.field private static unresBase1:La/b/c/dBase1;
+.field private static unresBase2:La/b/c/dBase2;
+.field private static unresBase3:La/b/c/dBase3;
+.field private static unresBase4:La/b/c/dBase4;
+.field private static unresBase5:La/b/c/dBase5;
+.field private static unresBase6:La/b/c/dBase6;
+.field private static unresBase7:La/b/c/dBase7;
+.field private static unresBase8:La/b/c/dBase8;
+
+# Empty, ignore this. We want to see if the other method can be verified in a reasonable amount of
+# time.
+.method public static run()V
+.registers 2
+ return-void
+.end method
+
+.method public static foo(IZZ)V
+.registers 11
+ # v8 = int, v9 = boolean, v10 = boolean
+
+ sget-object v0, LB22881413;->unresBase0:La/b/c/dBase0;
+
+# Test an UnresolvedUninitializedReference type.
+ new-instance v0, La/b/c/dBaseInit;
+
+ const v1, 0
+ const v2, 0
+
+# We're trying to create something like this (with more loops to amplify things).
+#
+# v0 = Unresolved1
+# while (something) {
+#
+# [Repeatedly]
+# if (cond) {
+# v0 = ResolvedX;
+# } else {
+# v0 = UnresolvedX;
+# }
+#
+# v0 = Unresolved2
+# };
+#
+# Important points:
+# 1) Use a while, so that the end of the loop is a goto. That way, the merging of outer-loop
+# unresolved classes is postponed.
+# 2) Put the else cases after all if cases. That way there are backward gotos that will lead
+# to stabilization loops in the body.
+#
+
+:Loop1
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop1End
+
+:Loop2
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop2End
+
+:Loop3
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop3End
+
+:Loop4
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop4End
+
+:Loop5
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop5End
+
+:Loop6
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop6End
+
+:Loop7
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop7End
+
+:Loop8
+
+ const v6, 0
+ add-int/lit16 v8, v8, -1
+ if-ge v8, v6, :Loop8End
+
+# Prototype:
+#
+# if-eqz v9, :ElseX
+# sget-object v0, LB22881413;->res1:Ljava/lang/Number;
+#:JoinX
+#
+# And somewhere at the end
+#
+#:ElseX
+# sget-object v0, LB22881413;->unresX:La/b/c/dX;
+# goto :JoinX
+#
+#
+
+ if-eqz v10, :Join1
+ if-eqz v9, :Else1
+ sget-object v0, LB22881413;->res1:Ljava/lang/Number;
+:Join1
+
+
+ if-eqz v10, :Join2
+ if-eqz v9, :Else2
+ sget-object v0, LB22881413;->res2:Ljava/lang/ClassLoader;
+:Join2
+
+
+ if-eqz v10, :Join3
+ if-eqz v9, :Else3
+ sget-object v0, LB22881413;->res3:Ljava/lang/Package;
+:Join3
+
+
+ if-eqz v10, :Join4
+ if-eqz v9, :Else4
+ sget-object v0, LB22881413;->res4:Ljava/lang/RuntimeException;
+:Join4
+
+
+ if-eqz v10, :Join5
+ if-eqz v9, :Else5
+ sget-object v0, LB22881413;->res5:Ljava/lang/Exception;
+:Join5
+
+
+ if-eqz v10, :Join6
+ if-eqz v9, :Else6
+ sget-object v0, LB22881413;->res6:Ljava/util/ArrayList;
+:Join6
+
+
+ if-eqz v10, :Join7
+ if-eqz v9, :Else7
+ sget-object v0, LB22881413;->res7:Ljava/util/LinkedList;
+:Join7
+
+
+ if-eqz v10, :Join8
+ if-eqz v9, :Else8
+ sget-object v0, LB22881413;->res8:Ljava/lang/Thread;
+:Join8
+
+
+ if-eqz v10, :Join9
+ if-eqz v9, :Else9
+ sget-object v0, LB22881413;->res9:Ljava/lang/ThreadGroup;
+:Join9
+
+
+ if-eqz v10, :Join10
+ if-eqz v9, :Else10
+ sget-object v0, LB22881413;->res10:Ljava/lang/Runtime;
+:Join10
+
+
+ goto :InnerMostLoopEnd
+
+:Else1
+ sget-object v0, LB22881413;->unres1:La/b/c/d1;
+ goto :Join1
+
+:Else2
+ sget-object v0, LB22881413;->unres2:La/b/c/d2;
+ goto :Join2
+
+:Else3
+ sget-object v0, LB22881413;->unres3:La/b/c/d3;
+ goto :Join3
+
+:Else4
+ sget-object v0, LB22881413;->unres4:La/b/c/d4;
+ goto :Join4
+
+:Else5
+ sget-object v0, LB22881413;->unres5:La/b/c/d5;
+ goto :Join5
+
+:Else6
+ sget-object v0, LB22881413;->unres6:La/b/c/d6;
+ goto :Join6
+
+:Else7
+ sget-object v0, LB22881413;->unres7:La/b/c/d7;
+ goto :Join7
+
+:Else8
+ sget-object v0, LB22881413;->unres8:La/b/c/d8;
+ goto :Join8
+
+:Else9
+ sget-object v0, LB22881413;->unres9:La/b/c/d9;
+ goto :Join9
+
+:Else10
+ sget-object v0, LB22881413;->unres10:La/b/c/d10;
+ goto :Join10
+
+:InnerMostLoopEnd
+
+ # Loop 8 end of body.
+ sget-object v0, LB22881413;->unresBase8:La/b/c/dBase8;
+ goto :Loop8
+
+:Loop8End
+
+ # Loop 7 end of body.
+ sget-object v0, LB22881413;->unresBase7:La/b/c/dBase7;
+ goto :Loop7
+
+:Loop7End
+
+ # Loop 6 end of body.
+ sget-object v0, LB22881413;->unresBase6:La/b/c/dBase6;
+ goto :Loop6
+
+:Loop6End
+
+ # Loop 5 end of body
+ sget-object v0, LB22881413;->unresBase5:La/b/c/dBase5;
+ goto :Loop5
+
+:Loop5End
+
+ # Loop 4 end of body
+ sget-object v0, LB22881413;->unresBase4:La/b/c/dBase4;
+ goto :Loop4
+
+:Loop4End
+
+ # Loop 3 end of body
+ sget-object v0, LB22881413;->unresBase3:La/b/c/dBase3;
+ goto :Loop3
+
+:Loop3End
+
+ # Loop 2 end of body
+ sget-object v0, LB22881413;->unresBase2:La/b/c/dBase2;
+ goto :Loop2
+
+:Loop2End
+
+ # Loop 1 end of body
+ sget-object v0, LB22881413;->unresBase1:La/b/c/dBase1;
+ goto :Loop1
+
+:Loop1End
+
+ return-void
+
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 438e214..e1ac749 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -121,6 +121,7 @@
null, null));
testCases.add(new TestCase("b/22777307", "B22777307", "run", null, new InstantiationError(),
null));
+ testCases.add(new TestCase("b/22881413", "B22881413", "run", null, null, null));
}
public void runTests() {
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 750a29f..a1af577 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -338,6 +338,17 @@
if [ "x$INSTRUCTION_SET_FEATURES" != "x" ] ; then
dex2oat_cmdline="${dex2oat_cmdline} --instruction-set-features=${INSTRUCTION_SET_FEATURES}"
fi
+
+ # Add in a timeout. This is important for testing the compilation/verification time of
+ # pathological cases.
+ # Note: as we don't know how decent targets are (e.g., emulator), only do this on the host for
+ # now. We should try to improve this.
+ # The current value is rather arbitrary. run-tests should compile quickly.
+ if [ "$HOST" != "n" ]; then
+ # Use SIGRTMIN+2 to try to dump threads.
+ # Use -k 1m to SIGKILL it a minute later if it hasn't ended.
+ dex2oat_cmdline="timeout -k 1m -s SIGRTMIN+2 1m ${dex2oat_cmdline}"
+ fi
fi
DALVIKVM_ISA_FEATURES_ARGS=""
diff --git a/test/run-test b/test/run-test
index 3d6f073..84c818b 100755
--- a/test/run-test
+++ b/test/run-test
@@ -626,12 +626,19 @@
# on a particular DEX output, keep building them with dx for now (b/19467889).
USE_JACK="false"
- if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no" ]; then
+ if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$debuggable" = "no" ]; then
# In no-prebuild mode, the compiler is only invoked if both dex2oat and
# patchoat are available. Disable Checker otherwise (b/22552692).
if [ "$prebuild_mode" = "yes" ] || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
run_checker="yes"
- run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \
+ if [ "$target_mode" = "no" ]; then
+ cfg_output_dir="$tmp_dir"
+ checker_arch_option=
+ else
+ cfg_output_dir="$DEX_LOCATION"
+ checker_arch_option="--arch=${target_arch_name^^}"
+ fi
+ run_args="${run_args} -Xcompiler-option --dump-cfg=$cfg_output_dir/$cfg_output \
-Xcompiler-option -j1"
fi
fi
@@ -647,6 +654,12 @@
build_file_size_limit=5120
run_file_size_limit=5120
fi
+if [ "$run_checker" = "yes" -a "$target_mode" = "yes" ]; then
+ # We will need to `adb pull` the .cfg output from the target onto the host to
+ # run checker on it. This file can be big.
+ build_file_size_limit=16384
+ run_file_size_limit=16384
+fi
if [ ${USE_JACK} = "false" ]; then
# Set ulimit if we build with dx only, Jack can generate big temp files.
if ! ulimit -S "$build_file_size_limit"; then
@@ -671,7 +684,10 @@
if [ "$run_exit" = "0" ]; then
if [ "$run_checker" = "yes" ]; then
- "$checker" "$cfg_output" "$tmp_dir" 2>&1
+ if [ "$target_mode" = "yes" ]; then
+ adb pull $cfg_output_dir/$cfg_output &> /dev/null
+ fi
+ "$checker" $checker_arch_option "$cfg_output" "$tmp_dir" 2>&1
checker_exit="$?"
if [ "$checker_exit" = "0" ]; then
good="yes"
@@ -693,7 +709,10 @@
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" >"$output" 2>&1
if [ "$run_checker" = "yes" ]; then
- "$checker" -q "$cfg_output" "$tmp_dir" >> "$output" 2>&1
+ if [ "$target_mode" = "yes" ]; then
+ adb pull $cfg_output_dir/$cfg_output &> /dev/null
+ fi
+ "$checker" -q $checker_arch_option "$cfg_output" "$tmp_dir" >> "$output" 2>&1
fi
sed -e 's/[[:cntrl:]]$//g' < "$output" >"${td_expected}"
good="yes"
@@ -731,7 +750,10 @@
echo "run exit status: $run_exit" 1>&2
good_run="no"
elif [ "$run_checker" = "yes" ]; then
- "$checker" -q "$cfg_output" "$tmp_dir" >> "$output" 2>&1
+ if [ "$target_mode" = "yes" ]; then
+ adb pull $cfg_output_dir/$cfg_output &> /dev/null
+ fi
+ "$checker" -q $checker_arch_option "$cfg_output" "$tmp_dir" >> "$output" 2>&1
checker_exit="$?"
if [ "$checker_exit" != "0" ]; then
echo "checker exit status: $checker_exit" 1>&2
diff --git a/tools/checker/README b/tools/checker/README
index 858a773..259691e 100644
--- a/tools/checker/README
+++ b/tools/checker/README
@@ -52,3 +52,11 @@
The engine will attempt to match the check lines against the output of the
group named on the first line. Together they verify that the CFG after
constant folding returns an integer constant with value either 11 or 22.
+
+A group of check lines can be made architecture-specific by inserting '-<arch>'
+after the 'CHECK-START' keyword. The previous example can be updated to run for
+arm64 only with:
+
+ // CHECK-START-ARM64: int MyClass.MyMethod() constant_folding (after)
+ // CHECK: <<ID:i\d+>> IntConstant {{11|22}}
+ // CHECK: Return [<<ID>>]
diff --git a/tools/checker/checker.py b/tools/checker/checker.py
index 4e516de..bc5e17d 100755
--- a/tools/checker/checker.py
+++ b/tools/checker/checker.py
@@ -17,6 +17,7 @@
import argparse
import os
+from common.archs import archs_list
from common.logger import Logger
from file_format.c1visualizer.parser import ParseC1visualizerStream
from file_format.checker.parser import ParseCheckerStream
@@ -34,6 +35,8 @@
help="print a list of all passes found in the tested file")
parser.add_argument("--dump-pass", dest="dump_pass", metavar="PASS",
help="print a compiler pass dump")
+ parser.add_argument("--arch", dest="arch", choices=archs_list,
+ help="Run the tests for the specified target architecture.")
parser.add_argument("-q", "--quiet", action="store_true",
help="print only errors")
return parser.parse_args()
@@ -80,13 +83,13 @@
Logger.fail("Source path \"" + path + "\" not found")
-def RunTests(checkPrefix, checkPath, outputFilename):
+def RunTests(checkPrefix, checkPath, outputFilename, targetArch):
c1File = ParseC1visualizerStream(os.path.basename(outputFilename), open(outputFilename, "r"))
for checkFilename in FindCheckerFiles(checkPath):
checkerFile = ParseCheckerStream(os.path.basename(checkFilename),
checkPrefix,
open(checkFilename, "r"))
- MatchFiles(checkerFile, c1File)
+ MatchFiles(checkerFile, c1File, targetArch)
if __name__ == "__main__":
@@ -100,4 +103,4 @@
elif args.dump_pass:
DumpPass(args.tested_file, args.dump_pass)
else:
- RunTests(args.check_prefix, args.source_path, args.tested_file)
+ RunTests(args.check_prefix, args.source_path, args.tested_file, args.arch)
diff --git a/tools/checker/common/archs.py b/tools/checker/common/archs.py
new file mode 100644
index 0000000..84bded9
--- /dev/null
+++ b/tools/checker/common/archs.py
@@ -0,0 +1,15 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+archs_list = ['ARM', 'ARM64', 'MIPS64', 'X86', 'X86_64']
diff --git a/tools/checker/file_format/c1visualizer/parser.py b/tools/checker/file_format/c1visualizer/parser.py
index 335a195..bdcde9d 100644
--- a/tools/checker/file_format/c1visualizer/parser.py
+++ b/tools/checker/file_format/c1visualizer/parser.py
@@ -27,10 +27,12 @@
def __parseC1Line(line, lineNo, state, fileName):
""" This function is invoked on each line of the output file and returns
- a pair which instructs the parser how the line should be handled. If the
+ a triplet which instructs the parser how the line should be handled. If the
line is to be included in the current group, it is returned in the first
value. If the line starts a new output group, the name of the group is
- returned in the second value.
+ returned in the second value. The third value is only here to make the
+ function prototype compatible with `SplitStream` and is always set to
+ `None` here.
"""
if state.currentState == C1ParserState.StartingCfgBlock:
# Previous line started a new 'cfg' block which means that this one must
@@ -39,16 +41,16 @@
# Extract the pass name, prepend it with the name of the method and
# return as the beginning of a new group.
state.currentState = C1ParserState.InsideCfgBlock
- return (None, state.lastMethodName + " " + line.split("\"")[1])
+ return (None, state.lastMethodName + " " + line.split("\"")[1], None)
else:
Logger.fail("Expected output group name", fileName, lineNo)
elif state.currentState == C1ParserState.InsideCfgBlock:
if line == "end_cfg":
state.currentState = C1ParserState.OutsideBlock
- return (None, None)
+ return (None, None, None)
else:
- return (line, None)
+ return (line, None, None)
elif state.currentState == C1ParserState.InsideCompilationBlock:
# Search for the method's name. Format: method "<name>"
@@ -59,7 +61,7 @@
state.lastMethodName = methodName
elif line == "end_compilation":
state.currentState = C1ParserState.OutsideBlock
- return (None, None)
+ return (None, None, None)
else:
assert state.currentState == C1ParserState.OutsideBlock
@@ -69,10 +71,10 @@
if state.lastMethodName is None:
Logger.fail("Expected method header", fileName, lineNo)
state.currentState = C1ParserState.StartingCfgBlock
- return (None, None)
+ return (None, None, None)
elif line == "begin_compilation":
state.currentState = C1ParserState.InsideCompilationBlock
- return (None, None)
+ return (None, None, None)
else:
Logger.fail("C1visualizer line not inside a group", fileName, lineNo)
@@ -82,6 +84,7 @@
fnProcessLine = lambda line, lineNo: __parseC1Line(line, lineNo, state, fileName)
fnLineOutsideChunk = lambda line, lineNo: \
Logger.fail("C1visualizer line not inside a group", fileName, lineNo)
- for passName, passLines, startLineNo in SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
+ for passName, passLines, startLineNo, testArch in \
+ SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
C1visualizerPass(c1File, passName, passLines, startLineNo + 1)
return c1File
diff --git a/tools/checker/file_format/checker/parser.py b/tools/checker/file_format/checker/parser.py
index f354395..001f72a 100644
--- a/tools/checker/file_format/checker/parser.py
+++ b/tools/checker/file_format/checker/parser.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from common.archs import archs_list
from common.logger import Logger
from file_format.common import SplitStream
from file_format.checker.struct import CheckerFile, TestCase, TestAssertion, RegexExpression
@@ -21,17 +22,18 @@
def __isCheckerLine(line):
return line.startswith("///") or line.startswith("##")
-def __extractLine(prefix, line):
+def __extractLine(prefix, line, arch = None):
""" Attempts to parse a check line. The regex searches for a comment symbol
followed by the CHECK keyword, given attribute and a colon at the very
beginning of the line. Whitespaces are ignored.
"""
rIgnoreWhitespace = r"\s*"
rCommentSymbols = [r"///", r"##"]
+ arch_specifier = r"-%s" % arch if arch is not None else r""
regexPrefix = rIgnoreWhitespace + \
r"(" + r"|".join(rCommentSymbols) + r")" + \
rIgnoreWhitespace + \
- prefix + r":"
+ prefix + arch_specifier + r":"
# The 'match' function succeeds only if the pattern is matched at the
# beginning of the line.
@@ -42,39 +44,42 @@
return None
def __processLine(line, lineNo, prefix, fileName):
- """ This function is invoked on each line of the check file and returns a pair
+ """ This function is invoked on each line of the check file and returns a triplet
which instructs the parser how the line should be handled. If the line is
to be included in the current check group, it is returned in the first
value. If the line starts a new check group, the name of the group is
- returned in the second value.
+ returned in the second value. The third value indicates whether the line
+ contained an architecture-specific suffix.
"""
if not __isCheckerLine(line):
- return None, None
+ return None, None, None
# Lines beginning with 'CHECK-START' start a new test case.
- startLine = __extractLine(prefix + "-START", line)
- if startLine is not None:
- return None, startLine
+ # We currently only consider the architecture suffix in "CHECK-START" lines.
+ for arch in [None] + archs_list:
+ startLine = __extractLine(prefix + "-START", line, arch)
+ if startLine is not None:
+ return None, startLine, arch
# Lines starting only with 'CHECK' are matched in order.
plainLine = __extractLine(prefix, line)
if plainLine is not None:
- return (plainLine, TestAssertion.Variant.InOrder, lineNo), None
+ return (plainLine, TestAssertion.Variant.InOrder, lineNo), None, None
# 'CHECK-NEXT' lines are in-order but must match the very next line.
nextLine = __extractLine(prefix + "-NEXT", line)
if nextLine is not None:
- return (nextLine, TestAssertion.Variant.NextLine, lineNo), None
+ return (nextLine, TestAssertion.Variant.NextLine, lineNo), None, None
# 'CHECK-DAG' lines are no-order assertions.
dagLine = __extractLine(prefix + "-DAG", line)
if dagLine is not None:
- return (dagLine, TestAssertion.Variant.DAG, lineNo), None
+ return (dagLine, TestAssertion.Variant.DAG, lineNo), None, None
# 'CHECK-NOT' lines are no-order negative assertions.
notLine = __extractLine(prefix + "-NOT", line)
if notLine is not None:
- return (notLine, TestAssertion.Variant.Not, lineNo), None
+ return (notLine, TestAssertion.Variant.Not, lineNo), None, None
Logger.fail("Checker assertion could not be parsed: '" + line + "'", fileName, lineNo)
@@ -146,8 +151,9 @@
fnProcessLine = lambda line, lineNo: __processLine(line, lineNo, prefix, fileName)
fnLineOutsideChunk = lambda line, lineNo: \
Logger.fail("Checker line not inside a group", fileName, lineNo)
- for caseName, caseLines, startLineNo in SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
- testCase = TestCase(checkerFile, caseName, startLineNo)
+ for caseName, caseLines, startLineNo, testArch in \
+ SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
+ testCase = TestCase(checkerFile, caseName, startLineNo, testArch)
for caseLine in caseLines:
ParseCheckerAssertion(testCase, caseLine[0], caseLine[1], caseLine[2])
return checkerFile
diff --git a/tools/checker/file_format/checker/struct.py b/tools/checker/file_format/checker/struct.py
index 6a54142..2b2e442 100644
--- a/tools/checker/file_format/checker/struct.py
+++ b/tools/checker/file_format/checker/struct.py
@@ -26,6 +26,9 @@
def addTestCase(self, new_test_case):
self.testCases.append(new_test_case)
+ def testCasesForArch(self, targetArch):
+ return [t for t in self.testCases if t.testArch == targetArch]
+
def __eq__(self, other):
return isinstance(other, self.__class__) \
and self.testCases == other.testCases
@@ -33,13 +36,14 @@
class TestCase(PrintableMixin):
- def __init__(self, parent, name, startLineNo):
+ def __init__(self, parent, name, startLineNo, testArch = None):
assert isinstance(parent, CheckerFile)
self.parent = parent
self.name = name
self.assertions = []
self.startLineNo = startLineNo
+ self.testArch = testArch
if not self.name:
Logger.fail("Test case does not have a name", self.fileName, self.startLineNo)
diff --git a/tools/checker/file_format/checker/test.py b/tools/checker/file_format/checker/test.py
index ff24cc1..36ed4b1 100644
--- a/tools/checker/file_format/checker/test.py
+++ b/tools/checker/file_format/checker/test.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from common.archs import archs_list
from common.testing import ToUnicode
from file_format.checker.parser import ParseCheckerStream
from file_format.checker.struct import CheckerFile, TestCase, TestAssertion, RegexExpression
@@ -280,3 +281,51 @@
/// CHECK-START: Example Group
/// CHECK-NEXT: bar
""")
+
+
+class CheckerParser_ArchTests(unittest.TestCase):
+
+ noarch_block = """
+ /// CHECK-START: Group
+ /// CHECK: foo
+ /// CHECK-NEXT: bar
+ /// CHECK-NOT: baz
+ /// CHECK-DAG: yoyo
+ """
+
+ arch_block = """
+ /// CHECK-START-{test_arch}: Group
+ /// CHECK: foo
+ /// CHECK-NEXT: bar
+ /// CHECK-NOT: baz
+ /// CHECK-DAG: yoyo
+ """
+
+ def test_NonArchTests(self):
+ for arch in [None] + archs_list:
+ checkerFile = ParseCheckerStream("<test-file>",
+ "CHECK",
+ io.StringIO(ToUnicode(self.noarch_block)))
+ self.assertEqual(len(checkerFile.testCases), 1)
+ self.assertEqual(len(checkerFile.testCases[0].assertions), 4)
+
+ def test_IgnoreNonTargetArch(self):
+ for targetArch in archs_list:
+ for testArch in [a for a in archs_list if a != targetArch]:
+ checkerText = self.arch_block.format(test_arch = testArch)
+ checkerFile = ParseCheckerStream("<test-file>",
+ "CHECK",
+ io.StringIO(ToUnicode(checkerText)))
+ self.assertEqual(len(checkerFile.testCases), 1)
+ self.assertEqual(len(checkerFile.testCasesForArch(testArch)), 1)
+ self.assertEqual(len(checkerFile.testCasesForArch(targetArch)), 0)
+
+ def test_Arch(self):
+ for arch in archs_list:
+ checkerText = self.arch_block.format(test_arch = arch)
+ checkerFile = ParseCheckerStream("<test-file>",
+ "CHECK",
+ io.StringIO(ToUnicode(checkerText)))
+ self.assertEqual(len(checkerFile.testCases), 1)
+ self.assertEqual(len(checkerFile.testCasesForArch(arch)), 1)
+ self.assertEqual(len(checkerFile.testCases[0].assertions), 4)
diff --git a/tools/checker/file_format/common.py b/tools/checker/file_format/common.py
index f91fdeb..4931550 100644
--- a/tools/checker/file_format/common.py
+++ b/tools/checker/file_format/common.py
@@ -18,8 +18,9 @@
Arguments:
- fnProcessLine: Called on each line with the text and line number. Must
- return a pair, name of the chunk started on this line and data extracted
- from this line (or None in both cases).
+ return a triplet, composed of the name of the chunk started on this line,
+ the data extracted, and the name of the architecture this test applies to
+ (or None to indicate that all architectures should run this test).
- fnLineOutsideChunk: Called on attempt to attach data prior to creating
a chunk.
"""
@@ -36,9 +37,11 @@
# Let the child class process the line and return information about it.
# The _processLine method can modify the content of the line (or delete it
# entirely) and specify whether it starts a new group.
- processedLine, newChunkName = fnProcessLine(line, lineNo)
+ processedLine, newChunkName, testArch = fnProcessLine(line, lineNo)
+ # Currently, only a full chunk can be specified as architecture-specific.
+ assert testArch is None or newChunkName is not None
if newChunkName is not None:
- currentChunk = (newChunkName, [], lineNo)
+ currentChunk = (newChunkName, [], lineNo, testArch)
allChunks.append(currentChunk)
if processedLine is not None:
if currentChunk is not None:
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
index b22211a..42ca7df 100644
--- a/tools/checker/match/file.py
+++ b/tools/checker/match/file.py
@@ -150,8 +150,10 @@
matchFrom = match.scope.end + 1
variables = match.variables
-def MatchFiles(checkerFile, c1File):
+def MatchFiles(checkerFile, c1File, targetArch):
for testCase in checkerFile.testCases:
+ if testCase.testArch not in [None, targetArch]:
+ continue
# TODO: Currently does not handle multiple occurrences of the same group
# name, e.g. when a pass is run multiple times. It will always try to
# match a check group against the first output group of the same name.
diff --git a/tools/checker/run_unit_tests.py b/tools/checker/run_unit_tests.py
index 01708db..2f5b1fe 100755
--- a/tools/checker/run_unit_tests.py
+++ b/tools/checker/run_unit_tests.py
@@ -18,7 +18,8 @@
from file_format.c1visualizer.test import C1visualizerParser_Test
from file_format.checker.test import CheckerParser_PrefixTest, \
CheckerParser_RegexExpressionTest, \
- CheckerParser_FileLayoutTest
+ CheckerParser_FileLayoutTest, \
+ CheckerParser_ArchTests
from match.test import MatchLines_Test, \
MatchFiles_Test