summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/dex/quick/arm/call_arm.cc21
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.cc24
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.h4
-rw-r--r--compiler/optimizing/builder.cc467
-rw-r--r--compiler/optimizing/builder.h22
-rw-r--r--compiler/optimizing/code_generator_arm.cc62
-rw-r--r--compiler/optimizing/code_generator_arm.h1
-rw-r--r--compiler/optimizing/code_generator_arm64.cc44
-rw-r--r--compiler/optimizing/code_generator_arm64.h1
-rw-r--r--compiler/optimizing/code_generator_x86.cc396
-rw-r--r--compiler/optimizing/code_generator_x86.h35
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc36
-rw-r--r--compiler/optimizing/code_generator_x86_64.h1
-rw-r--r--compiler/optimizing/constant_area_fixups_x86.h37
-rw-r--r--compiler/optimizing/induction_var_analysis.cc573
-rw-r--r--compiler/optimizing/induction_var_analysis.h114
-rw-r--r--compiler/optimizing/induction_var_analysis_test.cc385
-rw-r--r--compiler/optimizing/induction_var_range.cc343
-rw-r--r--compiler/optimizing/induction_var_range.h103
-rw-r--r--compiler/optimizing/induction_var_range_test.cc341
-rw-r--r--compiler/optimizing/intrinsics.cc83
-rw-r--r--compiler/optimizing/intrinsics_arm.cc227
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc132
-rw-r--r--compiler/optimizing/intrinsics_list.h6
-rw-r--r--compiler/optimizing/intrinsics_x86.cc11
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc11
-rw-r--r--compiler/optimizing/nodes.cc92
-rw-r--r--compiler/optimizing/nodes.h583
-rw-r--r--compiler/optimizing/nodes_x86.h66
-rw-r--r--compiler/optimizing/optimizing_compiler.cc15
-rw-r--r--compiler/utils/arm/assembler_arm.h12
-rw-r--r--compiler/utils/arm/assembler_arm32.cc64
-rw-r--r--compiler/utils/arm/assembler_arm32.h6
-rw-r--r--compiler/utils/arm/assembler_arm32_test.cc4
-rw-r--r--compiler/utils/arm/assembler_thumb2.cc68
-rw-r--r--compiler/utils/arm/assembler_thumb2.h6
-rw-r--r--compiler/utils/arm/assembler_thumb2_test.cc8
-rw-r--r--compiler/utils/x86/assembler_x86.cc55
-rw-r--r--compiler/utils/x86/assembler_x86.h90
-rw-r--r--runtime/Android.mk1
-rw-r--r--runtime/art_method-inl.h5
-rw-r--r--runtime/art_method.cc13
-rw-r--r--runtime/art_method.h23
-rw-r--r--runtime/class_linker.cc183
-rw-r--r--runtime/class_linker.h29
-rw-r--r--runtime/class_table.h6
-rw-r--r--runtime/debugger.cc67
-rw-r--r--runtime/debugger.h16
-rw-r--r--runtime/dex_file.h5
-rw-r--r--runtime/dex_file_verifier.cc534
-rw-r--r--runtime/dex_file_verifier.h63
-rw-r--r--runtime/dex_file_verifier_test.cc1127
-rw-r--r--runtime/gc_root.h10
-rw-r--r--runtime/instrumentation.cc28
-rw-r--r--runtime/instrumentation.h35
-rw-r--r--runtime/instrumentation_test.cc25
-rw-r--r--runtime/interpreter/interpreter.cc8
-rw-r--r--runtime/interpreter/interpreter_common.cc5
-rw-r--r--runtime/interpreter/interpreter_common.h12
-rw-r--r--runtime/java_vm_ext.cc10
-rw-r--r--runtime/jit/jit.cc10
-rw-r--r--runtime/jit/jit.h9
-rw-r--r--runtime/jit/jit_code_cache.cc12
-rw-r--r--runtime/jit/jit_code_cache.h3
-rw-r--r--runtime/jit/jit_instrumentation.cc79
-rw-r--r--runtime/jit/jit_instrumentation.h53
-rw-r--r--runtime/jit/profiling_info.cc117
-rw-r--r--runtime/jit/profiling_info.h106
-rw-r--r--runtime/lambda/closure_builder.cc1
-rw-r--r--runtime/oat.h2
-rw-r--r--runtime/parsed_options.cc3
-rw-r--r--runtime/quick/inline_method_analyser.h3
-rw-r--r--runtime/quick_exception_handler.cc22
-rw-r--r--runtime/quick_exception_handler.h12
-rw-r--r--runtime/reflection.cc8
-rw-r--r--runtime/runtime.cc3
-rw-r--r--runtime/runtime_options.def1
-rw-r--r--runtime/thread.cc23
-rw-r--r--runtime/trace.cc9
-rw-r--r--runtime/trace.h6
-rw-r--r--runtime/verifier/method_verifier.cc118
-rw-r--r--runtime/verifier/method_verifier.h29
-rw-r--r--test/044-proxy/expected.txt1
-rw-r--r--test/044-proxy/native_proxy.cc32
-rw-r--r--test/044-proxy/src/Main.java1
-rw-r--r--test/044-proxy/src/NativeProxy.java62
-rw-r--r--test/082-inline-execute/src/Main.java140
-rw-r--r--test/800-smali/smali/b_18380491AbstractBase.smali2
-rw-r--r--test/800-smali/smali/b_18380491ConcreteClass.smali6
-rw-r--r--test/Android.libarttest.mk1
-rwxr-xr-xtest/etc/run-test-jar6
-rwxr-xr-xtools/run-jdwp-tests.sh22
-rwxr-xr-xtools/setup-buildbot-device.sh3
93 files changed, 6166 insertions, 1493 deletions
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index eb8730cf4b..868d9a43e9 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -547,27 +547,28 @@ void ArmMir2Lir::GenExitSequence() {
cfi_.RestoreMany(DwarfFpReg(0), fp_spill_mask_);
}
bool unspill_LR_to_PC = (core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) != 0;
+ uint32_t core_unspill_mask = core_spill_mask_;
if (unspill_LR_to_PC) {
- core_spill_mask_ &= ~(1 << rs_rARM_LR.GetRegNum());
- core_spill_mask_ |= (1 << rs_rARM_PC.GetRegNum());
+ core_unspill_mask &= ~(1 << rs_rARM_LR.GetRegNum());
+ core_unspill_mask |= (1 << rs_rARM_PC.GetRegNum());
}
- if (core_spill_mask_ != 0u) {
- if ((core_spill_mask_ & ~(0xffu | (1u << rs_rARM_PC.GetRegNum()))) == 0u) {
+ if (core_unspill_mask != 0u) {
+ if ((core_unspill_mask & ~(0xffu | (1u << rs_rARM_PC.GetRegNum()))) == 0u) {
// Unspilling only low regs and/or PC, use 16-bit POP.
constexpr int pc_bit_shift = rs_rARM_PC.GetRegNum() - 8;
NewLIR1(kThumbPop,
- (core_spill_mask_ & ~(1u << rs_rARM_PC.GetRegNum())) |
- ((core_spill_mask_ & (1u << rs_rARM_PC.GetRegNum())) >> pc_bit_shift));
- } else if (IsPowerOfTwo(core_spill_mask_)) {
+ (core_unspill_mask & ~(1u << rs_rARM_PC.GetRegNum())) |
+ ((core_unspill_mask & (1u << rs_rARM_PC.GetRegNum())) >> pc_bit_shift));
+ } else if (IsPowerOfTwo(core_unspill_mask)) {
// kThumb2Pop cannot be used to unspill a single register.
- NewLIR1(kThumb2Pop1, CTZ(core_spill_mask_));
+ NewLIR1(kThumb2Pop1, CTZ(core_unspill_mask));
} else {
- NewLIR1(kThumb2Pop, core_spill_mask_);
+ NewLIR1(kThumb2Pop, core_unspill_mask);
}
// If we pop to PC, there is no further epilogue code.
if (!unspill_LR_to_PC) {
cfi_.AdjustCFAOffset(-num_core_spills_ * kArmPointerSize);
- cfi_.RestoreMany(DwarfCoreReg(0), core_spill_mask_);
+ cfi_.RestoreMany(DwarfCoreReg(0), core_unspill_mask);
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), 0); // empty stack.
}
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 42b792ca1a..af93aabc91 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -39,6 +39,9 @@ static constexpr bool kIntrinsicIsStatic[] = {
true, // kIntrinsicReverseBits
true, // kIntrinsicReverseBytes
true, // kIntrinsicNumberOfLeadingZeros
+ true, // kIntrinsicNumberOfTrailingZeros
+ true, // kIntrinsicRotateRight
+ true, // kIntrinsicRotateLeft
true, // kIntrinsicAbsInt
true, // kIntrinsicAbsLong
true, // kIntrinsicAbsFloat
@@ -79,6 +82,10 @@ static_assert(kIntrinsicIsStatic[kIntrinsicReverseBits], "ReverseBits must be st
static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfLeadingZeros],
"NumberOfLeadingZeros must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfTrailingZeros],
+ "NumberOfTrailingZeros must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRotateRight], "RotateRight must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRotateLeft], "RotateLeft must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static");
@@ -232,6 +239,9 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = {
"putOrderedObject", // kNameCachePutOrderedObject
"arraycopy", // kNameCacheArrayCopy
"numberOfLeadingZeros", // kNameCacheNumberOfLeadingZeros
+ "numberOfTrailingZeros", // kNameCacheNumberOfTrailingZeros
+ "rotateRight", // kNameCacheRotateRight
+ "rotateLeft", // kNameCacheRotateLeft
};
const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
@@ -289,6 +299,8 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
{ kClassCacheVoid, 2, { kClassCacheLong, kClassCacheShort } },
// kProtoCacheObject_Z
{ kClassCacheBoolean, 1, { kClassCacheJavaLangObject } },
+ // kProtoCacheJI_J
+ { kClassCacheLong, 2, { kClassCacheLong, kClassCacheInt } },
// kProtoCacheObjectJII_Z
{ kClassCacheBoolean, 4, { kClassCacheJavaLangObject, kClassCacheLong,
kClassCacheInt, kClassCacheInt } },
@@ -379,6 +391,8 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangInteger, NumberOfLeadingZeros, I_I, kIntrinsicNumberOfLeadingZeros, k32),
INTRINSIC(JavaLangLong, NumberOfLeadingZeros, J_I, kIntrinsicNumberOfLeadingZeros, k64),
+ INTRINSIC(JavaLangInteger, NumberOfTrailingZeros, I_I, kIntrinsicNumberOfTrailingZeros, k32),
+ INTRINSIC(JavaLangLong, NumberOfTrailingZeros, J_I, kIntrinsicNumberOfTrailingZeros, k64),
INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0),
INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0),
@@ -468,6 +482,11 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray,
0),
+ INTRINSIC(JavaLangInteger, RotateRight, II_I, kIntrinsicRotateRight, k32),
+ INTRINSIC(JavaLangLong, RotateRight, JI_J, kIntrinsicRotateRight, k64),
+ INTRINSIC(JavaLangInteger, RotateLeft, II_I, kIntrinsicRotateLeft, k32),
+ INTRINSIC(JavaLangLong, RotateLeft, JI_J, kIntrinsicRotateLeft, k64),
+
#undef INTRINSIC
#define SPECIAL(c, n, p, o, d) \
@@ -631,7 +650,10 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
case kIntrinsicSystemArrayCopyCharArray:
return backend->GenInlinedArrayCopyCharArray(info);
case kIntrinsicNumberOfLeadingZeros:
- return false; // not implemented in quick
+ case kIntrinsicNumberOfTrailingZeros:
+ case kIntrinsicRotateRight:
+ case kIntrinsicRotateLeft:
+ return false; // not implemented in quick.
default:
LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode;
return false; // avoid warning "control reaches end of non-void function"
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index d6c8bfbdb6..8458806e5e 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -208,6 +208,9 @@ class DexFileMethodInliner {
kNameCachePutOrderedObject,
kNameCacheArrayCopy,
kNameCacheNumberOfLeadingZeros,
+ kNameCacheNumberOfTrailingZeros,
+ kNameCacheRotateRight,
+ kNameCacheRotateLeft,
kNameCacheLast
};
@@ -245,6 +248,7 @@ class DexFileMethodInliner {
kProtoCacheJJ_V,
kProtoCacheJS_V,
kProtoCacheObject_Z,
+ kProtoCacheJI_J,
kProtoCacheObjectJII_Z,
kProtoCacheObjectJJJ_Z,
kProtoCacheObjectJObjectObject_Z,
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 1650fd1ced..7a3aa58149 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -46,7 +46,7 @@ class Temporaries : public ValueObject {
explicit Temporaries(HGraph* graph) : graph_(graph), index_(0) {}
void Add(HInstruction* instruction) {
- HInstruction* temp = new (graph_->GetArena()) HTemporary(index_);
+ HInstruction* temp = new (graph_->GetArena()) HTemporary(index_, instruction->GetDexPc());
instruction->GetBlock()->AddInstruction(temp);
DCHECK(temp->GetPrevious() == instruction);
@@ -161,23 +161,25 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter =
- new (arena_) HParameterValue(parameter_index++, Primitive::kPrimNot, true);
+ HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
+ Primitive::kPrimNot,
+ true);
entry_block_->AddInstruction(parameter);
HLocal* local = GetLocalAt(locals_index++);
- entry_block_->AddInstruction(new (arena_) HStoreLocal(local, parameter));
+ entry_block_->AddInstruction(new (arena_) HStoreLocal(local, parameter, local->GetDexPc()));
number_of_parameters--;
}
uint32_t pos = 1;
for (int i = 0; i < number_of_parameters; i++) {
- HParameterValue* parameter =
- new (arena_) HParameterValue(parameter_index++, Primitive::GetType(shorty[pos++]));
+ HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
+ Primitive::GetType(shorty[pos++]),
+ false);
entry_block_->AddInstruction(parameter);
HLocal* local = GetLocalAt(locals_index++);
// Store the parameter value in the local that the dex code will use
// to reference that parameter.
- entry_block_->AddInstruction(new (arena_) HStoreLocal(local, parameter));
+ entry_block_->AddInstruction(new (arena_) HStoreLocal(local, parameter, local->GetDexPc()));
bool is_wide = (parameter->GetType() == Primitive::kPrimLong)
|| (parameter->GetType() == Primitive::kPrimDouble);
if (is_wide) {
@@ -196,11 +198,11 @@ void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
DCHECK(branch_target != nullptr);
DCHECK(fallthrough_target != nullptr);
PotentiallyAddSuspendCheck(branch_target, dex_pc);
- HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
- HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
- T* comparison = new (arena_) T(first, second);
+ HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt, dex_pc);
+ HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt, dex_pc);
+ T* comparison = new (arena_) T(first, second, dex_pc);
current_block_->AddInstruction(comparison);
- HInstruction* ifinst = new (arena_) HIf(comparison);
+ HInstruction* ifinst = new (arena_) HIf(comparison, dex_pc);
current_block_->AddInstruction(ifinst);
current_block_->AddSuccessor(branch_target);
current_block_->AddSuccessor(fallthrough_target);
@@ -215,10 +217,10 @@ void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
DCHECK(branch_target != nullptr);
DCHECK(fallthrough_target != nullptr);
PotentiallyAddSuspendCheck(branch_target, dex_pc);
- HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
- T* comparison = new (arena_) T(value, graph_->GetIntConstant(0));
+ HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt, dex_pc);
+ T* comparison = new (arena_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc);
current_block_->AddInstruction(comparison);
- HInstruction* ifinst = new (arena_) HIf(comparison);
+ HInstruction* ifinst = new (arena_) HIf(comparison, dex_pc);
current_block_->AddInstruction(ifinst);
current_block_->AddSuccessor(branch_target);
current_block_->AddSuccessor(fallthrough_target);
@@ -320,7 +322,7 @@ void HGraphBuilder::SplitTryBoundaryEdge(HBasicBlock* predecessor,
const DexFile::CodeItem& code_item,
const DexFile::TryItem& try_item) {
// Split the edge with a single TryBoundary instruction.
- HTryBoundary* try_boundary = new (arena_) HTryBoundary(kind);
+ HTryBoundary* try_boundary = new (arena_) HTryBoundary(kind, successor->GetDexPc());
HBasicBlock* try_entry_block = graph_->SplitEdge(predecessor, successor);
try_entry_block->AddInstruction(try_boundary);
@@ -538,7 +540,7 @@ void HGraphBuilder::MaybeUpdateCurrentBlock(size_t dex_pc) {
// Branching instructions clear current_block, so we know
// the last instruction of the current block is not a branching
// instruction. We add an unconditional goto to the found block.
- current_block_->AddInstruction(new (arena_) HGoto());
+ current_block_->AddInstruction(new (arena_) HGoto(dex_pc));
current_block_->AddSuccessor(block);
}
graph_->AddBlock(block);
@@ -634,104 +636,92 @@ HBasicBlock* HGraphBuilder::FindOrCreateBlockStartingAt(int32_t dex_pc) {
}
template<typename T>
-void HGraphBuilder::Unop_12x(const Instruction& instruction, Primitive::Type type) {
- HInstruction* first = LoadLocal(instruction.VRegB(), type);
- current_block_->AddInstruction(new (arena_) T(type, first));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+void HGraphBuilder::Unop_12x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type, dex_pc);
+ current_block_->AddInstruction(new (arena_) T(type, first, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
void HGraphBuilder::Conversion_12x(const Instruction& instruction,
Primitive::Type input_type,
Primitive::Type result_type,
uint32_t dex_pc) {
- HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
+ HInstruction* first = LoadLocal(instruction.VRegB(), input_type, dex_pc);
current_block_->AddInstruction(new (arena_) HTypeConversion(result_type, first, dex_pc));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
-}
-
-template<typename T>
-void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type type) {
- HInstruction* first = LoadLocal(instruction.VRegB(), type);
- HInstruction* second = LoadLocal(instruction.VRegC(), type);
- current_block_->AddInstruction(new (arena_) T(type, first, second));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
template<typename T>
void HGraphBuilder::Binop_23x(const Instruction& instruction,
Primitive::Type type,
uint32_t dex_pc) {
- HInstruction* first = LoadLocal(instruction.VRegB(), type);
- HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ HInstruction* first = LoadLocal(instruction.VRegB(), type, dex_pc);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type, dex_pc);
current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
template<typename T>
void HGraphBuilder::Binop_23x_shift(const Instruction& instruction,
- Primitive::Type type) {
- HInstruction* first = LoadLocal(instruction.VRegB(), type);
- HInstruction* second = LoadLocal(instruction.VRegC(), Primitive::kPrimInt);
- current_block_->AddInstruction(new (arena_) T(type, first, second));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type, dex_pc);
+ HInstruction* second = LoadLocal(instruction.VRegC(), Primitive::kPrimInt, dex_pc);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
void HGraphBuilder::Binop_23x_cmp(const Instruction& instruction,
Primitive::Type type,
ComparisonBias bias,
uint32_t dex_pc) {
- HInstruction* first = LoadLocal(instruction.VRegB(), type);
- HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ HInstruction* first = LoadLocal(instruction.VRegB(), type, dex_pc);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type, dex_pc);
current_block_->AddInstruction(new (arena_) HCompare(type, first, second, bias, dex_pc));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
template<typename T>
-void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type type) {
- HInstruction* first = LoadLocal(instruction.VRegA(), type);
- HInstruction* second = LoadLocal(instruction.VRegB(), type);
- current_block_->AddInstruction(new (arena_) T(type, first, second));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
-}
-
-template<typename T>
-void HGraphBuilder::Binop_12x_shift(const Instruction& instruction, Primitive::Type type) {
- HInstruction* first = LoadLocal(instruction.VRegA(), type);
- HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
- current_block_->AddInstruction(new (arena_) T(type, first, second));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+void HGraphBuilder::Binop_12x_shift(const Instruction& instruction, Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegA(), type, dex_pc);
+ HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt, dex_pc);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
template<typename T>
void HGraphBuilder::Binop_12x(const Instruction& instruction,
Primitive::Type type,
uint32_t dex_pc) {
- HInstruction* first = LoadLocal(instruction.VRegA(), type);
- HInstruction* second = LoadLocal(instruction.VRegB(), type);
+ HInstruction* first = LoadLocal(instruction.VRegA(), type, dex_pc);
+ HInstruction* second = LoadLocal(instruction.VRegB(), type, dex_pc);
current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
template<typename T>
-void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
- HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
- HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22s());
+void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse, uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt, dex_pc);
+ HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22s(), dex_pc);
if (reverse) {
std::swap(first, second);
}
- current_block_->AddInstruction(new (arena_) T(Primitive::kPrimInt, first, second));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) T(Primitive::kPrimInt, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
template<typename T>
-void HGraphBuilder::Binop_22b(const Instruction& instruction, bool reverse) {
- HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
- HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22b());
+void HGraphBuilder::Binop_22b(const Instruction& instruction, bool reverse, uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt, dex_pc);
+ HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22b(), dex_pc);
if (reverse) {
std::swap(first, second);
}
- current_block_->AddInstruction(new (arena_) T(Primitive::kPrimInt, first, second));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) T(Primitive::kPrimInt, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, const CompilerDriver& driver) {
@@ -740,7 +730,9 @@ static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, const Compi
&& driver.RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
}
-void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type type) {
+void HGraphBuilder::BuildReturn(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
if (type == Primitive::kPrimVoid) {
if (graph_->ShouldGenerateConstructorBarrier()) {
// The compilation unit is null during testing.
@@ -748,12 +740,12 @@ void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type
DCHECK(RequiresConstructorBarrier(dex_compilation_unit_, *compiler_driver_))
<< "Inconsistent use of ShouldGenerateConstructorBarrier. Should not generate a barrier.";
}
- current_block_->AddInstruction(new (arena_) HMemoryBarrier(kStoreStore));
+ current_block_->AddInstruction(new (arena_) HMemoryBarrier(kStoreStore, dex_pc));
}
- current_block_->AddInstruction(new (arena_) HReturnVoid());
+ current_block_->AddInstruction(new (arena_) HReturnVoid(dex_pc));
} else {
- HInstruction* value = LoadLocal(instruction.VRegA(), type);
- current_block_->AddInstruction(new (arena_) HReturn(value));
+ HInstruction* value = LoadLocal(instruction.VRegA(), type, dex_pc);
+ current_block_->AddInstruction(new (arena_) HReturn(value, dex_pc));
}
current_block_->AddSuccessor(exit_block_);
current_block_ = nullptr;
@@ -1050,6 +1042,7 @@ bool HGraphBuilder::SetupArgumentsAndAddInvoke(HInvoke* invoke,
size_t start_index = 0;
size_t argument_index = 0;
uint32_t descriptor_index = 1; // Skip the return type.
+ uint32_t dex_pc = invoke->GetDexPc();
bool is_instance_call = invoke->GetOriginalInvokeType() != InvokeType::kStatic;
bool is_string_init = invoke->IsInvokeStaticOrDirect()
@@ -1060,7 +1053,7 @@ bool HGraphBuilder::SetupArgumentsAndAddInvoke(HInvoke* invoke,
argument_index = 0;
} else if (is_instance_call) {
Temporaries temps(graph_);
- HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot);
+ HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot, dex_pc);
HNullCheck* null_check = new (arena_) HNullCheck(arg, invoke->GetDexPc());
current_block_->AddInstruction(null_check);
temps.Add(null_check);
@@ -1089,7 +1082,7 @@ bool HGraphBuilder::SetupArgumentsAndAddInvoke(HInvoke* invoke,
MaybeRecordStat(MethodCompilationStat::kNotCompiledMalformedOpcode);
return false;
}
- HInstruction* arg = LoadLocal(is_range ? register_index + i : args[i], type);
+ HInstruction* arg = LoadLocal(is_range ? register_index + i : args[i], type, dex_pc);
invoke->SetArgumentAt(argument_index, arg);
if (is_wide) {
i++;
@@ -1122,7 +1115,7 @@ bool HGraphBuilder::SetupArgumentsAndAddInvoke(HInvoke* invoke,
// Add move-result for StringFactory method.
if (is_string_init) {
uint32_t orig_this_reg = is_range ? register_index : args[0];
- HInstruction* fake_string = LoadLocal(orig_this_reg, Primitive::kPrimNot);
+ HInstruction* fake_string = LoadLocal(orig_this_reg, Primitive::kPrimNot, dex_pc);
invoke->SetArgumentAt(argument_index, fake_string);
current_block_->AddInstruction(invoke);
PotentiallySimplifyFakeString(orig_this_reg, invoke->GetDexPc(), invoke);
@@ -1148,15 +1141,15 @@ void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register
const VerifiedMethod* verified_method =
compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
if (verified_method != nullptr) {
- UpdateLocal(original_dex_register, actual_string);
+ UpdateLocal(original_dex_register, actual_string, dex_pc);
const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
verified_method->GetStringInitPcRegMap();
auto map_it = string_init_map.find(dex_pc);
if (map_it != string_init_map.end()) {
std::set<uint32_t> reg_set = map_it->second;
for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
- HInstruction* load_local = LoadLocal(original_dex_register, Primitive::kPrimNot);
- UpdateLocal(*set_it, load_local);
+ HInstruction* load_local = LoadLocal(original_dex_register, Primitive::kPrimNot, dex_pc);
+ UpdateLocal(*set_it, load_local, dex_pc);
}
}
} else {
@@ -1190,14 +1183,14 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot);
+ HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot, dex_pc);
current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
if (is_put) {
Temporaries temps(graph_);
HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
- HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
+ HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
current_block_->AddInstruction(new (arena_) HInstanceFieldSet(
null_check,
value,
@@ -1206,7 +1199,8 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
resolved_field->IsVolatile(),
field_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache()));
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc));
} else {
current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
current_block_->GetLastInstruction(),
@@ -1215,9 +1209,10 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
resolved_field->IsVolatile(),
field_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache()));
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc));
- UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
}
return true;
}
@@ -1328,7 +1323,7 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
// We need to keep the class alive before loading the value.
Temporaries temps(graph_);
temps.Add(cls);
- HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
+ HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
DCHECK_EQ(value->GetType(), field_type);
current_block_->AddInstruction(new (arena_) HStaticFieldSet(cls,
value,
@@ -1337,7 +1332,8 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
resolved_field->IsVolatile(),
field_index,
*dex_file_,
- dex_cache_));
+ dex_cache_,
+ dex_pc));
} else {
current_block_->AddInstruction(new (arena_) HStaticFieldGet(cls,
field_type,
@@ -1345,8 +1341,9 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
resolved_field->IsVolatile(),
field_index,
*dex_file_,
- dex_cache_));
- UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
+ dex_cache_,
+ dex_pc));
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
}
return true;
}
@@ -1360,16 +1357,16 @@ void HGraphBuilder::BuildCheckedDivRem(uint16_t out_vreg,
bool isDiv) {
DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
- HInstruction* first = LoadLocal(first_vreg, type);
+ HInstruction* first = LoadLocal(first_vreg, type, dex_pc);
HInstruction* second = nullptr;
if (second_is_constant) {
if (type == Primitive::kPrimInt) {
- second = graph_->GetIntConstant(second_vreg_or_constant);
+ second = graph_->GetIntConstant(second_vreg_or_constant, dex_pc);
} else {
- second = graph_->GetLongConstant(second_vreg_or_constant);
+ second = graph_->GetLongConstant(second_vreg_or_constant, dex_pc);
}
} else {
- second = LoadLocal(second_vreg_or_constant, type);
+ second = LoadLocal(second_vreg_or_constant, type, dex_pc);
}
if (!second_is_constant
@@ -1386,7 +1383,7 @@ void HGraphBuilder::BuildCheckedDivRem(uint16_t out_vreg,
} else {
current_block_->AddInstruction(new (arena_) HRem(type, first, second, dex_pc));
}
- UpdateLocal(out_vreg, current_block_->GetLastInstruction());
+ UpdateLocal(out_vreg, current_block_->GetLastInstruction(), dex_pc);
}
void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
@@ -1400,26 +1397,26 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
// We need one temporary for the null check, one for the index, and one for the length.
Temporaries temps(graph_);
- HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot);
+ HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot, dex_pc);
object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
temps.Add(object);
- HInstruction* length = new (arena_) HArrayLength(object);
+ HInstruction* length = new (arena_) HArrayLength(object, dex_pc);
current_block_->AddInstruction(length);
temps.Add(length);
- HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt);
+ HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt, dex_pc);
index = new (arena_) HBoundsCheck(index, length, dex_pc);
current_block_->AddInstruction(index);
temps.Add(index);
if (is_put) {
- HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
+ HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type, dex_pc);
// TODO: Insert a type check node if the type is Object.
current_block_->AddInstruction(new (arena_) HArraySet(
object, index, value, anticipated_type, dex_pc));
} else {
- current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
- UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type, dex_pc));
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
}
graph_->SetHasBoundsChecks(true);
}
@@ -1430,7 +1427,7 @@ void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
bool is_range,
uint32_t* args,
uint32_t register_index) {
- HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments);
+ HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
? kQuickAllocArrayWithAccessCheck
: kQuickAllocArray;
@@ -1454,8 +1451,8 @@ void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
Temporaries temps(graph_);
temps.Add(object);
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
- HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
- HInstruction* index = graph_->GetIntConstant(i);
+ HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type, dex_pc);
+ HInstruction* index = graph_->GetIntConstant(i, dex_pc);
current_block_->AddInstruction(
new (arena_) HArraySet(object, index, value, type, dex_pc));
}
@@ -1469,8 +1466,8 @@ void HGraphBuilder::BuildFillArrayData(HInstruction* object,
Primitive::Type anticipated_type,
uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
- HInstruction* index = graph_->GetIntConstant(i);
- HInstruction* value = graph_->GetIntConstant(data[i]);
+ HInstruction* index = graph_->GetIntConstant(i, dex_pc);
+ HInstruction* value = graph_->GetIntConstant(data[i], dex_pc);
current_block_->AddInstruction(new (arena_) HArraySet(
object, index, value, anticipated_type, dex_pc));
}
@@ -1478,12 +1475,12 @@ void HGraphBuilder::BuildFillArrayData(HInstruction* object,
void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc) {
Temporaries temps(graph_);
- HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
+ HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot, dex_pc);
HNullCheck* null_check = new (arena_) HNullCheck(array, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
- HInstruction* length = new (arena_) HArrayLength(null_check);
+ HInstruction* length = new (arena_) HArrayLength(null_check, dex_pc);
current_block_->AddInstruction(length);
int32_t payload_offset = instruction.VRegB_31t() + dex_pc;
@@ -1494,7 +1491,7 @@ void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
- HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1);
+ HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1, dex_pc);
current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
@@ -1536,8 +1533,8 @@ void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
uint32_t element_count,
uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
- HInstruction* index = graph_->GetIntConstant(i);
- HInstruction* value = graph_->GetLongConstant(data[i]);
+ HInstruction* index = graph_->GetIntConstant(i, dex_pc);
+ HInstruction* value = graph_->GetLongConstant(data[i], dex_pc);
current_block_->AddInstruction(new (arena_) HArraySet(
object, index, value, Primitive::kPrimLong, dex_pc));
}
@@ -1562,7 +1559,7 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
return false;
}
- HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot, dex_pc);
HLoadClass* cls = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
@@ -1576,7 +1573,7 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
current_block_->AddInstruction(
new (arena_) HInstanceOf(object, cls, type_known_final, dex_pc));
- UpdateLocal(destination, current_block_->GetLastInstruction());
+ UpdateLocal(destination, current_block_->GetLastInstruction(), dex_pc);
} else {
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
current_block_->AddInstruction(
@@ -1598,7 +1595,7 @@ void HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t d
SwitchTable table(instruction, dex_pc, false);
// Value to test against.
- HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
+ HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt, dex_pc);
// Retrieve number of entries.
uint16_t num_entries = table.GetNumEntries();
@@ -1623,7 +1620,7 @@ void HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t d
SwitchTable table(instruction, dex_pc, true);
// Value to test against.
- HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
+ HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt, dex_pc);
uint16_t num_entries = table.GetNumEntries();
@@ -1642,12 +1639,12 @@ void HGraphBuilder::BuildSwitchCaseHelper(const Instruction& instruction, size_t
PotentiallyAddSuspendCheck(case_target, dex_pc);
// The current case's value.
- HInstruction* this_case_value = graph_->GetIntConstant(case_value_int);
+ HInstruction* this_case_value = graph_->GetIntConstant(case_value_int, dex_pc);
// Compare value and this_case_value.
- HEqual* comparison = new (arena_) HEqual(value, this_case_value);
+ HEqual* comparison = new (arena_) HEqual(value, this_case_value, dex_pc);
current_block_->AddInstruction(comparison);
- HInstruction* ifinst = new (arena_) HIf(comparison);
+ HInstruction* ifinst = new (arena_) HIf(comparison, dex_pc);
current_block_->AddInstruction(ifinst);
// Case hit: use the target offset to determine where to go.
@@ -1711,29 +1708,29 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
switch (instruction.Opcode()) {
case Instruction::CONST_4: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_11n());
- UpdateLocal(register_index, constant);
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_11n(), dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
case Instruction::CONST_16: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_21s());
- UpdateLocal(register_index, constant);
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_21s(), dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
case Instruction::CONST: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_31i());
- UpdateLocal(register_index, constant);
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_31i(), dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
case Instruction::CONST_HIGH16: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_21h() << 16);
- UpdateLocal(register_index, constant);
+ HIntConstant* constant = graph_->GetIntConstant(instruction.VRegB_21h() << 16, dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
@@ -1743,8 +1740,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
int64_t value = instruction.VRegB_21s();
value <<= 48;
value >>= 48;
- HLongConstant* constant = graph_->GetLongConstant(value);
- UpdateLocal(register_index, constant);
+ HLongConstant* constant = graph_->GetLongConstant(value, dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
@@ -1754,23 +1751,23 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
int64_t value = instruction.VRegB_31i();
value <<= 32;
value >>= 32;
- HLongConstant* constant = graph_->GetLongConstant(value);
- UpdateLocal(register_index, constant);
+ HLongConstant* constant = graph_->GetLongConstant(value, dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
case Instruction::CONST_WIDE: {
int32_t register_index = instruction.VRegA();
- HLongConstant* constant = graph_->GetLongConstant(instruction.VRegB_51l());
- UpdateLocal(register_index, constant);
+ HLongConstant* constant = graph_->GetLongConstant(instruction.VRegB_51l(), dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
case Instruction::CONST_WIDE_HIGH16: {
int32_t register_index = instruction.VRegA();
int64_t value = static_cast<int64_t>(instruction.VRegB_21h()) << 48;
- HLongConstant* constant = graph_->GetLongConstant(value);
- UpdateLocal(register_index, constant);
+ HLongConstant* constant = graph_->GetLongConstant(value, dex_pc);
+ UpdateLocal(register_index, constant, dex_pc);
break;
}
@@ -1778,8 +1775,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::MOVE:
case Instruction::MOVE_FROM16:
case Instruction::MOVE_16: {
- HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
- UpdateLocal(instruction.VRegA(), value);
+ HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimInt, dex_pc);
+ UpdateLocal(instruction.VRegA(), value, dex_pc);
break;
}
@@ -1787,22 +1784,22 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::MOVE_WIDE:
case Instruction::MOVE_WIDE_FROM16:
case Instruction::MOVE_WIDE_16: {
- HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimLong);
- UpdateLocal(instruction.VRegA(), value);
+ HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimLong, dex_pc);
+ UpdateLocal(instruction.VRegA(), value, dex_pc);
break;
}
case Instruction::MOVE_OBJECT:
case Instruction::MOVE_OBJECT_16:
case Instruction::MOVE_OBJECT_FROM16: {
- HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimNot);
- UpdateLocal(instruction.VRegA(), value);
+ HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimNot, dex_pc);
+ UpdateLocal(instruction.VRegA(), value, dex_pc);
break;
}
case Instruction::RETURN_VOID_NO_BARRIER:
case Instruction::RETURN_VOID: {
- BuildReturn(instruction, Primitive::kPrimVoid);
+ BuildReturn(instruction, Primitive::kPrimVoid, dex_pc);
break;
}
@@ -1824,24 +1821,24 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
HBasicBlock* target = FindBlockStartingAt(offset + dex_pc);
DCHECK(target != nullptr);
PotentiallyAddSuspendCheck(target, dex_pc);
- current_block_->AddInstruction(new (arena_) HGoto());
+ current_block_->AddInstruction(new (arena_) HGoto(dex_pc));
current_block_->AddSuccessor(target);
current_block_ = nullptr;
break;
}
case Instruction::RETURN: {
- BuildReturn(instruction, return_type_);
+ BuildReturn(instruction, return_type_, dex_pc);
break;
}
case Instruction::RETURN_OBJECT: {
- BuildReturn(instruction, return_type_);
+ BuildReturn(instruction, return_type_, dex_pc);
break;
}
case Instruction::RETURN_WIDE: {
- BuildReturn(instruction, return_type_);
+ BuildReturn(instruction, return_type_, dex_pc);
break;
}
@@ -1895,32 +1892,32 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::NEG_INT: {
- Unop_12x<HNeg>(instruction, Primitive::kPrimInt);
+ Unop_12x<HNeg>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::NEG_LONG: {
- Unop_12x<HNeg>(instruction, Primitive::kPrimLong);
+ Unop_12x<HNeg>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::NEG_FLOAT: {
- Unop_12x<HNeg>(instruction, Primitive::kPrimFloat);
+ Unop_12x<HNeg>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::NEG_DOUBLE: {
- Unop_12x<HNeg>(instruction, Primitive::kPrimDouble);
+ Unop_12x<HNeg>(instruction, Primitive::kPrimDouble, dex_pc);
break;
}
case Instruction::NOT_INT: {
- Unop_12x<HNot>(instruction, Primitive::kPrimInt);
+ Unop_12x<HNot>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::NOT_LONG: {
- Unop_12x<HNot>(instruction, Primitive::kPrimLong);
+ Unop_12x<HNot>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
@@ -2000,67 +1997,67 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::ADD_INT: {
- Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
+ Binop_23x<HAdd>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::ADD_LONG: {
- Binop_23x<HAdd>(instruction, Primitive::kPrimLong);
+ Binop_23x<HAdd>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::ADD_DOUBLE: {
- Binop_23x<HAdd>(instruction, Primitive::kPrimDouble);
+ Binop_23x<HAdd>(instruction, Primitive::kPrimDouble, dex_pc);
break;
}
case Instruction::ADD_FLOAT: {
- Binop_23x<HAdd>(instruction, Primitive::kPrimFloat);
+ Binop_23x<HAdd>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::SUB_INT: {
- Binop_23x<HSub>(instruction, Primitive::kPrimInt);
+ Binop_23x<HSub>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::SUB_LONG: {
- Binop_23x<HSub>(instruction, Primitive::kPrimLong);
+ Binop_23x<HSub>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::SUB_FLOAT: {
- Binop_23x<HSub>(instruction, Primitive::kPrimFloat);
+ Binop_23x<HSub>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::SUB_DOUBLE: {
- Binop_23x<HSub>(instruction, Primitive::kPrimDouble);
+ Binop_23x<HSub>(instruction, Primitive::kPrimDouble, dex_pc);
break;
}
case Instruction::ADD_INT_2ADDR: {
- Binop_12x<HAdd>(instruction, Primitive::kPrimInt);
+ Binop_12x<HAdd>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::MUL_INT: {
- Binop_23x<HMul>(instruction, Primitive::kPrimInt);
+ Binop_23x<HMul>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::MUL_LONG: {
- Binop_23x<HMul>(instruction, Primitive::kPrimLong);
+ Binop_23x<HMul>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::MUL_FLOAT: {
- Binop_23x<HMul>(instruction, Primitive::kPrimFloat);
+ Binop_23x<HMul>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::MUL_DOUBLE: {
- Binop_23x<HMul>(instruction, Primitive::kPrimDouble);
+ Binop_23x<HMul>(instruction, Primitive::kPrimDouble, dex_pc);
break;
}
@@ -2109,117 +2106,117 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::AND_INT: {
- Binop_23x<HAnd>(instruction, Primitive::kPrimInt);
+ Binop_23x<HAnd>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::AND_LONG: {
- Binop_23x<HAnd>(instruction, Primitive::kPrimLong);
+ Binop_23x<HAnd>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::SHL_INT: {
- Binop_23x_shift<HShl>(instruction, Primitive::kPrimInt);
+ Binop_23x_shift<HShl>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::SHL_LONG: {
- Binop_23x_shift<HShl>(instruction, Primitive::kPrimLong);
+ Binop_23x_shift<HShl>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::SHR_INT: {
- Binop_23x_shift<HShr>(instruction, Primitive::kPrimInt);
+ Binop_23x_shift<HShr>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::SHR_LONG: {
- Binop_23x_shift<HShr>(instruction, Primitive::kPrimLong);
+ Binop_23x_shift<HShr>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::USHR_INT: {
- Binop_23x_shift<HUShr>(instruction, Primitive::kPrimInt);
+ Binop_23x_shift<HUShr>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::USHR_LONG: {
- Binop_23x_shift<HUShr>(instruction, Primitive::kPrimLong);
+ Binop_23x_shift<HUShr>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::OR_INT: {
- Binop_23x<HOr>(instruction, Primitive::kPrimInt);
+ Binop_23x<HOr>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::OR_LONG: {
- Binop_23x<HOr>(instruction, Primitive::kPrimLong);
+ Binop_23x<HOr>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::XOR_INT: {
- Binop_23x<HXor>(instruction, Primitive::kPrimInt);
+ Binop_23x<HXor>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::XOR_LONG: {
- Binop_23x<HXor>(instruction, Primitive::kPrimLong);
+ Binop_23x<HXor>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::ADD_LONG_2ADDR: {
- Binop_12x<HAdd>(instruction, Primitive::kPrimLong);
+ Binop_12x<HAdd>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::ADD_DOUBLE_2ADDR: {
- Binop_12x<HAdd>(instruction, Primitive::kPrimDouble);
+ Binop_12x<HAdd>(instruction, Primitive::kPrimDouble, dex_pc);
break;
}
case Instruction::ADD_FLOAT_2ADDR: {
- Binop_12x<HAdd>(instruction, Primitive::kPrimFloat);
+ Binop_12x<HAdd>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::SUB_INT_2ADDR: {
- Binop_12x<HSub>(instruction, Primitive::kPrimInt);
+ Binop_12x<HSub>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::SUB_LONG_2ADDR: {
- Binop_12x<HSub>(instruction, Primitive::kPrimLong);
+ Binop_12x<HSub>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::SUB_FLOAT_2ADDR: {
- Binop_12x<HSub>(instruction, Primitive::kPrimFloat);
+ Binop_12x<HSub>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::SUB_DOUBLE_2ADDR: {
- Binop_12x<HSub>(instruction, Primitive::kPrimDouble);
+ Binop_12x<HSub>(instruction, Primitive::kPrimDouble, dex_pc);
break;
}
case Instruction::MUL_INT_2ADDR: {
- Binop_12x<HMul>(instruction, Primitive::kPrimInt);
+ Binop_12x<HMul>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::MUL_LONG_2ADDR: {
- Binop_12x<HMul>(instruction, Primitive::kPrimLong);
+ Binop_12x<HMul>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::MUL_FLOAT_2ADDR: {
- Binop_12x<HMul>(instruction, Primitive::kPrimFloat);
+ Binop_12x<HMul>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::MUL_DOUBLE_2ADDR: {
- Binop_12x<HMul>(instruction, Primitive::kPrimDouble);
+ Binop_12x<HMul>(instruction, Primitive::kPrimDouble, dex_pc);
break;
}
@@ -2258,32 +2255,32 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::SHL_INT_2ADDR: {
- Binop_12x_shift<HShl>(instruction, Primitive::kPrimInt);
+ Binop_12x_shift<HShl>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::SHL_LONG_2ADDR: {
- Binop_12x_shift<HShl>(instruction, Primitive::kPrimLong);
+ Binop_12x_shift<HShl>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::SHR_INT_2ADDR: {
- Binop_12x_shift<HShr>(instruction, Primitive::kPrimInt);
+ Binop_12x_shift<HShr>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::SHR_LONG_2ADDR: {
- Binop_12x_shift<HShr>(instruction, Primitive::kPrimLong);
+ Binop_12x_shift<HShr>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::USHR_INT_2ADDR: {
- Binop_12x_shift<HUShr>(instruction, Primitive::kPrimInt);
+ Binop_12x_shift<HUShr>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::USHR_LONG_2ADDR: {
- Binop_12x_shift<HUShr>(instruction, Primitive::kPrimLong);
+ Binop_12x_shift<HUShr>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
@@ -2298,92 +2295,92 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::AND_INT_2ADDR: {
- Binop_12x<HAnd>(instruction, Primitive::kPrimInt);
+ Binop_12x<HAnd>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::AND_LONG_2ADDR: {
- Binop_12x<HAnd>(instruction, Primitive::kPrimLong);
+ Binop_12x<HAnd>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::OR_INT_2ADDR: {
- Binop_12x<HOr>(instruction, Primitive::kPrimInt);
+ Binop_12x<HOr>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::OR_LONG_2ADDR: {
- Binop_12x<HOr>(instruction, Primitive::kPrimLong);
+ Binop_12x<HOr>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::XOR_INT_2ADDR: {
- Binop_12x<HXor>(instruction, Primitive::kPrimInt);
+ Binop_12x<HXor>(instruction, Primitive::kPrimInt, dex_pc);
break;
}
case Instruction::XOR_LONG_2ADDR: {
- Binop_12x<HXor>(instruction, Primitive::kPrimLong);
+ Binop_12x<HXor>(instruction, Primitive::kPrimLong, dex_pc);
break;
}
case Instruction::ADD_INT_LIT16: {
- Binop_22s<HAdd>(instruction, false);
+ Binop_22s<HAdd>(instruction, false, dex_pc);
break;
}
case Instruction::AND_INT_LIT16: {
- Binop_22s<HAnd>(instruction, false);
+ Binop_22s<HAnd>(instruction, false, dex_pc);
break;
}
case Instruction::OR_INT_LIT16: {
- Binop_22s<HOr>(instruction, false);
+ Binop_22s<HOr>(instruction, false, dex_pc);
break;
}
case Instruction::XOR_INT_LIT16: {
- Binop_22s<HXor>(instruction, false);
+ Binop_22s<HXor>(instruction, false, dex_pc);
break;
}
case Instruction::RSUB_INT: {
- Binop_22s<HSub>(instruction, true);
+ Binop_22s<HSub>(instruction, true, dex_pc);
break;
}
case Instruction::MUL_INT_LIT16: {
- Binop_22s<HMul>(instruction, false);
+ Binop_22s<HMul>(instruction, false, dex_pc);
break;
}
case Instruction::ADD_INT_LIT8: {
- Binop_22b<HAdd>(instruction, false);
+ Binop_22b<HAdd>(instruction, false, dex_pc);
break;
}
case Instruction::AND_INT_LIT8: {
- Binop_22b<HAnd>(instruction, false);
+ Binop_22b<HAnd>(instruction, false, dex_pc);
break;
}
case Instruction::OR_INT_LIT8: {
- Binop_22b<HOr>(instruction, false);
+ Binop_22b<HOr>(instruction, false, dex_pc);
break;
}
case Instruction::XOR_INT_LIT8: {
- Binop_22b<HXor>(instruction, false);
+ Binop_22b<HXor>(instruction, false, dex_pc);
break;
}
case Instruction::RSUB_INT_LIT8: {
- Binop_22b<HSub>(instruction, true);
+ Binop_22b<HSub>(instruction, true, dex_pc);
break;
}
case Instruction::MUL_INT_LIT8: {
- Binop_22b<HMul>(instruction, false);
+ Binop_22b<HMul>(instruction, false, dex_pc);
break;
}
@@ -2402,17 +2399,17 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::SHL_INT_LIT8: {
- Binop_22b<HShl>(instruction, false);
+ Binop_22b<HShl>(instruction, false, dex_pc);
break;
}
case Instruction::SHR_INT_LIT8: {
- Binop_22b<HShr>(instruction, false);
+ Binop_22b<HShr>(instruction, false, dex_pc);
break;
}
case Instruction::USHR_INT_LIT8: {
- Binop_22b<HUShr>(instruction, false);
+ Binop_22b<HUShr>(instruction, false, dex_pc);
break;
}
@@ -2420,9 +2417,9 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint16_t type_index = instruction.VRegB_21c();
if (compiler_driver_->IsStringTypeIndex(type_index, dex_file_)) {
int32_t register_index = instruction.VRegA();
- HFakeString* fake_string = new (arena_) HFakeString();
+ HFakeString* fake_string = new (arena_) HFakeString(dex_pc);
current_block_->AddInstruction(fake_string);
- UpdateLocal(register_index, fake_string);
+ UpdateLocal(register_index, fake_string, dex_pc);
} else {
QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
? kQuickAllocObjectWithAccessCheck
@@ -2434,14 +2431,14 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
type_index,
*dex_compilation_unit_->GetDexFile(),
entrypoint));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
}
break;
}
case Instruction::NEW_ARRAY: {
uint16_t type_index = instruction.VRegC_22c();
- HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
+ HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt, dex_pc);
QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
? kQuickAllocArrayWithAccessCheck
: kQuickAllocArray;
@@ -2451,7 +2448,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
type_index,
*dex_compilation_unit_->GetDexFile(),
entrypoint));
- UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction(), dex_pc);
break;
}
@@ -2491,7 +2488,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
// FilledNewArray, the local needs to be updated after the array was
// filled, otherwise we might overwrite an input vreg.
HStoreLocal* update_local =
- new (arena_) HStoreLocal(GetLocalAt(instruction.VRegA()), latest_result_);
+ new (arena_) HStoreLocal(GetLocalAt(instruction.VRegA()), latest_result_, dex_pc);
HBasicBlock* block = latest_result_->GetBlock();
if (block == current_block_) {
// MoveResult and the previous instruction are in the same block.
@@ -2621,27 +2618,27 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
ARRAY_XX(_SHORT, Primitive::kPrimShort);
case Instruction::ARRAY_LENGTH: {
- HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot);
+ HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot, dex_pc);
// No need for a temporary for the null check, it is the only input of the following
// instruction.
object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
- current_block_->AddInstruction(new (arena_) HArrayLength(object));
- UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HArrayLength(object, dex_pc));
+ UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction(), dex_pc);
break;
}
case Instruction::CONST_STRING: {
current_block_->AddInstruction(
new (arena_) HLoadString(graph_->GetCurrentMethod(), instruction.VRegB_21c(), dex_pc));
- UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction(), dex_pc);
break;
}
case Instruction::CONST_STRING_JUMBO: {
current_block_->AddInstruction(
new (arena_) HLoadString(graph_->GetCurrentMethod(), instruction.VRegB_31c(), dex_pc));
- UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction(), dex_pc);
break;
}
@@ -2667,19 +2664,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
dex_pc));
- UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction(), dex_pc);
break;
}
case Instruction::MOVE_EXCEPTION: {
- current_block_->AddInstruction(new (arena_) HLoadException());
- UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
- current_block_->AddInstruction(new (arena_) HClearException());
+ current_block_->AddInstruction(new (arena_) HLoadException(dex_pc));
+ UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction(), dex_pc);
+ current_block_->AddInstruction(new (arena_) HClearException(dex_pc));
break;
}
case Instruction::THROW: {
- HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot);
+ HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot, dex_pc);
current_block_->AddInstruction(new (arena_) HThrow(exception, dex_pc));
// A throw instruction must branch to the exit block.
current_block_->AddSuccessor(exit_block_);
@@ -2710,7 +2707,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::MONITOR_ENTER: {
current_block_->AddInstruction(new (arena_) HMonitorOperation(
- LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot, dex_pc),
HMonitorOperation::kEnter,
dex_pc));
break;
@@ -2718,7 +2715,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::MONITOR_EXIT: {
current_block_->AddInstruction(new (arena_) HMonitorOperation(
- LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot, dex_pc),
HMonitorOperation::kExit,
dex_pc));
break;
@@ -2749,14 +2746,18 @@ HLocal* HGraphBuilder::GetLocalAt(int register_index) const {
return locals_.Get(register_index);
}
-void HGraphBuilder::UpdateLocal(int register_index, HInstruction* instruction) const {
+void HGraphBuilder::UpdateLocal(int register_index,
+ HInstruction* instruction,
+ uint32_t dex_pc) const {
HLocal* local = GetLocalAt(register_index);
- current_block_->AddInstruction(new (arena_) HStoreLocal(local, instruction));
+ current_block_->AddInstruction(new (arena_) HStoreLocal(local, instruction, dex_pc));
}
-HInstruction* HGraphBuilder::LoadLocal(int register_index, Primitive::Type type) const {
+HInstruction* HGraphBuilder::LoadLocal(int register_index,
+ Primitive::Type type,
+ uint32_t dex_pc) const {
HLocal* local = GetLocalAt(register_index);
- current_block_->AddInstruction(new (arena_) HLoadLocal(local, type));
+ current_block_->AddInstruction(new (arena_) HLoadLocal(local, type, dex_pc));
return current_block_->GetLastInstruction();
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 560ed86e50..b0238dc5f8 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -131,23 +131,20 @@ class HGraphBuilder : public ValueObject {
void InitializeLocals(uint16_t count);
HLocal* GetLocalAt(int register_index) const;
- void UpdateLocal(int register_index, HInstruction* instruction) const;
- HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
+ void UpdateLocal(int register_index, HInstruction* instruction, uint32_t dex_pc) const;
+ HInstruction* LoadLocal(int register_index, Primitive::Type type, uint32_t dex_pc) const;
void PotentiallyAddSuspendCheck(HBasicBlock* target, uint32_t dex_pc);
void InitializeParameters(uint16_t number_of_parameters);
bool NeedsAccessCheck(uint32_t type_index) const;
template<typename T>
- void Unop_12x(const Instruction& instruction, Primitive::Type type);
-
- template<typename T>
- void Binop_23x(const Instruction& instruction, Primitive::Type type);
+ void Unop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
template<typename T>
void Binop_23x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
template<typename T>
- void Binop_23x_shift(const Instruction& instruction, Primitive::Type type);
+ void Binop_23x_shift(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
void Binop_23x_cmp(const Instruction& instruction,
Primitive::Type type,
@@ -155,19 +152,16 @@ class HGraphBuilder : public ValueObject {
uint32_t dex_pc);
template<typename T>
- void Binop_12x(const Instruction& instruction, Primitive::Type type);
-
- template<typename T>
void Binop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
template<typename T>
- void Binop_12x_shift(const Instruction& instruction, Primitive::Type type);
+ void Binop_12x_shift(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
template<typename T>
- void Binop_22b(const Instruction& instruction, bool reverse);
+ void Binop_22b(const Instruction& instruction, bool reverse, uint32_t dex_pc);
template<typename T>
- void Binop_22s(const Instruction& instruction, bool reverse);
+ void Binop_22s(const Instruction& instruction, bool reverse, uint32_t dex_pc);
template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_pc);
template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_pc);
@@ -185,7 +179,7 @@ class HGraphBuilder : public ValueObject {
bool second_is_lit,
bool is_div);
- void BuildReturn(const Instruction& instruction, Primitive::Type type);
+ void BuildReturn(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
// Builds an instance field access node and returns whether the instruction is supported.
bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 679899a23c..a4c58b095a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -48,7 +48,7 @@ static constexpr Register kMethodRegisterArgument = R0;
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
static constexpr Register kCoreCalleeSaves[] =
- { R5, R6, R7, R8, R10, R11, PC };
+ { R5, R6, R7, R8, R10, R11, LR };
static constexpr SRegister kFpuCalleeSaves[] =
{ S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };
@@ -409,8 +409,8 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
relative_call_patches_(graph->GetArena()->Adapter()) {
- // Save the PC register to mimic Quick.
- AddAllocatedRegister(Location::RegisterLocation(PC));
+ // Always save the LR register to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(LR));
}
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
@@ -599,12 +599,9 @@ void CodeGeneratorARM::GenerateFrameEntry() {
RecordPcInfo(nullptr, 0);
}
- // PC is in the list of callee-save to mimic Quick, but we need to push
- // LR at entry instead.
- uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
- __ PushList(push_mask);
- __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
- __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
+ __ PushList(core_spill_mask_);
+ __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
+ __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, core_spill_mask_, kArmWordSize);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
@@ -632,7 +629,10 @@ void CodeGeneratorARM::GenerateFrameExit() {
__ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
__ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
- __ PopList(core_spill_mask_);
+ // Pop LR into PC to return.
+ DCHECK_NE(core_spill_mask_ & (1 << LR), 0U);
+ uint32_t pop_mask = (core_spill_mask_ & (~(1 << LR))) | 1 << PC;
+ __ PopList(pop_mask);
__ cfi().RestoreState();
__ cfi().DefCFAOffset(GetFrameSize());
}
@@ -1560,25 +1560,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
return;
}
- Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
- invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
- LocationSummary* locations = invoke->GetLocations();
- Location receiver = locations->InAt(0);
- uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- // temp = object->GetClass();
- DCHECK(receiver.IsRegister());
- __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(temp);
- // temp = temp->GetMethodAt(method_offset);
- uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value();
- __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
- // LR = temp->GetEntryPoint();
- __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
- // LR();
- __ blx(LR);
+ codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -4607,6 +4589,28 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
DCHECK(!IsLeafMethod());
}
+void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
+ Register temp = temp_location.AsRegister<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
+ DCHECK(receiver.IsRegister());
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(temp);
+ // temp = temp->GetMethodAt(method_offset);
+ uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value();
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // LR = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
+ // LR();
+ __ blx(LR);
+}
+
void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size = method_patches_.size() + call_patches_.size() + relative_call_patches_.size();
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 9528cca36f..4a0df4e936 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -327,6 +327,7 @@ class CodeGeneratorARM : public CodeGenerator {
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 390ea6b576..6b1457bc31 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2474,6 +2474,29 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
DCHECK(!IsLeafMethod());
}
+void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = XRegisterFrom(temp_in);
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
+ Offset class_offset = mirror::Object::ClassOffset();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+
+ BlockPoolsScope block_pools(GetVIXLAssembler());
+
+ DCHECK(receiver.IsRegister());
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
+ MaybeRecordImplicitNullCheck(invoke);
+ GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
+ // temp = temp->GetMethodAt(method_offset);
+ __ Ldr(temp, MemOperand(temp, method_offset));
+ // lr = temp->GetEntryPoint();
+ __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
+ // lr();
+ __ Blr(lr);
+}
+
void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
@@ -2567,26 +2590,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
return;
}
- LocationSummary* locations = invoke->GetLocations();
- Location receiver = locations->InAt(0);
- Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
- size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
- invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
- Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
-
- BlockPoolsScope block_pools(GetVIXLAssembler());
-
- DCHECK(receiver.IsRegister());
- __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
- // temp = temp->GetMethodAt(method_offset);
- __ Ldr(temp, MemOperand(temp, method_offset));
- // lr = temp->GetEntryPoint();
- __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
- // lr();
- __ Blr(lr);
+ codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 18070fc6b6..12ead7e11d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -359,6 +359,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
}
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f48395b1e1..a5ad226e0b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -19,6 +19,7 @@
#include "art_method.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
+#include "constant_area_fixups_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
@@ -1548,23 +1549,11 @@ void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
}
void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
- invoke->GetVTableIndex(), kX86PointerSize).Uint32Value();
- LocationSummary* locations = invoke->GetLocations();
- Location receiver = locations->InAt(0);
- uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- // temp = object->GetClass();
- DCHECK(receiver.IsRegister());
- __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(temp);
- // temp = temp->GetMethodAt(method_offset);
- __ movl(temp, Address(temp, method_offset));
- // call temp->GetEntryPoint();
- __ call(Address(
- temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ return;
+ }
+ codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -2213,7 +2202,7 @@ void LocationsBuilderX86::VisitAdd(HAdd* add) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2275,6 +2264,16 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
case Primitive::kPrimFloat: {
if (second.IsFpuRegister()) {
__ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (add->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = add->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ addss(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralFloatAddress(
+ const_area->GetConstant()->AsFloatConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsStackSlot());
+ __ addss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
@@ -2282,6 +2281,16 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
case Primitive::kPrimDouble: {
if (second.IsFpuRegister()) {
__ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (add->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = add->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ addsd(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralDoubleAddress(
+ const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsDoubleStackSlot());
+ __ addsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
@@ -2305,7 +2314,7 @@ void LocationsBuilderX86::VisitSub(HSub* sub) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2351,12 +2360,36 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
}
case Primitive::kPrimFloat: {
- __ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ if (second.IsFpuRegister()) {
+ __ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (sub->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = sub->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ subss(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralFloatAddress(
+ const_area->GetConstant()->AsFloatConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsStackSlot());
+ __ subss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ }
break;
}
case Primitive::kPrimDouble: {
- __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ if (second.IsFpuRegister()) {
+ __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (sub->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = sub->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ subsd(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralDoubleAddress(
+ const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsDoubleStackSlot());
+ __ subsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ }
break;
}
@@ -2391,7 +2424,7 @@ void LocationsBuilderX86::VisitMul(HMul* mul) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2507,12 +2540,38 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
}
case Primitive::kPrimFloat: {
- __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ DCHECK(first.Equals(locations->Out()));
+ if (second.IsFpuRegister()) {
+ __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (mul->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = mul->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ mulss(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralFloatAddress(
+ const_area->GetConstant()->AsFloatConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsStackSlot());
+ __ mulss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ }
break;
}
case Primitive::kPrimDouble: {
- __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ DCHECK(first.Equals(locations->Out()));
+ if (second.IsFpuRegister()) {
+ __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (mul->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = mul->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ mulsd(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralDoubleAddress(
+ const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsDoubleStackSlot());
+ __ mulsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ }
break;
}
@@ -2855,7 +2914,7 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2867,7 +2926,6 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
LocationSummary* locations = div->GetLocations();
- Location out = locations->Out();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
@@ -2879,14 +2937,36 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
}
case Primitive::kPrimFloat: {
- DCHECK(first.Equals(out));
- __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ if (second.IsFpuRegister()) {
+ __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (div->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = div->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ divss(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralFloatAddress(
+ const_area->GetConstant()->AsFloatConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsStackSlot());
+ __ divss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ }
break;
}
case Primitive::kPrimDouble: {
- DCHECK(first.Equals(out));
- __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ if (second.IsFpuRegister()) {
+ __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+ } else if (div->InputAt(1)->IsX86LoadFromConstantTable()) {
+ HX86LoadFromConstantTable* const_area = div->InputAt(1)->AsX86LoadFromConstantTable();
+ DCHECK(!const_area->NeedsMaterialization());
+ __ divsd(first.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralDoubleAddress(
+ const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+ const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+ } else {
+ DCHECK(second.IsDoubleStackSlot());
+ __ divsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+ }
break;
}
@@ -3570,6 +3650,25 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
DCHECK(!IsLeafMethod());
}
+void CodeGeneratorX86::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
+ Register temp = temp_in.AsRegister<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kX86PointerSize).Uint32Value();
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
+ DCHECK(receiver.IsRegister());
+ __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
+ MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(temp);
+ // temp = temp->GetMethodAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
+ __ call(Address(
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+}
+
void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
linker_patches->reserve(method_patches_.size() + relative_call_patches_.size());
@@ -5085,6 +5184,245 @@ void InstructionCodeGeneratorX86::VisitFakeString(HFakeString* instruction ATTRI
// Will be generated at use site.
}
+void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
+ HX86ComputeBaseMethodAddress* insn) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86ComputeBaseMethodAddress(
+ HX86ComputeBaseMethodAddress* insn) {
+ LocationSummary* locations = insn->GetLocations();
+ Register reg = locations->Out().AsRegister<Register>();
+
+ // Generate call to next instruction.
+ Label next_instruction;
+ __ call(&next_instruction);
+ __ Bind(&next_instruction);
+
+ // Remember this offset for later use with constant area.
+ codegen_->SetMethodAddressOffset(GetAssembler()->CodeSize());
+
+ // Grab the return address off the stack.
+ __ popl(reg);
+}
+
+void LocationsBuilderX86::VisitX86LoadFromConstantTable(
+ HX86LoadFromConstantTable* insn) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::ConstantLocation(insn->GetConstant()));
+
+ // If we don't need to be materialized, we only need the inputs to be set.
+ if (!insn->NeedsMaterialization()) {
+ return;
+ }
+
+ switch (insn->GetType()) {
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimInt:
+ locations->SetOut(Location::RequiresRegister());
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported x86 constant area type " << insn->GetType();
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromConstantTable* insn) {
+ if (!insn->NeedsMaterialization()) {
+ return;
+ }
+
+ LocationSummary* locations = insn->GetLocations();
+ Location out = locations->Out();
+ Register const_area = locations->InAt(0).AsRegister<Register>();
+ HConstant *value = insn->GetConstant();
+
+ switch (insn->GetType()) {
+ case Primitive::kPrimFloat:
+ __ movss(out.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralFloatAddress(value->AsFloatConstant()->GetValue(), const_area));
+ break;
+
+ case Primitive::kPrimDouble:
+ __ movsd(out.AsFpuRegister<XmmRegister>(),
+ codegen_->LiteralDoubleAddress(value->AsDoubleConstant()->GetValue(), const_area));
+ break;
+
+ case Primitive::kPrimInt:
+ __ movl(out.AsRegister<Register>(),
+ codegen_->LiteralInt32Address(value->AsIntConstant()->GetValue(), const_area));
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported x86 constant area type " << insn->GetType();
+ }
+}
+
+void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+ // Generate the constant area if needed.
+ X86Assembler* assembler = GetAssembler();
+ if (!assembler->IsConstantAreaEmpty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
+ // byte values.
+ assembler->Align(4, 0);
+ constant_area_start_ = assembler->CodeSize();
+ assembler->AddConstantArea();
+ }
+
+ // And finish up.
+ CodeGenerator::Finalize(allocator);
+}
+
+/**
+ * Class to handle late fixup of offsets into constant area.
+ */
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocMisc> {
+ public:
+ RIPFixup(const CodeGeneratorX86& codegen, int offset)
+ : codegen_(codegen), offset_into_constant_area_(offset) {}
+
+ private:
+ void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ // Patch the correct offset for the instruction. The place to patch is the
+ // last 4 bytes of the instruction.
+ // The value to patch is the distance of the offset in the constant area
+ // from the address computed by the HX86ComputeBaseMethodAddress instruction.
+ int32_t constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - codegen_.GetMethodAddressOffset();
+
+ // Patch in the right value.
+ region.StoreUnaligned<int32_t>(pos - 4, relative_position);
+ }
+
+ const CodeGeneratorX86& codegen_;
+
+ // Location in constant area that the fixup refers to.
+ int offset_into_constant_area_;
+};
+
+Address CodeGeneratorX86::LiteralDoubleAddress(double v, Register reg) {
+ AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
+ return Address(reg, kDummy32BitOffset, fixup);
+}
+
+Address CodeGeneratorX86::LiteralFloatAddress(float v, Register reg) {
+ AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddFloat(v));
+ return Address(reg, kDummy32BitOffset, fixup);
+}
+
+Address CodeGeneratorX86::LiteralInt32Address(int32_t v, Register reg) {
+ AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt32(v));
+ return Address(reg, kDummy32BitOffset, fixup);
+}
+
+Address CodeGeneratorX86::LiteralInt64Address(int64_t v, Register reg) {
+ AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt64(v));
+ return Address(reg, kDummy32BitOffset, fixup);
+}
+
+/**
+ * Finds instructions that need the constant area base as an input.
+ */
+class ConstantHandlerVisitor : public HGraphVisitor {
+ public:
+ explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+
+ private:
+ void VisitAdd(HAdd* add) OVERRIDE {
+ BinaryFP(add);
+ }
+
+ void VisitSub(HSub* sub) OVERRIDE {
+ BinaryFP(sub);
+ }
+
+ void VisitMul(HMul* mul) OVERRIDE {
+ BinaryFP(mul);
+ }
+
+ void VisitDiv(HDiv* div) OVERRIDE {
+ BinaryFP(div);
+ }
+
+ void VisitReturn(HReturn* ret) OVERRIDE {
+ HConstant* value = ret->InputAt(0)->AsConstant();
+ if (value != nullptr && Primitive::IsFloatingPointType(value->GetType())) {
+ ReplaceInput(ret, value, 0, true);
+ }
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void BinaryFP(HBinaryOperation* bin) {
+ HConstant* rhs = bin->InputAt(1)->AsConstant();
+ if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+ ReplaceInput(bin, rhs, 1, false);
+ }
+ }
+
+ void InitializeConstantAreaPointer(HInstruction* user) {
+ // Ensure we only initialize the pointer once.
+ if (base_ != nullptr) {
+ return;
+ }
+
+ HGraph* graph = GetGraph();
+ HBasicBlock* entry = graph->GetEntryBlock();
+ base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
+ HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+ entry->InsertInstructionBefore(base_, insert_pos);
+ DCHECK(base_ != nullptr);
+ }
+
+ void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
+ InitializeConstantAreaPointer(insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = insn->GetBlock();
+ HX86LoadFromConstantTable* load_constant =
+ new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+ block->InsertInstructionBefore(load_constant, insn);
+ insn->ReplaceInput(load_constant, input_index);
+ }
+
+ void HandleInvoke(HInvoke* invoke) {
+ // Ensure that we can load FP arguments from the constant area.
+ for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
+ HConstant* input = invoke->InputAt(i)->AsConstant();
+ if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
+ ReplaceInput(invoke, input, i, true);
+ }
+ }
+ }
+
+ // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
+ // input to the HX86LoadFromConstantTable instructions.
+ HX86ComputeBaseMethodAddress* base_;
+};
+
+void ConstantAreaFixups::Run() {
+ ConstantHandlerVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
+}
+
#undef __
} // namespace x86
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 17787a82df..bd7cb12777 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -294,6 +294,8 @@ class CodeGeneratorX86 : public CodeGenerator {
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ // Generate a call to a virtual method.
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp);
// Emit linker patches.
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
@@ -325,6 +327,25 @@ class CodeGeneratorX86 : public CodeGenerator {
return isa_features_;
}
+ void SetMethodAddressOffset(int32_t offset) {
+ method_address_offset_ = offset;
+ }
+
+ int32_t GetMethodAddressOffset() const {
+ return method_address_offset_;
+ }
+
+ int32_t ConstantAreaStart() const {
+ return constant_area_start_;
+ }
+
+ Address LiteralDoubleAddress(double v, Register reg);
+ Address LiteralFloatAddress(float v, Register reg);
+ Address LiteralInt32Address(int32_t v, Register reg);
+ Address LiteralInt64Address(int64_t v, Register reg);
+
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
private:
// Labels for each block that will be compiled.
GrowableArray<Label> block_labels_;
@@ -339,6 +360,20 @@ class CodeGeneratorX86 : public CodeGenerator {
ArenaDeque<MethodPatchInfo<Label>> method_patches_;
ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
+ // Offset to the start of the constant area in the assembled code.
+ // Used for fixups to the constant area.
+ int32_t constant_area_start_;
+
+ // If there is a HX86ComputeBaseMethodAddress instruction in the graph
+ // (which shall be the sole instruction of this kind), subtracting this offset
+ // from the value contained in the out register of this HX86ComputeBaseMethodAddress
+ // instruction gives the address of the start of this method.
+ int32_t method_address_offset_;
+
+ // When we don't know the proper offset for the value, we use kDummy32BitOffset.
+ // The correct value will be inserted when processing Assembler fixups.
+ static constexpr int32_t kDummy32BitOffset = 256;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
};
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e1ec2ea64f..0f3eb74c64 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -475,6 +475,25 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
DCHECK(!IsLeafMethod());
}
+void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
+ CpuRegister temp = temp_in.AsRegister<CpuRegister>();
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue();
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+ // temp = object->GetClass();
+ DCHECK(receiver.IsRegister());
+ __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
+ MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(temp);
+ // temp = temp->GetMethodAt(method_offset);
+ __ movq(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
+}
+
void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
@@ -1709,22 +1728,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
return;
}
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
- invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue();
- LocationSummary* locations = invoke->GetLocations();
- Location receiver = locations->InAt(0);
- size_t class_offset = mirror::Object::ClassOffset().SizeValue();
- // temp = object->GetClass();
- DCHECK(receiver.IsRegister());
- __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(temp);
- // temp = temp->GetMethodAt(method_offset);
- __ movq(temp, Address(temp, method_offset));
- // call temp->GetEntryPoint();
- __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 21357be0a5..f9d8e041d9 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -305,6 +305,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
}
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
diff --git a/compiler/optimizing/constant_area_fixups_x86.h b/compiler/optimizing/constant_area_fixups_x86.h
new file mode 100644
index 0000000000..4138039cdd
--- /dev/null
+++ b/compiler/optimizing/constant_area_fixups_x86.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+#define ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+namespace x86 {
+
+class ConstantAreaFixups : public HOptimization {
+ public:
+ ConstantAreaFixups(HGraph* graph, OptimizingCompilerStats* stats)
+ : HOptimization(graph, "constant_area_fixups_x86", stats) {}
+
+ void Run() OVERRIDE;
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 8aaec6804d..3f5a6e7993 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -51,14 +51,15 @@ HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
global_depth_(0),
stack_(graph->GetArena()->Adapter()),
scc_(graph->GetArena()->Adapter()),
- map_(std::less<int>(), graph->GetArena()->Adapter()),
- cycle_(std::less<int>(), graph->GetArena()->Adapter()),
- induction_(std::less<int>(), graph->GetArena()->Adapter()) {
+ map_(std::less<HInstruction*>(), graph->GetArena()->Adapter()),
+ cycle_(std::less<HInstruction*>(), graph->GetArena()->Adapter()),
+ induction_(std::less<HLoopInformation*>(), graph->GetArena()->Adapter()) {
}
void HInductionVarAnalysis::Run() {
- // Detects sequence variables (generalized induction variables) during an
- // inner-loop-first traversal of all loops using Gerlek's algorithm.
+ // Detects sequence variables (generalized induction variables) during an inner-loop-first
+ // traversal of all loops using Gerlek's algorithm. The order is only relevant if outer
+ // loops would use induction information of inner loops (not currently done).
for (HPostOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
HBasicBlock* graph_block = it_graph.Current();
if (graph_block->IsLoopHeader()) {
@@ -71,38 +72,40 @@ void HInductionVarAnalysis::VisitLoop(HLoopInformation* loop) {
// Find strongly connected components (SSCs) in the SSA graph of this loop using Tarjan's
// algorithm. Due to the descendant-first nature, classification happens "on-demand".
global_depth_ = 0;
- CHECK(stack_.empty());
+ DCHECK(stack_.empty());
map_.clear();
for (HBlocksInLoopIterator it_loop(*loop); !it_loop.Done(); it_loop.Advance()) {
HBasicBlock* loop_block = it_loop.Current();
- CHECK(loop_block->IsInLoop());
+ DCHECK(loop_block->IsInLoop());
if (loop_block->GetLoopInformation() != loop) {
continue; // Inner loops already visited.
}
// Visit phi-operations and instructions.
for (HInstructionIterator it(loop_block->GetPhis()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- if (!IsVisitedNode(instruction->GetId())) {
+ if (!IsVisitedNode(instruction)) {
VisitNode(loop, instruction);
}
}
for (HInstructionIterator it(loop_block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- if (!IsVisitedNode(instruction->GetId())) {
+ if (!IsVisitedNode(instruction)) {
VisitNode(loop, instruction);
}
}
}
- CHECK(stack_.empty());
+ DCHECK(stack_.empty());
map_.clear();
+
+ // Determine the loop's trip count.
+ VisitControl(loop);
}
void HInductionVarAnalysis::VisitNode(HLoopInformation* loop, HInstruction* instruction) {
- const int id = instruction->GetId();
const uint32_t d1 = ++global_depth_;
- map_.Put(id, NodeInfo(d1));
+ map_.Put(instruction, NodeInfo(d1));
stack_.push_back(instruction);
// Visit all descendants.
@@ -113,7 +116,7 @@ void HInductionVarAnalysis::VisitNode(HLoopInformation* loop, HInstruction* inst
// Lower or found SCC?
if (low < d1) {
- map_.find(id)->second.depth = low;
+ map_.find(instruction)->second.depth = low;
} else {
scc_.clear();
cycle_.clear();
@@ -123,7 +126,7 @@ void HInductionVarAnalysis::VisitNode(HLoopInformation* loop, HInstruction* inst
HInstruction* x = stack_.back();
scc_.push_back(x);
stack_.pop_back();
- map_.find(x->GetId())->second.done = true;
+ map_.find(x)->second.done = true;
if (x == instruction) {
break;
}
@@ -150,12 +153,11 @@ uint32_t HInductionVarAnalysis::VisitDescendant(HLoopInformation* loop, HInstruc
}
// Inspect descendant node.
- const int id = instruction->GetId();
- if (!IsVisitedNode(id)) {
+ if (!IsVisitedNode(instruction)) {
VisitNode(loop, instruction);
- return map_.find(id)->second.depth;
+ return map_.find(instruction)->second.depth;
} else {
- auto it = map_.find(id);
+ auto it = map_.find(instruction);
return it->second.done ? global_depth_ : it->second.depth;
}
}
@@ -176,8 +178,20 @@ void HInductionVarAnalysis::ClassifyTrivial(HLoopInformation* loop, HInstruction
} else if (instruction->IsMul()) {
info = TransferMul(LookupInfo(loop, instruction->InputAt(0)),
LookupInfo(loop, instruction->InputAt(1)));
+ } else if (instruction->IsShl()) {
+ info = TransferShl(LookupInfo(loop, instruction->InputAt(0)),
+ LookupInfo(loop, instruction->InputAt(1)),
+ instruction->InputAt(0)->GetType());
} else if (instruction->IsNeg()) {
info = TransferNeg(LookupInfo(loop, instruction->InputAt(0)));
+ } else if (instruction->IsBoundsCheck()) {
+ info = LookupInfo(loop, instruction->InputAt(0)); // Pass-through.
+ } else if (instruction->IsTypeConversion()) {
+ HTypeConversion* conversion = instruction->AsTypeConversion();
+ // TODO: accept different conversion scenarios.
+ if (conversion->GetResultType() == conversion->GetInputType()) {
+ info = LookupInfo(loop, conversion->GetInput());
+ }
}
// Successfully classified?
@@ -188,7 +202,7 @@ void HInductionVarAnalysis::ClassifyTrivial(HLoopInformation* loop, HInstruction
void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
const size_t size = scc_.size();
- CHECK_GE(size, 1u);
+ DCHECK_GE(size, 1u);
HInstruction* phi = scc_[size - 1];
if (!IsEntryPhi(loop, phi)) {
return;
@@ -204,41 +218,74 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
if (size == 1) {
InductionInfo* update = LookupInfo(loop, internal);
if (update != nullptr) {
- AssignInfo(loop, phi, NewInductionInfo(kWrapAround, kNop, initial, update, nullptr));
+ AssignInfo(loop, phi, CreateInduction(kWrapAround, initial, update));
}
return;
}
// Inspect remainder of the cycle that resides in scc_. The cycle_ mapping assigns
- // temporary meaning to its nodes.
- cycle_.Overwrite(phi->GetId(), nullptr);
+ // temporary meaning to its nodes, seeded from the phi instruction and back.
for (size_t i = 0; i < size - 1; i++) {
- HInstruction* operation = scc_[i];
+ HInstruction* instruction = scc_[i];
InductionInfo* update = nullptr;
- if (operation->IsPhi()) {
- update = TransferCycleOverPhi(operation);
- } else if (operation->IsAdd()) {
- update = TransferCycleOverAddSub(loop, operation->InputAt(0), operation->InputAt(1), kAdd, true);
- } else if (operation->IsSub()) {
- update = TransferCycleOverAddSub(loop, operation->InputAt(0), operation->InputAt(1), kSub, true);
+ if (instruction->IsPhi()) {
+ update = SolvePhi(loop, phi, instruction);
+ } else if (instruction->IsAdd()) {
+ update = SolveAddSub(
+ loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), kAdd, true);
+ } else if (instruction->IsSub()) {
+ update = SolveAddSub(
+ loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), kSub, true);
}
if (update == nullptr) {
return;
}
- cycle_.Overwrite(operation->GetId(), update);
+ cycle_.Put(instruction, update);
}
- // Success if the internal link received accumulated nonzero update.
- auto it = cycle_.find(internal->GetId());
- if (it != cycle_.end() && it->second != nullptr) {
- // Classify header phi and feed the cycle "on-demand".
- AssignInfo(loop, phi, NewInductionInfo(kLinear, kNop, it->second, initial, nullptr));
- for (size_t i = 0; i < size - 1; i++) {
- ClassifyTrivial(loop, scc_[i]);
+ // Success if the internal link received a meaning.
+ auto it = cycle_.find(internal);
+ if (it != cycle_.end()) {
+ InductionInfo* induction = it->second;
+ switch (induction->induction_class) {
+ case kInvariant:
+ // Classify phi (last element in scc_) and then the rest of the cycle "on-demand".
+ // Statements are scanned in the Tarjan SCC order, with phi first.
+ AssignInfo(loop, phi, CreateInduction(kLinear, induction, initial));
+ for (size_t i = 0; i < size - 1; i++) {
+ ClassifyTrivial(loop, scc_[i]);
+ }
+ break;
+ case kPeriodic:
+ // Classify all elements in the cycle with the found periodic induction while rotating
+ // each first element to the end. Lastly, phi (last element in scc_) is classified.
+ // Statements are scanned in the reverse Tarjan SCC order, with phi last.
+ for (size_t i = 2; i <= size; i++) {
+ AssignInfo(loop, scc_[size - i], induction);
+ induction = RotatePeriodicInduction(induction->op_b, induction->op_a);
+ }
+ AssignInfo(loop, phi, induction);
+ break;
+ default:
+ break;
}
}
}
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::RotatePeriodicInduction(
+ InductionInfo* induction,
+ InductionInfo* last) {
+ // Rotates a periodic induction of the form
+ // (a, b, c, d, e)
+ // into
+ // (b, c, d, e, a)
+ // in preparation of assigning this to the previous variable in the sequence.
+ if (induction->induction_class == kInvariant) {
+ return CreateInduction(kPeriodic, induction, last);
+ }
+ return CreateInduction(kPeriodic, induction->op_a, RotatePeriodicInduction(induction->op_b, last));
+}
+
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferPhi(InductionInfo* a,
InductionInfo* b) {
// Transfer over a phi: if both inputs are identical, result is input.
@@ -251,36 +298,33 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferPhi(Inducti
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferAddSub(InductionInfo* a,
InductionInfo* b,
InductionOp op) {
- // Transfer over an addition or subtraction: invariant or linear
- // inputs combine into new invariant or linear result.
+ // Transfer over an addition or subtraction: any invariant, linear, wrap-around, or periodic
+ // can be combined with an invariant to yield a similar result. Even two linear inputs can
+ // be combined. All other combinations fail, however.
if (a != nullptr && b != nullptr) {
if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
- return NewInductionInfo(kInvariant, op, a, b, nullptr);
- } else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
- return NewInductionInfo(
- kLinear,
- kNop,
- a->op_a,
- NewInductionInfo(kInvariant, op, a->op_b, b, nullptr),
- nullptr);
- } else if (a->induction_class == kInvariant && b->induction_class == kLinear) {
- InductionInfo* ba = b->op_a;
- if (op == kSub) { // negation required
- ba = NewInductionInfo(kInvariant, kNeg, nullptr, ba, nullptr);
- }
- return NewInductionInfo(
- kLinear,
- kNop,
- ba,
- NewInductionInfo(kInvariant, op, a, b->op_b, nullptr),
- nullptr);
+ return CreateInvariantOp(op, a, b);
} else if (a->induction_class == kLinear && b->induction_class == kLinear) {
- return NewInductionInfo(
- kLinear,
- kNop,
- NewInductionInfo(kInvariant, op, a->op_a, b->op_a, nullptr),
- NewInductionInfo(kInvariant, op, a->op_b, b->op_b, nullptr),
- nullptr);
+ return CreateInduction(
+ kLinear, TransferAddSub(a->op_a, b->op_a, op), TransferAddSub(a->op_b, b->op_b, op));
+ } else if (a->induction_class == kInvariant) {
+ InductionInfo* new_a = b->op_a;
+ InductionInfo* new_b = TransferAddSub(a, b->op_b, op);
+ if (b->induction_class != kLinear) {
+ DCHECK(b->induction_class == kWrapAround || b->induction_class == kPeriodic);
+ new_a = TransferAddSub(a, new_a, op);
+ } else if (op == kSub) { // Negation required.
+ new_a = TransferNeg(new_a);
+ }
+ return CreateInduction(b->induction_class, new_a, new_b);
+ } else if (b->induction_class == kInvariant) {
+ InductionInfo* new_a = a->op_a;
+ InductionInfo* new_b = TransferAddSub(a->op_b, b, op);
+ if (a->induction_class != kLinear) {
+ DCHECK(a->induction_class == kWrapAround || a->induction_class == kPeriodic);
+ new_a = TransferAddSub(new_a, b, op);
+ }
+ return CreateInduction(a->induction_class, new_a, new_b);
}
}
return nullptr;
@@ -288,141 +332,335 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferAddSub(Indu
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferMul(InductionInfo* a,
InductionInfo* b) {
- // Transfer over a multiplication: invariant or linear
- // inputs combine into new invariant or linear result.
- // Two linear inputs would become quadratic.
+ // Transfer over a multiplication: any invariant, linear, wrap-around, or periodic
+ // can be multiplied with an invariant to yield a similar but multiplied result.
+ // Two non-invariant inputs cannot be multiplied, however.
if (a != nullptr && b != nullptr) {
if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
- return NewInductionInfo(kInvariant, kMul, a, b, nullptr);
- } else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
- return NewInductionInfo(
- kLinear,
- kNop,
- NewInductionInfo(kInvariant, kMul, a->op_a, b, nullptr),
- NewInductionInfo(kInvariant, kMul, a->op_b, b, nullptr),
- nullptr);
- } else if (a->induction_class == kInvariant && b->induction_class == kLinear) {
- return NewInductionInfo(
- kLinear,
- kNop,
- NewInductionInfo(kInvariant, kMul, a, b->op_a, nullptr),
- NewInductionInfo(kInvariant, kMul, a, b->op_b, nullptr),
- nullptr);
+ return CreateInvariantOp(kMul, a, b);
+ } else if (a->induction_class == kInvariant) {
+ return CreateInduction(b->induction_class, TransferMul(a, b->op_a), TransferMul(a, b->op_b));
+ } else if (b->induction_class == kInvariant) {
+ return CreateInduction(a->induction_class, TransferMul(a->op_a, b), TransferMul(a->op_b, b));
+ }
+ }
+ return nullptr;
+}
+
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferShl(InductionInfo* a,
+ InductionInfo* b,
+ Primitive::Type type) {
+ // Transfer over a shift left: treat shift by restricted constant as equivalent multiplication.
+ int64_t value = -1;
+ if (a != nullptr && IsIntAndGet(b, &value)) {
+ // Obtain the constant needed for the multiplication. This yields an existing instruction
 + // if the constant is already there. Otherwise, this has a side effect on the HIR.
+ // The restriction on the shift factor avoids generating a negative constant
+ // (viz. 1 << 31 and 1L << 63 set the sign bit). The code assumes that generalization
+ // for shift factors outside [0,32) and [0,64) ranges is done by earlier simplification.
+ if ((type == Primitive::kPrimInt && 0 <= value && value < 31) ||
+ (type == Primitive::kPrimLong && 0 <= value && value < 63)) {
+ return TransferMul(a, CreateConstant(1 << value, type));
}
}
return nullptr;
}
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferNeg(InductionInfo* a) {
- // Transfer over a unary negation: invariant or linear input
- // yields a similar, but negated result.
+ // Transfer over a unary negation: an invariant, linear, wrap-around, or periodic input
+ // yields a similar but negated induction as result.
if (a != nullptr) {
if (a->induction_class == kInvariant) {
- return NewInductionInfo(kInvariant, kNeg, nullptr, a, nullptr);
- } else if (a->induction_class == kLinear) {
- return NewInductionInfo(
- kLinear,
- kNop,
- NewInductionInfo(kInvariant, kNeg, nullptr, a->op_a, nullptr),
- NewInductionInfo(kInvariant, kNeg, nullptr, a->op_b, nullptr),
- nullptr);
+ return CreateInvariantOp(kNeg, nullptr, a);
}
+ return CreateInduction(a->induction_class, TransferNeg(a->op_a), TransferNeg(a->op_b));
}
return nullptr;
}
-HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferCycleOverPhi(HInstruction* phi) {
- // Transfer within a cycle over a phi: only identical inputs
- // can be combined into that input as result.
- const size_t count = phi->InputCount();
- CHECK_GT(count, 0u);
- auto ita = cycle_.find(phi->InputAt(0)->GetId());
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolvePhi(HLoopInformation* loop,
+ HInstruction* phi,
+ HInstruction* instruction) {
+ // Solve within a cycle over a phi: identical inputs are combined into that input as result.
+ const size_t count = instruction->InputCount();
+ DCHECK_GT(count, 0u);
+ auto ita = cycle_.find(instruction->InputAt(0));
if (ita != cycle_.end()) {
InductionInfo* a = ita->second;
for (size_t i = 1; i < count; i++) {
- auto itb = cycle_.find(phi->InputAt(i)->GetId());
- if (itb == cycle_.end() ||!HInductionVarAnalysis::InductionEqual(a, itb->second)) {
+ auto itb = cycle_.find(instruction->InputAt(i));
+ if (itb == cycle_.end() || !HInductionVarAnalysis::InductionEqual(a, itb->second)) {
return nullptr;
}
}
return a;
}
+
+ // Solve within a cycle over another entry-phi: add invariants into a periodic.
+ if (IsEntryPhi(loop, instruction)) {
+ InductionInfo* a = LookupInfo(loop, instruction->InputAt(0));
+ if (a != nullptr && a->induction_class == kInvariant) {
+ if (instruction->InputAt(1) == phi) {
+ InductionInfo* initial = LookupInfo(loop, phi->InputAt(0));
+ return CreateInduction(kPeriodic, a, initial);
+ }
+ auto it = cycle_.find(instruction->InputAt(1));
+ if (it != cycle_.end()) {
+ InductionInfo* b = it->second;
+ if (b->induction_class == kPeriodic) {
+ return CreateInduction(kPeriodic, a, b);
+ }
+ }
+ }
+ }
+
return nullptr;
}
-HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferCycleOverAddSub(
- HLoopInformation* loop,
- HInstruction* x,
- HInstruction* y,
- InductionOp op,
- bool first) {
- // Transfer within a cycle over an addition or subtraction: adding or
- // subtracting an invariant value adds to the stride of the induction,
- // starting with the phi value denoted by the unusual nullptr value.
- auto it = cycle_.find(x->GetId());
- if (it != cycle_.end()) {
- InductionInfo* a = it->second;
- InductionInfo* b = LookupInfo(loop, y);
- if (b != nullptr && b->induction_class == kInvariant) {
- if (a == nullptr) {
- if (op == kSub) { // negation required
- return NewInductionInfo(kInvariant, kNeg, nullptr, b, nullptr);
- }
- return b;
- } else if (a->induction_class == kInvariant) {
- return NewInductionInfo(kInvariant, op, a, b, nullptr);
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveAddSub(HLoopInformation* loop,
+ HInstruction* phi,
+ HInstruction* instruction,
+ HInstruction* x,
+ HInstruction* y,
+ InductionOp op,
+ bool is_first_call) {
+ // Solve within a cycle over an addition or subtraction: adding or subtracting an
+ // invariant value, seeded from phi, keeps adding to the stride of the induction.
+ InductionInfo* b = LookupInfo(loop, y);
+ if (b != nullptr && b->induction_class == kInvariant) {
+ if (x == phi) {
+ return (op == kAdd) ? b : CreateInvariantOp(kNeg, nullptr, b);
+ }
+ auto it = cycle_.find(x);
+ if (it != cycle_.end()) {
+ InductionInfo* a = it->second;
+ if (a->induction_class == kInvariant) {
+ return CreateInvariantOp(op, a, b);
}
}
}
- // On failure, try alternatives.
+
+ // Try some alternatives before failing.
if (op == kAdd) {
- // Try the other way around for an addition.
- if (first) {
- return TransferCycleOverAddSub(loop, y, x, op, false);
+ // Try the other way around for an addition if considered for first time.
+ if (is_first_call) {
+ return SolveAddSub(loop, phi, instruction, y, x, op, false);
+ }
+ } else if (op == kSub) {
+ // Solve within a tight cycle for a periodic idiom k = c - k;
+ if (y == phi && instruction == phi->InputAt(1)) {
+ InductionInfo* a = LookupInfo(loop, x);
+ if (a != nullptr && a->induction_class == kInvariant) {
+ InductionInfo* initial = LookupInfo(loop, phi->InputAt(0));
+ return CreateInduction(kPeriodic, CreateInvariantOp(kSub, a, initial), initial);
+ }
}
}
+
return nullptr;
}
-void HInductionVarAnalysis::PutInfo(int loop_id, int id, InductionInfo* info) {
- auto it = induction_.find(loop_id);
+void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
+ HInstruction* control = loop->GetHeader()->GetLastInstruction();
+ if (control->IsIf()) {
+ HIf* ifs = control->AsIf();
+ HBasicBlock* if_true = ifs->IfTrueSuccessor();
+ HBasicBlock* if_false = ifs->IfFalseSuccessor();
+ HInstruction* if_expr = ifs->InputAt(0);
+ // Determine if loop has following structure in header.
+ // loop-header: ....
+ // if (condition) goto X
+ if (if_expr->IsCondition()) {
+ HCondition* condition = if_expr->AsCondition();
+ InductionInfo* a = LookupInfo(loop, condition->InputAt(0));
+ InductionInfo* b = LookupInfo(loop, condition->InputAt(1));
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ // Determine if the loop control uses integral arithmetic and an if-exit (X outside) or an
+ // if-iterate (X inside), always expressed as if-iterate when passing into VisitCondition().
+ if (type != Primitive::kPrimInt && type != Primitive::kPrimLong) {
+ // Loop control is not 32/64-bit integral.
+ } else if (a == nullptr || b == nullptr) {
+ // Loop control is not a sequence.
+ } else if (if_true->GetLoopInformation() != loop && if_false->GetLoopInformation() == loop) {
+ VisitCondition(loop, a, b, type, condition->GetOppositeCondition());
+ } else if (if_true->GetLoopInformation() == loop && if_false->GetLoopInformation() != loop) {
+ VisitCondition(loop, a, b, type, condition->GetCondition());
+ }
+ }
+ }
+}
+
+void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
+ InductionInfo* a,
+ InductionInfo* b,
+ Primitive::Type type,
+ IfCondition cmp) {
+ if (a->induction_class == kInvariant && b->induction_class == kLinear) {
+ // Swap conditions (e.g. U > i is same as i < U).
+ switch (cmp) {
+ case kCondLT: VisitCondition(loop, b, a, type, kCondGT); break;
+ case kCondLE: VisitCondition(loop, b, a, type, kCondGE); break;
+ case kCondGT: VisitCondition(loop, b, a, type, kCondLT); break;
+ case kCondGE: VisitCondition(loop, b, a, type, kCondLE); break;
+ default: break;
+ }
+ } else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
+ // Normalize a linear loop control with a constant, nonzero stride:
+ // stride > 0, either i < U or i <= U
+ // stride < 0, either i > U or i >= U
+ InductionInfo* stride = a->op_a;
+ InductionInfo* lo_val = a->op_b;
+ InductionInfo* hi_val = b;
+ int64_t value = -1;
+ if (IsIntAndGet(stride, &value)) {
+ if ((value > 0 && (cmp == kCondLT || cmp == kCondLE)) ||
+ (value < 0 && (cmp == kCondGT || cmp == kCondGE))) {
+ bool is_strict = cmp == kCondLT || cmp == kCondGT;
+ VisitTripCount(loop, lo_val, hi_val, stride, value, type, is_strict);
+ }
+ }
+ }
+}
+
+void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
+ InductionInfo* lo_val,
+ InductionInfo* hi_val,
+ InductionInfo* stride,
+ int32_t stride_value,
+ Primitive::Type type,
+ bool is_strict) {
+ // Any loop of the general form:
+ //
+ // for (i = L; i <= U; i += S) // S > 0
+ // or for (i = L; i >= U; i += S) // S < 0
+ // .. i ..
+ //
+ // can be normalized into:
+ //
+ // for (n = 0; n < TC; n++) // where TC = (U + S - L) / S
+ // .. L + S * n ..
+ //
+ // NOTE: The TC (trip-count) expression is only valid if the top-test path is taken at
+ // least once. Otherwise TC is 0. Also, the expression assumes the loop does not
+ // have any early-exits. Otherwise, TC is an upper bound.
+ //
+ bool cancels = is_strict && abs(stride_value) == 1; // compensation cancels conversion?
+ if (!cancels) {
+ // Convert exclusive integral inequality into inclusive integral inequality,
+ // viz. condition i < U is i <= U - 1 and condition i > U is i >= U + 1.
+ if (is_strict) {
+ const InductionOp op = stride_value > 0 ? kSub : kAdd;
+ hi_val = CreateInvariantOp(op, hi_val, CreateConstant(1, type));
+ }
+ // Compensate for stride.
+ hi_val = CreateInvariantOp(kAdd, hi_val, stride);
+ }
+
+ // Assign the trip-count expression to the loop control. Clients that use the information
+ // should be aware that due to the L <= U assumption, the expression is only valid in the
+ // loop-body proper, and not yet in the loop-header. If the loop has any early exits, the
+ // trip-count forms a conservative upper bound on the number of loop iterations.
+ InductionInfo* trip_count =
+ CreateInvariantOp(kDiv, CreateInvariantOp(kSub, hi_val, lo_val), stride);
+ AssignInfo(loop, loop->GetHeader()->GetLastInstruction(), trip_count);
+}
+
+void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
+ HInstruction* instruction,
+ InductionInfo* info) {
+ auto it = induction_.find(loop);
if (it == induction_.end()) {
- it = induction_.Put(
- loop_id, ArenaSafeMap<int, InductionInfo*>(std::less<int>(), graph_->GetArena()->Adapter()));
+ it = induction_.Put(loop,
+ ArenaSafeMap<HInstruction*, InductionInfo*>(
+ std::less<HInstruction*>(), graph_->GetArena()->Adapter()));
}
- it->second.Overwrite(id, info);
+ it->second.Put(instruction, info);
}
-HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::GetInfo(int loop_id, int id) {
- auto it = induction_.find(loop_id);
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::LookupInfo(HLoopInformation* loop,
+ HInstruction* instruction) {
+ auto it = induction_.find(loop);
if (it != induction_.end()) {
- auto loop_it = it->second.find(id);
+ auto loop_it = it->second.find(instruction);
if (loop_it != it->second.end()) {
return loop_it->second;
}
}
+ if (IsLoopInvariant(loop, instruction)) {
+ InductionInfo* info = CreateInvariantFetch(instruction);
+ AssignInfo(loop, instruction, info);
+ return info;
+ }
return nullptr;
}
-void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
- HInstruction* instruction,
- InductionInfo* info) {
- const int loopId = loop->GetHeader()->GetBlockId();
- const int id = instruction->GetId();
- PutInfo(loopId, id, info);
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateConstant(int64_t value,
+ Primitive::Type type) {
+ if (type == Primitive::kPrimInt) {
+ return CreateInvariantFetch(graph_->GetIntConstant(value));
+ }
+ DCHECK_EQ(type, Primitive::kPrimLong);
+ return CreateInvariantFetch(graph_->GetLongConstant(value));
}
-HInductionVarAnalysis::InductionInfo*
-HInductionVarAnalysis::LookupInfo(HLoopInformation* loop,
- HInstruction* instruction) {
- const int loop_id = loop->GetHeader()->GetBlockId();
- const int id = instruction->GetId();
- InductionInfo* info = GetInfo(loop_id, id);
- if (info == nullptr && IsLoopInvariant(loop, instruction)) {
- info = NewInductionInfo(kInvariant, kFetch, nullptr, nullptr, instruction);
- PutInfo(loop_id, id, info);
- }
- return info;
+HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInvariant(
+ InductionOp op,
+ InductionInfo* a,
+ InductionInfo* b) {
+ // Perform some light-weight simplifications during construction of a new invariant.
 + // This often saves memory and yields a more concise representation of the induction.
+ // More exhaustive simplifications are done by later phases once induction nodes are
+ // translated back into HIR code (e.g. by loop optimizations or BCE).
+ int64_t value = -1;
+ if (IsIntAndGet(a, &value)) {
+ if (value == 0) {
+ // Simplify 0 + b = b, 0 * b = 0.
+ if (op == kAdd) {
+ return b;
+ } else if (op == kMul) {
+ return a;
+ }
+ } else if (op == kMul) {
+ // Simplify 1 * b = b, -1 * b = -b
+ if (value == 1) {
+ return b;
+ } else if (value == -1) {
+ op = kNeg;
+ a = nullptr;
+ }
+ }
+ }
+ if (IsIntAndGet(b, &value)) {
+ if (value == 0) {
+ // Simplify a + 0 = a, a - 0 = a, a * 0 = 0, -0 = 0.
+ if (op == kAdd || op == kSub) {
+ return a;
+ } else if (op == kMul || op == kNeg) {
+ return b;
+ }
+ } else if (op == kMul || op == kDiv) {
+ // Simplify a * 1 = a, a / 1 = a, a * -1 = -a, a / -1 = -a
+ if (value == 1) {
+ return a;
+ } else if (value == -1) {
+ op = kNeg;
+ b = a;
+ a = nullptr;
+ }
+ }
+ } else if (b->operation == kNeg) {
+ // Simplify a + (-b) = a - b, a - (-b) = a + b, -(-b) = b.
+ if (op == kAdd) {
+ op = kSub;
+ b = b->op_b;
+ } else if (op == kSub) {
+ op = kAdd;
+ b = b->op_b;
+ } else if (op == kNeg) {
+ return b->op_b;
+ }
+ }
+ return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr);
}
bool HInductionVarAnalysis::InductionEqual(InductionInfo* info1,
@@ -440,27 +678,46 @@ bool HInductionVarAnalysis::InductionEqual(InductionInfo* info1,
return info1 == info2;
}
+bool HInductionVarAnalysis::IsIntAndGet(InductionInfo* info, int64_t* value) {
+ if (info != nullptr && info->induction_class == kInvariant && info->operation == kFetch) {
+ DCHECK(info->fetch);
+ if (info->fetch->IsIntConstant()) {
+ *value = info->fetch->AsIntConstant()->GetValue();
+ return true;
+ } else if (info->fetch->IsLongConstant()) {
+ *value = info->fetch->AsLongConstant()->GetValue();
+ return true;
+ }
+ }
+ return false;
+}
+
std::string HInductionVarAnalysis::InductionToString(InductionInfo* info) {
if (info != nullptr) {
if (info->induction_class == kInvariant) {
+ int64_t value = -1;
std::string inv = "(";
inv += InductionToString(info->op_a);
switch (info->operation) {
- case kNop: inv += " ? "; break;
- case kAdd: inv += " + "; break;
+ case kNop: inv += " @ "; break;
+ case kAdd: inv += " + "; break;
case kSub:
- case kNeg: inv += " - "; break;
- case kMul: inv += " * "; break;
- case kDiv: inv += " / "; break;
+ case kNeg: inv += " - "; break;
+ case kMul: inv += " * "; break;
+ case kDiv: inv += " / "; break;
case kFetch:
- CHECK(info->fetch != nullptr);
- inv += std::to_string(info->fetch->GetId()) + ":" + info->fetch->DebugName();
+ DCHECK(info->fetch);
+ if (IsIntAndGet(info, &value)) {
+ inv += std::to_string(value);
+ } else {
+ inv += std::to_string(info->fetch->GetId()) + ":" + info->fetch->DebugName();
+ }
break;
}
inv += InductionToString(info->op_b);
return inv + ")";
} else {
- CHECK(info->operation == kNop);
+ DCHECK(info->operation == kNop);
if (info->induction_class == kLinear) {
return "(" + InductionToString(info->op_a) + " * i + " +
InductionToString(info->op_b) + ")";
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 09a0a380a1..8eccf925c1 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -25,9 +25,11 @@
namespace art {
/**
- * Induction variable analysis.
+ * Induction variable analysis. This class does not have a direct public API.
+ * Instead, the results of induction variable analysis can be queried through
+ * friend classes, such as InductionVarRange.
*
- * Based on the paper by M. Gerlek et al.
+ * The analysis implementation is based on the paper by M. Gerlek et al.
* "Beyond Induction Variables: Detecting and Classifying Sequences Using a Demand-Driven SSA Form"
* (ACM Transactions on Programming Languages and Systems, Volume 17 Issue 1, Jan. 1995).
*/
@@ -35,16 +37,6 @@ class HInductionVarAnalysis : public HOptimization {
public:
explicit HInductionVarAnalysis(HGraph* graph);
- // TODO: design public API useful in later phases
-
- /**
- * Returns string representation of induction found for the instruction
- * in the given loop (for testing and debugging only).
- */
- std::string InductionToString(HLoopInformation* loop, HInstruction* instruction) {
- return InductionToString(LookupInfo(loop, instruction));
- }
-
void Run() OVERRIDE;
private:
@@ -57,12 +49,10 @@ class HInductionVarAnalysis : public HOptimization {
};
enum InductionClass {
- kNone,
kInvariant,
kLinear,
kWrapAround,
- kPeriodic,
- kMonotonic
+ kPeriodic
};
enum InductionOp {
@@ -79,7 +69,7 @@ class HInductionVarAnalysis : public HOptimization {
* Defines a detected induction as:
* (1) invariant:
* operation: a + b, a - b, -b, a * b, a / b
- * or
+ * or:
* fetch: fetch from HIR
* (2) linear:
* nop: a * i + b
@@ -87,8 +77,6 @@ class HInductionVarAnalysis : public HOptimization {
* nop: a, then defined by b
* (4) periodic
* nop: a, then defined by b (repeated when exhausted)
- * (5) monotonic
- * // TODO: determine representation
*/
struct InductionInfo : public ArenaObject<kArenaAllocMisc> {
InductionInfo(InductionClass ic,
@@ -108,17 +96,23 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* fetch;
};
- inline bool IsVisitedNode(int id) const {
- return map_.find(id) != map_.end();
+ bool IsVisitedNode(HInstruction* instruction) const {
+ return map_.find(instruction) != map_.end();
+ }
+
+ InductionInfo* CreateInvariantOp(InductionOp op, InductionInfo* a, InductionInfo* b) {
+ DCHECK(((op != kNeg && a != nullptr) || (op == kNeg && a == nullptr)) && b != nullptr);
+ return CreateSimplifiedInvariant(op, a, b);
}
- inline InductionInfo* NewInductionInfo(
- InductionClass c,
- InductionOp op,
- InductionInfo* a,
- InductionInfo* b,
- HInstruction* i) {
- return new (graph_->GetArena()) InductionInfo(c, op, a, b, i);
+ InductionInfo* CreateInvariantFetch(HInstruction* f) {
+ DCHECK(f != nullptr);
+ return new (graph_->GetArena()) InductionInfo(kInvariant, kFetch, nullptr, nullptr, f);
+ }
+
+ InductionInfo* CreateInduction(InductionClass ic, InductionInfo* a, InductionInfo* b) {
+ DCHECK(a != nullptr && b != nullptr);
+ return new (graph_->GetArena()) InductionInfo(ic, kNop, a, b, nullptr);
}
// Methods for analysis.
@@ -132,36 +126,66 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* TransferPhi(InductionInfo* a, InductionInfo* b);
InductionInfo* TransferAddSub(InductionInfo* a, InductionInfo* b, InductionOp op);
InductionInfo* TransferMul(InductionInfo* a, InductionInfo* b);
+ InductionInfo* TransferShl(InductionInfo* a, InductionInfo* b, Primitive::Type type);
InductionInfo* TransferNeg(InductionInfo* a);
- InductionInfo* TransferCycleOverPhi(HInstruction* phi);
- InductionInfo* TransferCycleOverAddSub(HLoopInformation* loop,
- HInstruction* x,
- HInstruction* y,
- InductionOp op,
- bool first);
+
+ // Solvers.
+ InductionInfo* SolvePhi(HLoopInformation* loop,
+ HInstruction* phi,
+ HInstruction* instruction);
+ InductionInfo* SolveAddSub(HLoopInformation* loop,
+ HInstruction* phi,
+ HInstruction* instruction,
+ HInstruction* x,
+ HInstruction* y,
+ InductionOp op,
+ bool is_first_call);
+ InductionInfo* RotatePeriodicInduction(InductionInfo* induction, InductionInfo* last);
+
+ // Trip count information.
+ void VisitControl(HLoopInformation* loop);
+ void VisitCondition(HLoopInformation* loop,
+ InductionInfo* a,
+ InductionInfo* b,
+ Primitive::Type type,
+ IfCondition cmp);
+ void VisitTripCount(HLoopInformation* loop,
+ InductionInfo* lo_val,
+ InductionInfo* hi_val,
+ InductionInfo* stride,
+ int32_t stride_value,
+ Primitive::Type type,
+ bool is_strict);
// Assign and lookup.
- void PutInfo(int loop_id, int id, InductionInfo* info);
- InductionInfo* GetInfo(int loop_id, int id);
void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
InductionInfo* LookupInfo(HLoopInformation* loop, HInstruction* instruction);
- bool InductionEqual(InductionInfo* info1, InductionInfo* info2);
- std::string InductionToString(InductionInfo* info);
+ InductionInfo* CreateConstant(int64_t value, Primitive::Type type);
+ InductionInfo* CreateSimplifiedInvariant(InductionOp op, InductionInfo* a, InductionInfo* b);
- // Bookkeeping during and after analysis.
- // TODO: fine tune data structures, only keep relevant data
+ // Helpers.
+ static bool InductionEqual(InductionInfo* info1, InductionInfo* info2);
+ static bool IsIntAndGet(InductionInfo* info, int64_t* value);
+ static std::string InductionToString(InductionInfo* info);
- uint32_t global_depth_;
+ // TODO: fine tune the following data structures, only keep relevant data.
+ // Temporary book-keeping during the analysis.
+ uint32_t global_depth_;
ArenaVector<HInstruction*> stack_;
ArenaVector<HInstruction*> scc_;
+ ArenaSafeMap<HInstruction*, NodeInfo> map_;
+ ArenaSafeMap<HInstruction*, InductionInfo*> cycle_;
- // Mappings of instruction id to node and induction information.
- ArenaSafeMap<int, NodeInfo> map_;
- ArenaSafeMap<int, InductionInfo*> cycle_;
+ /**
+ * Maintains the results of the analysis as a mapping from loops to a mapping from instructions
+ * to the induction information for that instruction in that loop.
+ */
+ ArenaSafeMap<HLoopInformation*, ArenaSafeMap<HInstruction*, InductionInfo*>> induction_;
- // Mapping from loop id to mapping of instruction id to induction information.
- ArenaSafeMap<int, ArenaSafeMap<int, InductionInfo*>> induction_;
+ friend class InductionVarAnalysisTest;
+ friend class InductionVarRange;
+ friend class InductionVarRangeTest;
DISALLOW_COPY_AND_ASSIGN(HInductionVarAnalysis);
};
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 2093e3355d..fca1ca55e5 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -63,7 +63,7 @@ class InductionVarAnalysisTest : public testing::Test {
// populate the loop with instructions to set up interesting scenarios.
void BuildLoopNest(int n) {
ASSERT_LE(n, 10);
- graph_->SetNumberOfVRegs(n + 2);
+ graph_->SetNumberOfVRegs(n + 3);
// Build basic blocks with entry, nested loop, exit.
entry_ = new (&allocator_) HBasicBlock(graph_);
@@ -77,47 +77,36 @@ class InductionVarAnalysisTest : public testing::Test {
graph_->SetExitBlock(exit_);
// Provide entry and exit instructions.
- // 0 : parameter
- // 1 : constant 0
- // 2 : constant 1
- // 3 : constant 100
- parameter_ = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot, true);
+ parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot, true);
entry_->AddInstruction(parameter_);
- constant0_ = new (&allocator_) HConstant(Primitive::kPrimInt);
- entry_->AddInstruction(constant0_);
- constant1_ = new (&allocator_) HConstant(Primitive::kPrimInt);
- entry_->AddInstruction(constant1_);
- constant100_ = new (&allocator_) HConstant(Primitive::kPrimInt);
- entry_->AddInstruction(constant100_);
- exit_->AddInstruction(new (&allocator_) HExit());
+ constant0_ = graph_->GetIntConstant(0);
+ constant1_ = graph_->GetIntConstant(1);
+ constant100_ = graph_->GetIntConstant(100);
induc_ = new (&allocator_) HLocal(n);
entry_->AddInstruction(induc_);
entry_->AddInstruction(new (&allocator_) HStoreLocal(induc_, constant0_));
tmp_ = new (&allocator_) HLocal(n + 1);
entry_->AddInstruction(tmp_);
entry_->AddInstruction(new (&allocator_) HStoreLocal(tmp_, constant100_));
+ dum_ = new (&allocator_) HLocal(n + 2);
+ entry_->AddInstruction(dum_);
+ exit_->AddInstruction(new (&allocator_) HExit());
// Provide loop instructions.
for (int d = 0; d < n; d++) {
basic_[d] = new (&allocator_) HLocal(d);
entry_->AddInstruction(basic_[d]);
- loop_preheader_[d]->AddInstruction(
- new (&allocator_) HStoreLocal(basic_[d], constant0_));
- HInstruction* load = new (&allocator_)
- HLoadLocal(basic_[d], Primitive::kPrimInt);
+ loop_preheader_[d]->AddInstruction(new (&allocator_) HStoreLocal(basic_[d], constant0_));
+ HInstruction* load = new (&allocator_) HLoadLocal(basic_[d], Primitive::kPrimInt);
loop_header_[d]->AddInstruction(load);
- HInstruction* compare = new (&allocator_)
- HGreaterThanOrEqual(load, constant100_);
+ HInstruction* compare = new (&allocator_) HLessThan(load, constant100_);
loop_header_[d]->AddInstruction(compare);
loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare));
load = new (&allocator_) HLoadLocal(basic_[d], Primitive::kPrimInt);
loop_body_[d]->AddInstruction(load);
- increment_[d] = new (&allocator_)
- HAdd(Primitive::kPrimInt, load, constant1_);
+ increment_[d] = new (&allocator_) HAdd(Primitive::kPrimInt, load, constant1_);
loop_body_[d]->AddInstruction(increment_[d]);
- loop_body_[d]->AddInstruction(
- new (&allocator_) HStoreLocal(basic_[d], increment_[d]));
+ loop_body_[d]->AddInstruction(new (&allocator_) HStoreLocal(basic_[d], increment_[d]));
loop_body_[d]->AddInstruction(new (&allocator_) HGoto());
}
}
@@ -149,8 +138,7 @@ class InductionVarAnalysisTest : public testing::Test {
// Inserts local load at depth d.
HInstruction* InsertLocalLoad(HLocal* local, int d) {
- return InsertInstruction(
- new (&allocator_) HLoadLocal(local, Primitive::kPrimInt), d);
+ return InsertInstruction(new (&allocator_) HLoadLocal(local, Primitive::kPrimInt), d);
}
// Inserts local store at depth d.
@@ -167,9 +155,10 @@ class InductionVarAnalysisTest : public testing::Test {
parameter_, load, constant0_, Primitive::kPrimInt, 0), d);
}
- // Returns loop information of loop at depth d.
- HLoopInformation* GetLoopInfo(int d) {
- return loop_body_[d]->GetLoopInformation();
+ // Returns induction information of instruction in loop at depth d.
+ std::string GetInductionInfo(HInstruction* instruction, int d) {
+ return HInductionVarAnalysis::InductionToString(
+ iva_->LookupInfo(loop_body_[d]->GetLoopInformation(), instruction));
}
// Performs InductionVarAnalysis (after proper set up).
@@ -194,6 +183,7 @@ class InductionVarAnalysisTest : public testing::Test {
HInstruction* constant100_;
HLocal* induc_; // "vreg_n", the "k"
HLocal* tmp_; // "vreg_n+1"
+ HLocal* dum_; // "vreg_n+2"
// Loop specifics.
HBasicBlock* loop_preheader_[10];
@@ -230,222 +220,159 @@ TEST_F(InductionVarAnalysisTest, ProperLoopSetup) {
ASSERT_EQ(exit_->GetLoopInformation(), nullptr);
}
-TEST_F(InductionVarAnalysisTest, FindBasicInductionVar) {
+TEST_F(InductionVarAnalysisTest, FindBasicInduction) {
// Setup:
// for (int i = 0; i < 100; i++) {
- // a[i] = 0;
+ // a[i] = 0;
// }
BuildLoopNest(1);
HInstruction* store = InsertArrayStore(basic_[0], 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ(
- "((2:Constant) * i + (1:Constant))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
- EXPECT_STREQ(
- "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), increment_[0]).c_str());
+ EXPECT_STREQ("((1) * i + (0))", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[0], 0).c_str());
+
+ // Trip-count.
+ EXPECT_STREQ("(100)", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
}
-TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarAdd) {
+TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
// Setup:
// for (int i = 0; i < 100; i++) {
- // k = 100 + i;
- // a[k] = 0;
+ // k = 100 + i;
+ // k = 100 - i;
+ // k = 100 * i;
+ // k = i << 1;
+ // k = - i;
// }
BuildLoopNest(1);
HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(
- Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ new (&allocator_) HAdd(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
InsertLocalStore(induc_, add, 0);
- HInstruction* store = InsertArrayStore(induc_, 0);
- PerformInductionVarAnalysis();
-
- EXPECT_STREQ(
- "((2:Constant) * i + ((3:Constant) + (1:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
-}
-
-TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarSub) {
- // Setup:
- // for (int i = 0; i < 100; i++) {
- // k = 100 - i;
- // a[k] = 0;
- // }
- BuildLoopNest(1);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(
- Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
InsertLocalStore(induc_, sub, 0);
- HInstruction* store = InsertArrayStore(induc_, 0);
- PerformInductionVarAnalysis();
-
- EXPECT_STREQ(
- "(( - (2:Constant)) * i + ((3:Constant) - (1:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
-}
-
-TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarMul) {
- // Setup:
- // for (int i = 0; i < 100; i++) {
- // k = 100 * i;
- // a[k] = 0;
- // }
- BuildLoopNest(1);
HInstruction *mul = InsertInstruction(
- new (&allocator_) HMul(
- Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ new (&allocator_) HMul(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
InsertLocalStore(induc_, mul, 0);
- HInstruction* store = InsertArrayStore(induc_, 0);
- PerformInductionVarAnalysis();
-
- EXPECT_STREQ(
- "(((3:Constant) * (2:Constant)) * i + ((3:Constant) * (1:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
-}
-
-TEST_F(InductionVarAnalysisTest, FindDerivedInductionVarNeg) {
- // Setup:
- // for (int i = 0; i < 100; i++) {
- // k = - i;
- // a[k] = 0;
- // }
- BuildLoopNest(1);
+ HInstruction *shl = InsertInstruction(
+ new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0), constant1_), 0);
+ InsertLocalStore(induc_, shl, 0);
HInstruction *neg = InsertInstruction(
- new (&allocator_) HNeg(
- Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0)), 0);
+ new (&allocator_) HNeg(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0)), 0);
InsertLocalStore(induc_, neg, 0);
- HInstruction* store = InsertArrayStore(induc_, 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ(
- "(( - (2:Constant)) * i + ( - (1:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+ EXPECT_STREQ("((1) * i + (100))", GetInductionInfo(add, 0).c_str());
+ EXPECT_STREQ("(( - (1)) * i + (100))", GetInductionInfo(sub, 0).c_str());
+ EXPECT_STREQ("((100) * i + (0))", GetInductionInfo(mul, 0).c_str());
+ EXPECT_STREQ("((2) * i + (0))", GetInductionInfo(shl, 0).c_str());
+ EXPECT_STREQ("(( - (1)) * i + (0))", GetInductionInfo(neg, 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindChainInduction) {
// Setup:
// k = 0;
// for (int i = 0; i < 100; i++) {
- // k = k + 100;
- // a[k] = 0;
- // k = k - 1;
- // a[k] = 0;
+ // k = k + 100;
+ // a[k] = 0;
+ // k = k - 1;
+ // a[k] = 0;
// }
BuildLoopNest(1);
HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(
- Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
+ new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
InsertLocalStore(induc_, add, 0);
HInstruction* store1 = InsertArrayStore(induc_, 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(
- Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
InsertLocalStore(induc_, sub, 0);
HInstruction* store2 = InsertArrayStore(induc_, 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ(
- "(((3:Constant) - (2:Constant)) * i + ((1:Constant) + (3:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store1->InputAt(1)).c_str());
- EXPECT_STREQ(
- "(((3:Constant) - (2:Constant)) * i + "
- "(((1:Constant) + (3:Constant)) - (2:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store2->InputAt(1)).c_str());
+ EXPECT_STREQ("(((100) - (1)) * i + (100))",
+ GetInductionInfo(store1->InputAt(1), 0).c_str());
+ EXPECT_STREQ("(((100) - (1)) * i + ((100) - (1)))",
+ GetInductionInfo(store2->InputAt(1), 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) {
// Setup:
// k = 0;
// for (int i = 0; i < 100; i++) {
- // if () k = k + 1;
- // else k = k + 1;
- // a[k] = 0;
+ // if () k = k + 1;
+ // else k = k + 1;
+ // a[k] = 0;
// }
BuildLoopNest(1);
HBasicBlock* ifTrue;
HBasicBlock* ifFalse;
BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* load1 = new (&allocator_)
- HLoadLocal(induc_, Primitive::kPrimInt);
+ HInstruction* load1 = new (&allocator_) HLoadLocal(induc_, Primitive::kPrimInt);
ifTrue->AddInstruction(load1);
- HInstruction* inc1 = new (&allocator_)
- HAdd(Primitive::kPrimInt, load1, constant1_);
+ HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, load1, constant1_);
ifTrue->AddInstruction(inc1);
ifTrue->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc1));
// False-branch.
- HInstruction* load2 = new (&allocator_)
- HLoadLocal(induc_, Primitive::kPrimInt);
+ HInstruction* load2 = new (&allocator_) HLoadLocal(induc_, Primitive::kPrimInt);
ifFalse->AddInstruction(load2);
- HInstruction* inc2 = new (&allocator_)
- HAdd(Primitive::kPrimInt, load2, constant1_);
+ HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, load2, constant1_);
ifFalse->AddInstruction(inc2);
ifFalse->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc2));
// Merge over a phi.
HInstruction* store = InsertArrayStore(induc_, 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ(
- "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+ EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) {
// Setup:
// for (int i = 0; i < 100; i++) {
- // if () k = i + 1;
- // else k = i + 1;
- // a[k] = 0;
+ // if () k = i + 1;
+ // else k = i + 1;
+ // a[k] = 0;
// }
BuildLoopNest(1);
HBasicBlock* ifTrue;
HBasicBlock* ifFalse;
BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* load1 = new (&allocator_)
- HLoadLocal(basic_[0], Primitive::kPrimInt);
+ HInstruction* load1 = new (&allocator_) HLoadLocal(basic_[0], Primitive::kPrimInt);
ifTrue->AddInstruction(load1);
- HInstruction* inc1 = new (&allocator_)
- HAdd(Primitive::kPrimInt, load1, constant1_);
+ HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, load1, constant1_);
ifTrue->AddInstruction(inc1);
ifTrue->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc1));
// False-branch.
- HInstruction* load2 = new (&allocator_)
- HLoadLocal(basic_[0], Primitive::kPrimInt);
+ HInstruction* load2 = new (&allocator_) HLoadLocal(basic_[0], Primitive::kPrimInt);
ifFalse->AddInstruction(load2);
- HInstruction* inc2 = new (&allocator_)
- HAdd(Primitive::kPrimInt, load2, constant1_);
+ HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, load2, constant1_);
ifFalse->AddInstruction(inc2);
ifFalse->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc2));
// Merge over a phi.
HInstruction* store = InsertArrayStore(induc_, 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ(
- "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+ EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
// Setup:
// k = 0;
// for (int i = 0; i < 100; i++) {
- // a[k] = 0;
- // k = 100 - i;
+ // a[k] = 0;
+ // k = 100 - i;
// }
BuildLoopNest(1);
HInstruction* store = InsertArrayStore(induc_, 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(
- Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
InsertLocalStore(induc_, sub, 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ(
- "wrap((1:Constant), "
- "(( - (2:Constant)) * i + ((3:Constant) - (1:Constant))))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+ EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)))",
+ GetInductionInfo(store->InputAt(1), 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
@@ -453,23 +380,144 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
// k = 0;
// t = 100;
// for (int i = 0; i < 100; i++) {
- // a[k] = 0;
- // k = t;
- // t = 100 - i;
+ // a[k] = 0;
+ // k = t;
+ // t = 100 - i;
// }
BuildLoopNest(1);
HInstruction* store = InsertArrayStore(induc_, 0);
InsertLocalStore(induc_, InsertLocalLoad(tmp_, 0), 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(
- Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
+ InsertLocalStore(tmp_, sub, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("wrap((0), wrap((100), (( - (1)) * i + (100))))",
+ GetInductionInfo(store->InputAt(1), 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // t = k + 100;
+ // t = k - 100;
+ // t = k * 100;
+ // t = k << 1;
+ // t = - k;
+ // k = i << 1;
+ // }
+ BuildLoopNest(1);
+ HInstruction *add = InsertInstruction(
+ new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
+ InsertLocalStore(tmp_, add, 0);
+ HInstruction *sub = InsertInstruction(
+ new (&allocator_) HSub(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
InsertLocalStore(tmp_, sub, 0);
+ HInstruction *mul = InsertInstruction(
+ new (&allocator_) HMul(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
+ InsertLocalStore(tmp_, mul, 0);
+ HInstruction *shl = InsertInstruction(
+ new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
+ InsertLocalStore(tmp_, shl, 0);
+ HInstruction *neg = InsertInstruction(
+ new (&allocator_) HNeg(Primitive::kPrimInt, InsertLocalLoad(induc_, 0)), 0);
+ InsertLocalStore(tmp_, neg, 0);
+ InsertLocalStore(
+ induc_,
+ InsertInstruction(
+ new (&allocator_)
+ HShl(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0), constant1_), 0), 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("wrap((100), ((2) * i + (100)))", GetInductionInfo(add, 0).c_str());
+ EXPECT_STREQ("wrap(((0) - (100)), ((2) * i + ((0) - (100))))", GetInductionInfo(sub, 0).c_str());
+ EXPECT_STREQ("wrap((0), (((2) * (100)) * i + (0)))", GetInductionInfo(mul, 0).c_str());
+ EXPECT_STREQ("wrap((0), (((2) * (2)) * i + (0)))", GetInductionInfo(shl, 0).c_str());
+ EXPECT_STREQ("wrap((0), (( - (2)) * i + (0)))", GetInductionInfo(neg, 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindPeriodicInduction) {
+ // Setup:
+ // k = 0;
+ // t = 100;
+ // for (int i = 0; i < 100; i++) {
+ // a[k] = 0;
+ // a[t] = 0;
+ // // Swap t <-> k.
+ // d = t;
+ // t = k;
+ // k = d;
+ // }
+ BuildLoopNest(1);
+ HInstruction* store1 = InsertArrayStore(induc_, 0);
+ HInstruction* store2 = InsertArrayStore(tmp_, 0);
+ InsertLocalStore(dum_, InsertLocalLoad(tmp_, 0), 0);
+ InsertLocalStore(tmp_, InsertLocalLoad(induc_, 0), 0);
+ InsertLocalStore(induc_, InsertLocalLoad(dum_, 0), 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("periodic((0), (100))", GetInductionInfo(store1->InputAt(1), 0).c_str());
+ EXPECT_STREQ("periodic((100), (0))", GetInductionInfo(store2->InputAt(1), 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // a[k] = 0;
+ // k = 1 - k;
+ // }
+ BuildLoopNest(1);
+ HInstruction* store = InsertArrayStore(induc_, 0);
+ HInstruction *sub = InsertInstruction(
+ new (&allocator_) HSub(Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 0)), 0);
+ InsertLocalStore(induc_, sub, 0);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("periodic((0), (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
+ EXPECT_STREQ("periodic((1), (0))", GetInductionInfo(sub, 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
+ // Setup:
+ // k = 0;
+ // for (int i = 0; i < 100; i++) {
+ // k = 1 - k;
+ // t = k + 100;
+ // t = k - 100;
+ // t = k * 100;
+ // t = k << 1;
+ // t = - k;
+ // }
+ BuildLoopNest(1);
+ InsertLocalStore(
+ induc_,
+ InsertInstruction(new (&allocator_)
+ HSub(Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 0)), 0), 0);
+ // Derived expressions.
+ HInstruction *add = InsertInstruction(
+ new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
+ InsertLocalStore(tmp_, add, 0);
+ HInstruction *sub = InsertInstruction(
+ new (&allocator_) HSub(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
+ InsertLocalStore(tmp_, sub, 0);
+ HInstruction *mul = InsertInstruction(
+ new (&allocator_) HMul(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
+ InsertLocalStore(tmp_, mul, 0);
+ HInstruction *shl = InsertInstruction(
+ new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
+ InsertLocalStore(tmp_, shl, 0);
+ HInstruction *neg = InsertInstruction(
+ new (&allocator_) HNeg(Primitive::kPrimInt, InsertLocalLoad(induc_, 0)), 0);
+ InsertLocalStore(tmp_, neg, 0);
PerformInductionVarAnalysis();
- EXPECT_STREQ(
- "wrap((1:Constant), wrap((3:Constant), "
- "(( - (2:Constant)) * i + ((3:Constant) - (1:Constant)))))",
- iva_->InductionToString(GetLoopInfo(0), store->InputAt(1)).c_str());
+ EXPECT_STREQ("periodic(((1) + (100)), (100))", GetInductionInfo(add, 0).c_str());
+ EXPECT_STREQ("periodic(((1) - (100)), ((0) - (100)))", GetInductionInfo(sub, 0).c_str());
+ EXPECT_STREQ("periodic((100), (0))", GetInductionInfo(mul, 0).c_str());
+ EXPECT_STREQ("periodic((2), (0))", GetInductionInfo(shl, 0).c_str());
+ EXPECT_STREQ("periodic(( - (1)), (0))", GetInductionInfo(neg, 0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
@@ -485,29 +533,24 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
// }
BuildLoopNest(10);
HInstruction *inc = InsertInstruction(
- new (&allocator_) HAdd(
- Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 9)), 9);
+ new (&allocator_) HAdd(Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 9)), 9);
InsertLocalStore(induc_, inc, 9);
HInstruction* store = InsertArrayStore(induc_, 9);
PerformInductionVarAnalysis();
- // Match exact number of constants, but be less strict on phi number,
- // since that depends on the SSA building phase.
- std::regex r("\\(\\(2:Constant\\) \\* i \\+ "
- "\\(\\(2:Constant\\) \\+ \\(\\d+:Phi\\)\\)\\)");
+ // Avoid exact phi number, since that depends on the SSA building phase.
+ std::regex r("\\(\\(1\\) \\* i \\+ "
+ "\\(\\(1\\) \\+ \\(\\d+:Phi\\)\\)\\)");
for (int d = 0; d < 10; d++) {
if (d == 9) {
- EXPECT_TRUE(std::regex_match(
- iva_->InductionToString(GetLoopInfo(d), store->InputAt(1)), r));
+ EXPECT_TRUE(std::regex_match(GetInductionInfo(store->InputAt(1), d), r));
} else {
- EXPECT_STREQ(
- "",
- iva_->InductionToString(GetLoopInfo(d), store->InputAt(1)).c_str());
+ EXPECT_STREQ("", GetInductionInfo(store->InputAt(1), d).c_str());
}
- EXPECT_STREQ(
- "((2:Constant) * i + ((1:Constant) + (2:Constant)))",
- iva_->InductionToString(GetLoopInfo(d), increment_[d]).c_str());
+ EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[d], d).c_str());
+ // Trip-count.
+ EXPECT_STREQ("(100)", GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
}
}
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
new file mode 100644
index 0000000000..bd903340ad
--- /dev/null
+++ b/compiler/optimizing/induction_var_range.cc
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits.h>
+
+#include "induction_var_range.h"
+
+namespace art {
+
+static bool IsValidConstant32(int32_t c) {
+ return INT_MIN < c && c < INT_MAX;
+}
+
+static bool IsValidConstant64(int64_t c) {
+ return INT_MIN < c && c < INT_MAX;
+}
+
+/** Returns true if 32-bit addition can be done safely (and is not an unknown range). */
+static bool IsSafeAdd(int32_t c1, int32_t c2) {
+ if (IsValidConstant32(c1) && IsValidConstant32(c2)) {
+ return IsValidConstant64(static_cast<int64_t>(c1) + static_cast<int64_t>(c2));
+ }
+ return false;
+}
+
+/** Returns true if 32-bit subtraction can be done safely (and is not an unknown range). */
+static bool IsSafeSub(int32_t c1, int32_t c2) {
+ if (IsValidConstant32(c1) && IsValidConstant32(c2)) {
+ return IsValidConstant64(static_cast<int64_t>(c1) - static_cast<int64_t>(c2));
+ }
+ return false;
+}
+
+/** Returns true if 32-bit multiplication can be done safely (and is not an unknown range). */
+static bool IsSafeMul(int32_t c1, int32_t c2) {
+ if (IsValidConstant32(c1) && IsValidConstant32(c2)) {
+ return IsValidConstant64(static_cast<int64_t>(c1) * static_cast<int64_t>(c2));
+ }
+ return false;
+}
+
+/** Returns true if 32-bit division can be done safely (and is not an unknown range). */
+static bool IsSafeDiv(int32_t c1, int32_t c2) {
+ if (IsValidConstant32(c1) && IsValidConstant32(c2) && c2 != 0) {
+ return IsValidConstant64(static_cast<int64_t>(c1) / static_cast<int64_t>(c2));
+ }
+ return false;
+}
+
+/** Returns true for 32/64-bit integral constant within known range. */
+static bool IsIntAndGet(HInstruction* instruction, int32_t* value) {
+ if (instruction->IsIntConstant()) {
+ const int32_t c = instruction->AsIntConstant()->GetValue();
+ if (IsValidConstant32(c)) {
+ *value = c;
+ return true;
+ }
+ } else if (instruction->IsLongConstant()) {
+ const int64_t c = instruction->AsLongConstant()->GetValue();
+ if (IsValidConstant64(c)) {
+ *value = c;
+ return true;
+ }
+ }
+ return false;
+}
+
+//
+// Public class methods.
+//
+
+InductionVarRange::InductionVarRange(HInductionVarAnalysis* induction_analysis)
+ : induction_analysis_(induction_analysis) {
+}
+
+InductionVarRange::Value InductionVarRange::GetMinInduction(HInstruction* context,
+ HInstruction* instruction) {
+ HLoopInformation* loop = context->GetBlock()->GetLoopInformation();
+ if (loop != nullptr && induction_analysis_ != nullptr) {
+ return GetMin(induction_analysis_->LookupInfo(loop, instruction), GetTripCount(loop, context));
+ }
+ return Value(INT_MIN);
+}
+
+InductionVarRange::Value InductionVarRange::GetMaxInduction(HInstruction* context,
+ HInstruction* instruction) {
+ HLoopInformation* loop = context->GetBlock()->GetLoopInformation();
+ if (loop != nullptr && induction_analysis_ != nullptr) {
+ return GetMax(induction_analysis_->LookupInfo(loop, instruction), GetTripCount(loop, context));
+ }
+ return Value(INT_MAX);
+}
+
+//
+// Private class methods.
+//
+
+HInductionVarAnalysis::InductionInfo* InductionVarRange::GetTripCount(HLoopInformation* loop,
+ HInstruction* context) {
+ // The trip-count expression is only valid when the top-test is taken at least once,
+ // that is, when the analyzed context appears outside the loop header itself.
+ // Early-exit loops are okay, since in those cases, the trip-count is conservative.
+ if (context->GetBlock() != loop->GetHeader()) {
+ HInductionVarAnalysis::InductionInfo* trip =
+ induction_analysis_->LookupInfo(loop, loop->GetHeader()->GetLastInstruction());
+ if (trip != nullptr) {
+ // Wrap the trip-count representation in its own unusual NOP node, so that range analysis
+ // is able to determine the [0, TC - 1] interval without having to construct constants.
+ return induction_analysis_->CreateInvariantOp(HInductionVarAnalysis::kNop, trip, trip);
+ }
+ }
+ return nullptr;
+}
+
+InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction,
+ int32_t fail_value) {
+ // Detect constants and chase the fetch a bit deeper into the HIR tree, so that it becomes
+ // more likely range analysis will compare the same instructions as terminal nodes.
+ int32_t value;
+ if (IsIntAndGet(instruction, &value)) {
+ return Value(value);
+ } else if (instruction->IsAdd()) {
+ if (IsIntAndGet(instruction->InputAt(0), &value)) {
+ return AddValue(Value(value), GetFetch(instruction->InputAt(1), fail_value), fail_value);
+ } else if (IsIntAndGet(instruction->InputAt(1), &value)) {
+ return AddValue(GetFetch(instruction->InputAt(0), fail_value), Value(value), fail_value);
+ }
+ }
+ return Value(instruction, 1, 0);
+}
+
+InductionVarRange::Value InductionVarRange::GetMin(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip) {
+ if (info != nullptr) {
+ switch (info->induction_class) {
+ case HInductionVarAnalysis::kInvariant:
+ // Invariants.
+ switch (info->operation) {
+ case HInductionVarAnalysis::kNop: // normalized: 0
+ DCHECK_EQ(info->op_a, info->op_b);
+ return Value(0);
+ case HInductionVarAnalysis::kAdd:
+ return AddValue(GetMin(info->op_a, trip), GetMin(info->op_b, trip), INT_MIN);
+ case HInductionVarAnalysis::kSub: // second max!
+ return SubValue(GetMin(info->op_a, trip), GetMax(info->op_b, trip), INT_MIN);
+ case HInductionVarAnalysis::kNeg: // second max!
+ return SubValue(Value(0), GetMax(info->op_b, trip), INT_MIN);
+ case HInductionVarAnalysis::kMul:
+ return GetMul(info->op_a, info->op_b, trip, INT_MIN);
+ case HInductionVarAnalysis::kDiv:
+ return GetDiv(info->op_a, info->op_b, trip, INT_MIN);
+ case HInductionVarAnalysis::kFetch:
+ return GetFetch(info->fetch, INT_MIN);
+ }
+ break;
+ case HInductionVarAnalysis::kLinear:
+ // Minimum over linear induction a * i + b, for normalized 0 <= i < TC.
+ return AddValue(GetMul(info->op_a, trip, trip, INT_MIN),
+ GetMin(info->op_b, trip), INT_MIN);
+ case HInductionVarAnalysis::kWrapAround:
+ case HInductionVarAnalysis::kPeriodic:
+ // Minimum over all values in the wrap-around/periodic.
+ return MinValue(GetMin(info->op_a, trip), GetMin(info->op_b, trip));
+ }
+ }
+ return Value(INT_MIN);
+}
+
+InductionVarRange::Value InductionVarRange::GetMax(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip) {
+ if (info != nullptr) {
+ switch (info->induction_class) {
+ case HInductionVarAnalysis::kInvariant:
+ // Invariants.
+ switch (info->operation) {
+ case HInductionVarAnalysis::kNop: // normalized: TC - 1
+ DCHECK_EQ(info->op_a, info->op_b);
+ return SubValue(GetMax(info->op_b, trip), Value(1), INT_MAX);
+ case HInductionVarAnalysis::kAdd:
+ return AddValue(GetMax(info->op_a, trip), GetMax(info->op_b, trip), INT_MAX);
+ case HInductionVarAnalysis::kSub: // second min!
+ return SubValue(GetMax(info->op_a, trip), GetMin(info->op_b, trip), INT_MAX);
+ case HInductionVarAnalysis::kNeg: // second min!
+ return SubValue(Value(0), GetMin(info->op_b, trip), INT_MAX);
+ case HInductionVarAnalysis::kMul:
+ return GetMul(info->op_a, info->op_b, trip, INT_MAX);
+ case HInductionVarAnalysis::kDiv:
+ return GetDiv(info->op_a, info->op_b, trip, INT_MAX);
+ case HInductionVarAnalysis::kFetch:
+ return GetFetch(info->fetch, INT_MAX);
+ }
+ break;
+ case HInductionVarAnalysis::kLinear:
+ // Maximum over linear induction a * i + b, for normalized 0 <= i < TC.
+ return AddValue(GetMul(info->op_a, trip, trip, INT_MAX),
+ GetMax(info->op_b, trip), INT_MAX);
+ case HInductionVarAnalysis::kWrapAround:
+ case HInductionVarAnalysis::kPeriodic:
+ // Maximum over all values in the wrap-around/periodic.
+ return MaxValue(GetMax(info->op_a, trip), GetMax(info->op_b, trip));
+ }
+ }
+ return Value(INT_MAX);
+}
+
+InductionVarRange::Value InductionVarRange::GetMul(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2,
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value) {
+ Value v1_min = GetMin(info1, trip);
+ Value v1_max = GetMax(info1, trip);
+ Value v2_min = GetMin(info2, trip);
+ Value v2_max = GetMax(info2, trip);
+ if (v1_min.a_constant == 0 && v1_min.b_constant >= 0) {
+ // Positive range vs. positive or negative range.
+ if (v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
+ return (fail_value < 0) ? MulValue(v1_min, v2_min, fail_value)
+ : MulValue(v1_max, v2_max, fail_value);
+ } else if (v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
+ return (fail_value < 0) ? MulValue(v1_max, v2_min, fail_value)
+ : MulValue(v1_min, v2_max, fail_value);
+ }
+ } else if (v1_min.a_constant == 0 && v1_min.b_constant <= 0) {
+ // Negative range vs. positive or negative range.
+ if (v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
+ return (fail_value < 0) ? MulValue(v1_min, v2_max, fail_value)
+ : MulValue(v1_max, v2_min, fail_value);
+ } else if (v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
+ return (fail_value < 0) ? MulValue(v1_max, v2_max, fail_value)
+ : MulValue(v1_min, v2_min, fail_value);
+ }
+ }
+ return Value(fail_value);
+}
+
+InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2,
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value) {
+ Value v1_min = GetMin(info1, trip);
+ Value v1_max = GetMax(info1, trip);
+ Value v2_min = GetMin(info2, trip);
+ Value v2_max = GetMax(info2, trip);
+ if (v1_min.a_constant == 0 && v1_min.b_constant >= 0) {
+ // Positive range vs. positive or negative range.
+ if (v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
+ return (fail_value < 0) ? DivValue(v1_min, v2_max, fail_value)
+ : DivValue(v1_max, v2_min, fail_value);
+ } else if (v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
+ return (fail_value < 0) ? DivValue(v1_max, v2_max, fail_value)
+ : DivValue(v1_min, v2_min, fail_value);
+ }
+ } else if (v1_min.a_constant == 0 && v1_min.b_constant <= 0) {
+ // Negative range vs. positive or negative range.
+ if (v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
+ return (fail_value < 0) ? DivValue(v1_min, v2_min, fail_value)
+ : DivValue(v1_max, v2_max, fail_value);
+ } else if (v2_max.a_constant == 0 && v2_max.b_constant <= 0) {
+ return (fail_value < 0) ? DivValue(v1_max, v2_min, fail_value)
+ : DivValue(v1_min, v2_max, fail_value);
+ }
+ }
+ return Value(fail_value);
+}
+
+InductionVarRange::Value InductionVarRange::AddValue(Value v1, Value v2, int32_t fail_value) {
+ if (IsSafeAdd(v1.b_constant, v2.b_constant)) {
+ const int32_t b = v1.b_constant + v2.b_constant;
+ if (v1.a_constant == 0) {
+ return Value(v2.instruction, v2.a_constant, b);
+ } else if (v2.a_constant == 0) {
+ return Value(v1.instruction, v1.a_constant, b);
+ } else if (v1.instruction == v2.instruction && IsSafeAdd(v1.a_constant, v2.a_constant)) {
+ return Value(v1.instruction, v1.a_constant + v2.a_constant, b);
+ }
+ }
+ return Value(fail_value);
+}
+
+InductionVarRange::Value InductionVarRange::SubValue(Value v1, Value v2, int32_t fail_value) {
+ if (IsSafeSub(v1.b_constant, v2.b_constant)) {
+ const int32_t b = v1.b_constant - v2.b_constant;
+ if (v1.a_constant == 0 && IsSafeSub(0, v2.a_constant)) {
+ return Value(v2.instruction, -v2.a_constant, b);
+ } else if (v2.a_constant == 0) {
+ return Value(v1.instruction, v1.a_constant, b);
+ } else if (v1.instruction == v2.instruction && IsSafeSub(v1.a_constant, v2.a_constant)) {
+ return Value(v1.instruction, v1.a_constant - v2.a_constant, b);
+ }
+ }
+ return Value(fail_value);
+}
+
+InductionVarRange::Value InductionVarRange::MulValue(Value v1, Value v2, int32_t fail_value) {
+ if (v1.a_constant == 0) {
+ if (IsSafeMul(v1.b_constant, v2.a_constant) && IsSafeMul(v1.b_constant, v2.b_constant)) {
+ return Value(v2.instruction, v1.b_constant * v2.a_constant, v1.b_constant * v2.b_constant);
+ }
+ } else if (v2.a_constant == 0) {
+ if (IsSafeMul(v1.a_constant, v2.b_constant) && IsSafeMul(v1.b_constant, v2.b_constant)) {
+ return Value(v1.instruction, v1.a_constant * v2.b_constant, v1.b_constant * v2.b_constant);
+ }
+ }
+ return Value(fail_value);
+}
+
+InductionVarRange::Value InductionVarRange::DivValue(Value v1, Value v2, int32_t fail_value) {
+ if (v1.a_constant == 0 && v2.a_constant == 0) {
+ if (IsSafeDiv(v1.b_constant, v2.b_constant)) {
+ return Value(v1.b_constant / v2.b_constant);
+ }
+ }
+ return Value(fail_value);
+}
+
+InductionVarRange::Value InductionVarRange::MinValue(Value v1, Value v2) {
+ if (v1.instruction == v2.instruction && v1.a_constant == v2.a_constant) {
+ return Value(v1.instruction, v1.a_constant, std::min(v1.b_constant, v2.b_constant));
+ }
+ return Value(INT_MIN);
+}
+
+InductionVarRange::Value InductionVarRange::MaxValue(Value v1, Value v2) {
+ if (v1.instruction == v2.instruction && v1.a_constant == v2.a_constant) {
+ return Value(v1.instruction, v1.a_constant, std::max(v1.b_constant, v2.b_constant));
+ }
+ return Value(INT_MAX);
+}
+
+} // namespace art
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
new file mode 100644
index 0000000000..b079076852
--- /dev/null
+++ b/compiler/optimizing/induction_var_range.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INDUCTION_VAR_RANGE_H_
+#define ART_COMPILER_OPTIMIZING_INDUCTION_VAR_RANGE_H_
+
+#include "induction_var_analysis.h"
+
+namespace art {
+
+/**
+ * This class implements induction variable based range analysis on expressions within loops.
+ * It takes the results of induction variable analysis in the constructor and provides a public
+ * API to obtain a conservative lower and upper bound value on each instruction in the HIR.
+ *
+ * For example, given a linear induction 2 * i + x where 0 <= i <= 10, range analysis yields lower
+ * bound value x and upper bound value x + 20 for the expression, thus, the range [x, x + 20].
+ */
+class InductionVarRange {
+ public:
+ /*
+ * A value that can be represented as "a * instruction + b" for 32-bit constants, where
+ * Value(INT_MIN) and Value(INT_MAX) denote an unknown lower and upper bound, respectively.
+ * Although range analysis could yield more complex values, the format is sufficiently powerful
+ * to represent useful cases and feeds directly into optimizations like bounds check elimination.
+ */
+ struct Value {
+ Value(HInstruction* i, int32_t a, int32_t b)
+ : instruction(a ? i : nullptr),
+ a_constant(a),
+ b_constant(b) {}
+ explicit Value(int32_t b) : Value(nullptr, 0, b) {}
+ HInstruction* instruction;
+ int32_t a_constant;
+ int32_t b_constant;
+ };
+
+ explicit InductionVarRange(HInductionVarAnalysis* induction);
+
+ /**
+ * Given a context denoted by the first instruction, returns a,
+ * possibly conservative, lower bound on the instruction's value.
+ */
+ Value GetMinInduction(HInstruction* context, HInstruction* instruction);
+
+ /**
+ * Given a context denoted by the first instruction, returns a,
+ * possibly conservative, upper bound on the instruction's value.
+ */
+ Value GetMaxInduction(HInstruction* context, HInstruction* instruction);
+
+ private:
+ //
+ // Private helper methods.
+ //
+
+ HInductionVarAnalysis::InductionInfo* GetTripCount(HLoopInformation* loop,
+ HInstruction* context);
+
+ static Value GetFetch(HInstruction* instruction, int32_t fail_value);
+
+ static Value GetMin(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip);
+ static Value GetMax(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip);
+ static Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2,
+ HInductionVarAnalysis::InductionInfo* trip, int32_t fail_value);
+ static Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2,
+ HInductionVarAnalysis::InductionInfo* trip, int32_t fail_value);
+
+ static Value AddValue(Value v1, Value v2, int32_t fail_value);
+ static Value SubValue(Value v1, Value v2, int32_t fail_value);
+ static Value MulValue(Value v1, Value v2, int32_t fail_value);
+ static Value DivValue(Value v1, Value v2, int32_t fail_value);
+ static Value MinValue(Value v1, Value v2);
+ static Value MaxValue(Value v1, Value v2);
+
+ /** Results of prior induction variable analysis. */
+ HInductionVarAnalysis *induction_analysis_;
+
+ friend class InductionVarRangeTest;
+
+ DISALLOW_COPY_AND_ASSIGN(InductionVarRange);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INDUCTION_VAR_RANGE_H_
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
new file mode 100644
index 0000000000..d3c3518193
--- /dev/null
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits.h>
+
+#include "base/arena_allocator.h"
+#include "builder.h"
+#include "gtest/gtest.h"
+#include "induction_var_analysis.h"
+#include "induction_var_range.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+namespace art {
+
+using Value = InductionVarRange::Value;
+
+/**
+ * Fixture class for the InductionVarRange tests.
+ */
+class InductionVarRangeTest : public testing::Test {
+ public:
+ InductionVarRangeTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ iva_ = new (&allocator_) HInductionVarAnalysis(graph_);
+ BuildGraph();
+ }
+
+ ~InductionVarRangeTest() { }
+
+ void ExpectEqual(Value v1, Value v2) {
+ EXPECT_EQ(v1.instruction, v2.instruction);
+ EXPECT_EQ(v1.a_constant, v2.a_constant);
+ EXPECT_EQ(v1.b_constant, v2.b_constant);
+ }
+
+ /** Constructs bare minimum graph. */
+ void BuildGraph() {
+ graph_->SetNumberOfVRegs(1);
+ HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block);
+ graph_->AddBlock(exit_block);
+ graph_->SetEntryBlock(entry_block);
+ graph_->SetExitBlock(exit_block);
+ }
+
+ /** Constructs an invariant. */
+ HInductionVarAnalysis::InductionInfo* CreateInvariant(char opc,
+ HInductionVarAnalysis::InductionInfo* a,
+ HInductionVarAnalysis::InductionInfo* b) {
+ HInductionVarAnalysis::InductionOp op;
+ switch (opc) {
+ case '+': op = HInductionVarAnalysis::kAdd; break;
+ case '-': op = HInductionVarAnalysis::kSub; break;
+ case 'n': op = HInductionVarAnalysis::kNeg; break;
+ case '*': op = HInductionVarAnalysis::kMul; break;
+ case '/': op = HInductionVarAnalysis::kDiv; break;
+ default: op = HInductionVarAnalysis::kNop; break;
+ }
+ return iva_->CreateInvariantOp(op, a, b);
+ }
+
+ /** Constructs a fetch. */
+ HInductionVarAnalysis::InductionInfo* CreateFetch(HInstruction* fetch) {
+ return iva_->CreateInvariantFetch(fetch);
+ }
+
+ /** Constructs a constant. */
+ HInductionVarAnalysis::InductionInfo* CreateConst(int32_t c) {
+ return CreateFetch(graph_->GetIntConstant(c));
+ }
+
+ /** Constructs a trip-count. */
+ HInductionVarAnalysis::InductionInfo* CreateTripCount(int32_t tc) {
+ HInductionVarAnalysis::InductionInfo* trip = CreateConst(tc);
+ return CreateInvariant('@', trip, trip);
+ }
+
+ /** Constructs a linear a * i + b induction. */
+ HInductionVarAnalysis::InductionInfo* CreateLinear(int32_t a, int32_t b) {
+ return iva_->CreateInduction(HInductionVarAnalysis::kLinear, CreateConst(a), CreateConst(b));
+ }
+
+ /** Constructs a range [lo, hi] using a periodic induction. */
+ HInductionVarAnalysis::InductionInfo* CreateRange(int32_t lo, int32_t hi) {
+ return iva_->CreateInduction(
+ HInductionVarAnalysis::kPeriodic, CreateConst(lo), CreateConst(hi));
+ }
+
+ /** Constructs a wrap-around induction consisting of a constant, followed by a range. */
+ HInductionVarAnalysis::InductionInfo* CreateWrapAround(int32_t initial, int32_t lo, int32_t hi) {
+ return iva_->CreateInduction(
+ HInductionVarAnalysis::kWrapAround, CreateConst(initial), CreateRange(lo, hi));
+ }
+
+ //
+ // Relay methods.
+ //
+
+ Value GetMin(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* induc) {
+ return InductionVarRange::GetMin(info, induc);
+ }
+
+ Value GetMax(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* induc) {
+ return InductionVarRange::GetMax(info, induc);
+ }
+
+ Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2, int32_t fail_value) {
+ return InductionVarRange::GetMul(info1, info2, nullptr, fail_value);
+ }
+
+ Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
+ HInductionVarAnalysis::InductionInfo* info2, int32_t fail_value) {
+ return InductionVarRange::GetDiv(info1, info2, nullptr, fail_value);
+ }
+
+ Value AddValue(Value v1, Value v2) { return InductionVarRange::AddValue(v1, v2, INT_MIN); }
+ Value SubValue(Value v1, Value v2) { return InductionVarRange::SubValue(v1, v2, INT_MIN); }
+ Value MulValue(Value v1, Value v2) { return InductionVarRange::MulValue(v1, v2, INT_MIN); }
+ Value DivValue(Value v1, Value v2) { return InductionVarRange::DivValue(v1, v2, INT_MIN); }
+ Value MinValue(Value v1, Value v2) { return InductionVarRange::MinValue(v1, v2); }
+ Value MaxValue(Value v1, Value v2) { return InductionVarRange::MaxValue(v1, v2); }
+
+ // General building fields.
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+ HInductionVarAnalysis* iva_;
+
+ // Two dummy instructions.
+ HReturnVoid x_;
+ HReturnVoid y_;
+};
+
+//
+// The actual InductionVarRange tests.
+//
+
+TEST_F(InductionVarRangeTest, GetMinMaxNull) {
+ ExpectEqual(Value(INT_MIN), GetMin(nullptr, nullptr));
+ ExpectEqual(Value(INT_MAX), GetMax(nullptr, nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxAdd) {
+ ExpectEqual(Value(12),
+ GetMin(CreateInvariant('+', CreateConst(2), CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(22),
+ GetMax(CreateInvariant('+', CreateConst(2), CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(&x_, 1, -20),
+ GetMin(CreateInvariant('+', CreateFetch(&x_), CreateRange(-20, -10)), nullptr));
+ ExpectEqual(Value(&x_, 1, -10),
+ GetMax(CreateInvariant('+', CreateFetch(&x_), CreateRange(-20, -10)), nullptr));
+ ExpectEqual(Value(&x_, 1, 10),
+ GetMin(CreateInvariant('+', CreateRange(10, 20), CreateFetch(&x_)), nullptr));
+ ExpectEqual(Value(&x_, 1, 20),
+ GetMax(CreateInvariant('+', CreateRange(10, 20), CreateFetch(&x_)), nullptr));
+ ExpectEqual(Value(5),
+ GetMin(CreateInvariant('+', CreateRange(-5, -1), CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(19),
+ GetMax(CreateInvariant('+', CreateRange(-5, -1), CreateRange(10, 20)), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxSub) {
+ ExpectEqual(Value(-18),
+ GetMin(CreateInvariant('-', CreateConst(2), CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(-8),
+ GetMax(CreateInvariant('-', CreateConst(2), CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(&x_, 1, 10),
+ GetMin(CreateInvariant('-', CreateFetch(&x_), CreateRange(-20, -10)), nullptr));
+ ExpectEqual(Value(&x_, 1, 20),
+ GetMax(CreateInvariant('-', CreateFetch(&x_), CreateRange(-20, -10)), nullptr));
+ ExpectEqual(Value(&x_, -1, 10),
+ GetMin(CreateInvariant('-', CreateRange(10, 20), CreateFetch(&x_)), nullptr));
+ ExpectEqual(Value(&x_, -1, 20),
+ GetMax(CreateInvariant('-', CreateRange(10, 20), CreateFetch(&x_)), nullptr));
+ ExpectEqual(Value(-25),
+ GetMin(CreateInvariant('-', CreateRange(-5, -1), CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(-11),
+ GetMax(CreateInvariant('-', CreateRange(-5, -1), CreateRange(10, 20)), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxNeg) {
+ ExpectEqual(Value(-20), GetMin(CreateInvariant('n', nullptr, CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(-10), GetMax(CreateInvariant('n', nullptr, CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(10), GetMin(CreateInvariant('n', nullptr, CreateRange(-20, -10)), nullptr));
+ ExpectEqual(Value(20), GetMax(CreateInvariant('n', nullptr, CreateRange(-20, -10)), nullptr));
+ ExpectEqual(Value(&x_, -1, 0), GetMin(CreateInvariant('n', nullptr, CreateFetch(&x_)), nullptr));
+ ExpectEqual(Value(&x_, -1, 0), GetMax(CreateInvariant('n', nullptr, CreateFetch(&x_)), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxMul) {
+ ExpectEqual(Value(20),
+ GetMin(CreateInvariant('*', CreateConst(2), CreateRange(10, 20)), nullptr));
+ ExpectEqual(Value(40),
+ GetMax(CreateInvariant('*', CreateConst(2), CreateRange(10, 20)), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxDiv) {
+ ExpectEqual(Value(3),
+ GetMin(CreateInvariant('/', CreateRange(12, 20), CreateConst(4)), nullptr));
+ ExpectEqual(Value(5),
+ GetMax(CreateInvariant('/', CreateRange(12, 20), CreateConst(4)), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxConstant) {
+ ExpectEqual(Value(12345), GetMin(CreateConst(12345), nullptr));
+ ExpectEqual(Value(12345), GetMax(CreateConst(12345), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxFetch) {
+ ExpectEqual(Value(&x_, 1, 0), GetMin(CreateFetch(&x_), nullptr));
+ ExpectEqual(Value(&x_, 1, 0), GetMax(CreateFetch(&x_), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxLinear) {
+ ExpectEqual(Value(20), GetMin(CreateLinear(10, 20), CreateTripCount(100)));
+ ExpectEqual(Value(1010), GetMax(CreateLinear(10, 20), CreateTripCount(100)));
+ ExpectEqual(Value(-970), GetMin(CreateLinear(-10, 20), CreateTripCount(100)));
+ ExpectEqual(Value(20), GetMax(CreateLinear(-10, 20), CreateTripCount(100)));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxWrapAround) {
+ ExpectEqual(Value(-5), GetMin(CreateWrapAround(-5, -1, 10), nullptr));
+ ExpectEqual(Value(10), GetMax(CreateWrapAround(-5, -1, 10), nullptr));
+ ExpectEqual(Value(-1), GetMin(CreateWrapAround(2, -1, 10), nullptr));
+ ExpectEqual(Value(10), GetMax(CreateWrapAround(2, -1, 10), nullptr));
+ ExpectEqual(Value(-1), GetMin(CreateWrapAround(20, -1, 10), nullptr));
+ ExpectEqual(Value(20), GetMax(CreateWrapAround(20, -1, 10), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMinMaxPeriodic) {
+ ExpectEqual(Value(-2), GetMin(CreateRange(-2, 99), nullptr));
+ ExpectEqual(Value(99), GetMax(CreateRange(-2, 99), nullptr));
+}
+
+TEST_F(InductionVarRangeTest, GetMulMin) {
+ ExpectEqual(Value(6), GetMul(CreateRange(2, 10), CreateRange(3, 5), INT_MIN));
+ ExpectEqual(Value(-50), GetMul(CreateRange(2, 10), CreateRange(-5, -3), INT_MIN));
+ ExpectEqual(Value(-50), GetMul(CreateRange(-10, -2), CreateRange(3, 5), INT_MIN));
+ ExpectEqual(Value(6), GetMul(CreateRange(-10, -2), CreateRange(-5, -3), INT_MIN));
+}
+
+TEST_F(InductionVarRangeTest, GetMulMax) {
+ ExpectEqual(Value(50), GetMul(CreateRange(2, 10), CreateRange(3, 5), INT_MAX));
+ ExpectEqual(Value(-6), GetMul(CreateRange(2, 10), CreateRange(-5, -3), INT_MAX));
+ ExpectEqual(Value(-6), GetMul(CreateRange(-10, -2), CreateRange(3, 5), INT_MAX));
+ ExpectEqual(Value(50), GetMul(CreateRange(-10, -2), CreateRange(-5, -3), INT_MAX));
+}
+
+TEST_F(InductionVarRangeTest, GetDivMin) {
+ ExpectEqual(Value(10), GetDiv(CreateRange(40, 1000), CreateRange(2, 4), INT_MIN));
+ ExpectEqual(Value(-500), GetDiv(CreateRange(40, 1000), CreateRange(-4, -2), INT_MIN));
+ ExpectEqual(Value(-500), GetDiv(CreateRange(-1000, -40), CreateRange(2, 4), INT_MIN));
+ ExpectEqual(Value(10), GetDiv(CreateRange(-1000, -40), CreateRange(-4, -2), INT_MIN));
+}
+
+TEST_F(InductionVarRangeTest, GetDivMax) {
+ ExpectEqual(Value(500), GetDiv(CreateRange(40, 1000), CreateRange(2, 4), INT_MAX));
+ ExpectEqual(Value(-10), GetDiv(CreateRange(40, 1000), CreateRange(-4, -2), INT_MAX));
+ ExpectEqual(Value(-10), GetDiv(CreateRange(-1000, -40), CreateRange(2, 4), INT_MAX));
+ ExpectEqual(Value(500), GetDiv(CreateRange(-1000, -40), CreateRange(-4, -2), INT_MAX));
+}
+
+TEST_F(InductionVarRangeTest, AddValue) {
+ ExpectEqual(Value(110), AddValue(Value(10), Value(100)));
+ ExpectEqual(Value(-5), AddValue(Value(&x_, 1, -4), Value(&x_, -1, -1)));
+ ExpectEqual(Value(&x_, 3, -5), AddValue(Value(&x_, 2, -4), Value(&x_, 1, -1)));
+ ExpectEqual(Value(INT_MIN), AddValue(Value(&x_, 1, 5), Value(&y_, 1, -7)));
+ ExpectEqual(Value(&x_, 1, 23), AddValue(Value(&x_, 1, 20), Value(3)));
+ ExpectEqual(Value(&y_, 1, 5), AddValue(Value(55), Value(&y_, 1, -50)));
+ // Unsafe.
+ ExpectEqual(Value(INT_MIN), AddValue(Value(INT_MAX - 5), Value(6)));
+}
+
+TEST_F(InductionVarRangeTest, SubValue) {
+ ExpectEqual(Value(-90), SubValue(Value(10), Value(100)));
+ ExpectEqual(Value(-3), SubValue(Value(&x_, 1, -4), Value(&x_, 1, -1)));
+ ExpectEqual(Value(&x_, 2, -3), SubValue(Value(&x_, 3, -4), Value(&x_, 1, -1)));
+ ExpectEqual(Value(INT_MIN), SubValue(Value(&x_, 1, 5), Value(&y_, 1, -7)));
+ ExpectEqual(Value(&x_, 1, 17), SubValue(Value(&x_, 1, 20), Value(3)));
+ ExpectEqual(Value(&y_, -4, 105), SubValue(Value(55), Value(&y_, 4, -50)));
+ // Unsafe.
+ ExpectEqual(Value(INT_MIN), SubValue(Value(INT_MIN + 5), Value(6)));
+}
+
+TEST_F(InductionVarRangeTest, MulValue) {
+ ExpectEqual(Value(1000), MulValue(Value(10), Value(100)));
+ ExpectEqual(Value(INT_MIN), MulValue(Value(&x_, 1, -4), Value(&x_, 1, -1)));
+ ExpectEqual(Value(INT_MIN), MulValue(Value(&x_, 1, 5), Value(&y_, 1, -7)));
+ ExpectEqual(Value(&x_, 9, 60), MulValue(Value(&x_, 3, 20), Value(3)));
+ ExpectEqual(Value(&y_, 55, -110), MulValue(Value(55), Value(&y_, 1, -2)));
+ // Unsafe.
+ ExpectEqual(Value(INT_MIN), MulValue(Value(90000), Value(-90000)));
+}
+
+TEST_F(InductionVarRangeTest, DivValue) {
+ ExpectEqual(Value(25), DivValue(Value(100), Value(4)));
+ ExpectEqual(Value(INT_MIN), DivValue(Value(&x_, 1, -4), Value(&x_, 1, -1)));
+ ExpectEqual(Value(INT_MIN), DivValue(Value(&x_, 1, 5), Value(&y_, 1, -7)));
+ ExpectEqual(Value(INT_MIN), DivValue(Value(&x_, 12, 24), Value(3)));
+ ExpectEqual(Value(INT_MIN), DivValue(Value(55), Value(&y_, 1, -50)));
+ // Unsafe.
+ ExpectEqual(Value(INT_MIN), DivValue(Value(1), Value(0)));
+}
+
+TEST_F(InductionVarRangeTest, MinValue) {
+ ExpectEqual(Value(10), MinValue(Value(10), Value(100)));
+ ExpectEqual(Value(&x_, 1, -4), MinValue(Value(&x_, 1, -4), Value(&x_, 1, -1)));
+ ExpectEqual(Value(&x_, 4, -4), MinValue(Value(&x_, 4, -4), Value(&x_, 4, -1)));
+ ExpectEqual(Value(INT_MIN), MinValue(Value(&x_, 1, 5), Value(&y_, 1, -7)));
+ ExpectEqual(Value(INT_MIN), MinValue(Value(&x_, 1, 20), Value(3)));
+ ExpectEqual(Value(INT_MIN), MinValue(Value(55), Value(&y_, 1, -50)));
+}
+
+TEST_F(InductionVarRangeTest, MaxValue) {
+ ExpectEqual(Value(100), MaxValue(Value(10), Value(100)));
+ ExpectEqual(Value(&x_, 1, -1), MaxValue(Value(&x_, 1, -4), Value(&x_, 1, -1)));
+ ExpectEqual(Value(&x_, 4, -1), MaxValue(Value(&x_, 4, -4), Value(&x_, 4, -1)));
+ ExpectEqual(Value(INT_MAX), MaxValue(Value(&x_, 1, 5), Value(&y_, 1, -7)));
+ ExpectEqual(Value(INT_MAX), MaxValue(Value(&x_, 1, 20), Value(3)));
+ ExpectEqual(Value(INT_MAX), MaxValue(Value(55), Value(&y_, 1, -50)));
+}
+
+} // namespace art
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 075ec1ee2e..b71fdb8f1d 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -16,12 +16,17 @@
#include "intrinsics.h"
+#include "art_method.h"
+#include "class_linker.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_driver.h"
#include "invoke_type.h"
+#include "mirror/dex_cache-inl.h"
#include "nodes.h"
#include "quick/inline_method_analyser.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
#include "utils.h"
namespace art {
@@ -120,6 +125,28 @@ static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_s
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
+ case kIntrinsicRotateRight:
+ switch (GetType(method.d.data, true)) {
+ case Primitive::kPrimInt:
+ return Intrinsics::kIntegerRotateRight;
+ case Primitive::kPrimLong:
+ return Intrinsics::kLongRotateRight;
+ default:
+ LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+ UNREACHABLE();
+ }
+ case kIntrinsicRotateLeft:
+ switch (GetType(method.d.data, true)) {
+ case Primitive::kPrimInt:
+ return Intrinsics::kIntegerRotateLeft;
+ case Primitive::kPrimLong:
+ return Intrinsics::kLongRotateLeft;
+ default:
+ LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+ UNREACHABLE();
+ }
+
+ // Misc data processing.
case kIntrinsicNumberOfLeadingZeros:
switch (GetType(method.d.data, true)) {
case Primitive::kPrimInt:
@@ -130,6 +157,16 @@ static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_s
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
+ case kIntrinsicNumberOfTrailingZeros:
+ switch (GetType(method.d.data, true)) {
+ case Primitive::kPrimInt:
+ return Intrinsics::kIntegerNumberOfTrailingZeros;
+ case Primitive::kPrimLong:
+ return Intrinsics::kLongNumberOfTrailingZeros;
+ default:
+ LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+ UNREACHABLE();
+ }
// Abs.
case kIntrinsicAbsDouble:
@@ -328,14 +365,23 @@ static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_s
return Intrinsics::kNone;
}
-static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
+static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke, const DexFile& dex_file) {
// The DexFileMethodInliner should have checked whether the methods are agreeing with
// what we expect, i.e., static methods are called as such. Add another check here for
// our expectations:
- // Whenever the intrinsic is marked as static-or-direct, report an error if we find an
- // InvokeVirtual. The other direction is not possible: we have intrinsics for virtual
- // functions that will perform a check inline. If the precise type is known, however,
- // the instruction will be sharpened to an InvokeStaticOrDirect.
+ //
+ // Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual.
+ //
+ // Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization
+ // failure occurred. We might be in a situation where we have inlined a method that calls an
+ // intrinsic, but that method is in a different dex file on which we do not have a
+ // verified_method that would have helped the compiler driver sharpen the call. In that case,
+ // make sure that the intrinsic is actually for some final method (or in a final class), as
+ // otherwise the intrinsics setup is broken.
+ //
+ // For the last direction, we have intrinsics for virtual functions that will perform a check
+ // inline. If the precise type is known, however, the instruction will be sharpened to an
+ // InvokeStaticOrDirect.
InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
InvokeType invoke_type = invoke->IsInvokeStaticOrDirect() ?
invoke->AsInvokeStaticOrDirect()->GetInvokeType() :
@@ -343,8 +389,22 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
switch (intrinsic_type) {
case kStatic:
return (invoke_type == kStatic);
+
case kDirect:
- return (invoke_type == kDirect);
+ if (invoke_type == kDirect) {
+ return true;
+ }
+ if (invoke_type == kVirtual) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* art_method =
+ class_linker->FindDexCache(soa.Self(), dex_file)->GetResolvedMethod(
+ invoke->GetDexMethodIndex(), class_linker->GetImagePointerSize());
+ return art_method != nullptr &&
+ (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal());
+ }
+ return false;
+
case kVirtual:
// Call might be devirtualized.
return (invoke_type == kVirtual || invoke_type == kDirect);
@@ -364,17 +424,18 @@ void IntrinsicsRecognizer::Run() {
if (inst->IsInvoke()) {
HInvoke* invoke = inst->AsInvoke();
InlineMethod method;
- DexFileMethodInliner* inliner =
- driver_->GetMethodInlinerMap()->GetMethodInliner(&invoke->GetDexFile());
+ const DexFile& dex_file = invoke->GetDexFile();
+ DexFileMethodInliner* inliner = driver_->GetMethodInlinerMap()->GetMethodInliner(&dex_file);
DCHECK(inliner != nullptr);
if (inliner->IsIntrinsic(invoke->GetDexMethodIndex(), &method)) {
Intrinsics intrinsic = GetIntrinsic(method, graph_->GetInstructionSet());
if (intrinsic != Intrinsics::kNone) {
- if (!CheckInvokeType(intrinsic, invoke)) {
+ if (!CheckInvokeType(intrinsic, invoke, dex_file)) {
LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
- << intrinsic << " for "
- << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile());
+ << intrinsic << " for "
+ << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile())
+ << invoke->DebugName();
} else {
invoke->SetIntrinsic(intrinsic, NeedsEnvironmentOrCache(intrinsic));
}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 69a3e627c9..cc8ddb6299 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -103,11 +103,11 @@ class IntrinsicSlowPathARM : public SlowPathCodeARM {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
Location::RegisterLocation(kArtMethodRegister));
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
- UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
- UNREACHABLE();
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(),
+ Location::RegisterLocation(kArtMethodRegister));
}
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -266,6 +266,227 @@ void IntrinsicCodeGeneratorARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}
+static void GenNumberOfTrailingZeros(LocationSummary* locations,
+ Primitive::Type type,
+ ArmAssembler* assembler) {
+ DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+
+ Register out = locations->Out().AsRegister<Register>();
+
+ if (type == Primitive::kPrimLong) {
+ Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Label end;
+ __ rbit(out, in_reg_lo);
+ __ clz(out, out);
+ __ CompareAndBranchIfNonZero(in_reg_lo, &end);
+ __ rbit(out, in_reg_hi);
+ __ clz(out, out);
+ __ AddConstant(out, 32);
+ __ Bind(&end);
+ } else {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ __ rbit(out, in);
+ __ clz(out, out);
+ }
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
+static void GenIntegerRotate(LocationSummary* locations,
+ ArmAssembler* assembler,
+ bool is_left) {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ Location rhs = locations->InAt(1);
+ Register out = locations->Out().AsRegister<Register>();
+
+ if (rhs.IsConstant()) {
+ // Arm32 and Thumb2 assemblers require a rotation on the interval [1,31],
+ // so map all rotations to a +ve. equivalent in that range.
+ // (e.g. left *or* right by -2 bits == 30 bits in the same direction.)
+ uint32_t rot = rhs.GetConstant()->AsIntConstant()->GetValue() & 0x1F;
+ if (rot) {
+ // Rotate, mapping left rotations to right equivalents if necessary.
+ // (e.g. left by 2 bits == right by 30.)
+ __ Ror(out, in, is_left ? (0x20 - rot) : rot);
+ } else if (out != in) {
+ __ Mov(out, in);
+ }
+ } else {
+ if (is_left) {
+ __ rsb(out, rhs.AsRegister<Register>(), ShifterOperand(0));
+ __ Ror(out, in, out);
+ } else {
+ __ Ror(out, in, rhs.AsRegister<Register>());
+ }
+ }
+}
+
+// Gain some speed by mapping all Long rotates onto equivalent pairs of Integer
+// rotates by swapping input regs (effectively rotating by the first 32-bits of
+// a larger rotation) or flipping direction (thus treating larger right/left
+// rotations as sub-word sized rotations in the other direction) as appropriate.
+static void GenLongRotate(LocationSummary* locations,
+ ArmAssembler* assembler,
+ bool is_left) {
+ Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Location rhs = locations->InAt(1);
+ Register out_reg_lo = locations->Out().AsRegisterPairLow<Register>();
+ Register out_reg_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+ if (rhs.IsConstant()) {
+ uint32_t rot = rhs.GetConstant()->AsIntConstant()->GetValue();
+ // Map all left rotations to right equivalents.
+ if (is_left) {
+ rot = 0x40 - rot;
+ }
+ // Map all rotations to +ve. equivalents on the interval [0,63].
+ rot &= 0x3F;
+ // For rotates over a word in size, 'pre-rotate' by 32-bits to keep rotate
+ // logic below to a simple pair of binary orr.
+ // (e.g. 34 bits == in_reg swap + 2 bits right.)
+ if (rot >= 0x20) {
+ rot -= 0x20;
+ std::swap(in_reg_hi, in_reg_lo);
+ }
+ // Rotate, or mov to out for zero or word size rotations.
+ if (rot) {
+ __ Lsr(out_reg_hi, in_reg_hi, rot);
+ __ orr(out_reg_hi, out_reg_hi, ShifterOperand(in_reg_lo, arm::LSL, 0x20 - rot));
+ __ Lsr(out_reg_lo, in_reg_lo, rot);
+ __ orr(out_reg_lo, out_reg_lo, ShifterOperand(in_reg_hi, arm::LSL, 0x20 - rot));
+ } else {
+ __ Mov(out_reg_lo, in_reg_lo);
+ __ Mov(out_reg_hi, in_reg_hi);
+ }
+ } else {
+ Register shift_left = locations->GetTemp(0).AsRegister<Register>();
+ Register shift_right = locations->GetTemp(1).AsRegister<Register>();
+ Label end;
+ Label right;
+
+ __ and_(shift_left, rhs.AsRegister<Register>(), ShifterOperand(0x1F));
+ __ Lsrs(shift_right, rhs.AsRegister<Register>(), 6);
+ __ rsb(shift_right, shift_left, ShifterOperand(0x20), AL, kCcKeep);
+
+ if (is_left) {
+ __ b(&right, CS);
+ } else {
+ __ b(&right, CC);
+ std::swap(shift_left, shift_right);
+ }
+
+ // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
+ // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
+ __ Lsl(out_reg_hi, in_reg_hi, shift_left);
+ __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+ __ add(out_reg_hi, out_reg_hi, ShifterOperand(out_reg_lo));
+ __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+ __ Lsr(shift_left, in_reg_hi, shift_right);
+ __ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_left));
+ __ b(&end);
+
+ // out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
+ // out_reg_lo = (reg_lo >> shift_right) | (reg_hi << shift_left).
+ __ Bind(&right);
+ __ Lsr(out_reg_hi, in_reg_hi, shift_right);
+ __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+ __ add(out_reg_hi, out_reg_hi, ShifterOperand(out_reg_lo));
+ __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+ __ Lsl(shift_right, in_reg_hi, shift_left);
+ __ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_right));
+
+ __ Bind(&end);
+ }
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerRotateRight(HInvoke* invoke) {
+ GenIntegerRotate(invoke->GetLocations(), GetAssembler(), false /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (invoke->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(invoke->InputAt(1)->AsConstant()));
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongRotateRight(HInvoke* invoke) {
+ GenLongRotate(invoke->GetLocations(), GetAssembler(), false /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerRotateLeft(HInvoke* invoke) {
+ GenIntegerRotate(invoke->GetLocations(), GetAssembler(), true /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (invoke->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(invoke->InputAt(1)->AsConstant()));
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongRotateLeft(HInvoke* invoke) {
+ GenLongRotate(invoke->GetLocations(), GetAssembler(), true /* is_left */);
+}
+
static void MathAbsFP(LocationSummary* locations, bool is64bit, ArmAssembler* assembler) {
Location in = locations->InAt(0);
Location out = locations->Out();
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 0171d6949d..b0cfd0d1bc 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -41,12 +41,12 @@ using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::LocationFrom;
+using helpers::OperandFrom;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
-
namespace {
ALWAYS_INLINE inline MemOperand AbsoluteHeapOperandFrom(Location location, size_t offset = 0) {
@@ -112,11 +112,10 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
LocationFrom(kArtMethodRegister));
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
- UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
- UNREACHABLE();
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), LocationFrom(kArtMethodRegister));
}
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -287,6 +286,131 @@ void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke)
GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
}
+static void GenNumberOfTrailingZeros(LocationSummary* locations,
+ Primitive::Type type,
+ vixl::MacroAssembler* masm) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+
+ __ Rbit(RegisterFrom(out, type), RegisterFrom(in, type));
+ __ Clz(RegisterFrom(out, type), RegisterFrom(out, type));
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
+static void GenRotateRight(LocationSummary* locations,
+ Primitive::Type type,
+ vixl::MacroAssembler* masm) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+ Operand rhs = OperandFrom(locations->InAt(1), type);
+
+ if (rhs.IsImmediate()) {
+ uint32_t shift = rhs.immediate() & (RegisterFrom(in, type).SizeInBits() - 1);
+ __ Ror(RegisterFrom(out, type),
+ RegisterFrom(in, type),
+ shift);
+ } else {
+ DCHECK(rhs.shift() == vixl::LSL && rhs.shift_amount() == 0);
+ __ Ror(RegisterFrom(out, type),
+ RegisterFrom(in, type),
+ rhs.reg());
+ }
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
+static void GenRotateLeft(LocationSummary* locations,
+ Primitive::Type type,
+ vixl::MacroAssembler* masm) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+ Operand rhs = OperandFrom(locations->InAt(1), type);
+
+ if (rhs.IsImmediate()) {
+ uint32_t regsize = RegisterFrom(in, type).SizeInBits();
+ uint32_t shift = (regsize - rhs.immediate()) & (regsize - 1);
+ __ Ror(RegisterFrom(out, type), RegisterFrom(in, type), shift);
+ } else {
+ DCHECK(rhs.shift() == vixl::LSL && rhs.shift_amount() == 0);
+ __ Neg(RegisterFrom(out, type),
+ Operand(RegisterFrom(locations->InAt(1), type)));
+ __ Ror(RegisterFrom(out, type),
+ RegisterFrom(in, type),
+ RegisterFrom(out, type));
+ }
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
static void GenReverse(LocationSummary* locations,
Primitive::Type type,
vixl::MacroAssembler* masm) {
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 7e5339ec21..bfe5e55c56 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -29,9 +29,15 @@
V(IntegerReverse, kStatic, kNeedsEnvironmentOrCache) \
V(IntegerReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(IntegerNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(IntegerRotateRight, kStatic, kNeedsEnvironmentOrCache) \
+ V(IntegerRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
V(LongReverse, kStatic, kNeedsEnvironmentOrCache) \
V(LongReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(LongNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
+ V(LongRotateRight, kStatic, kNeedsEnvironmentOrCache) \
+ V(LongRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
V(ShortReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
V(MathAbsDouble, kStatic, kNeedsEnvironmentOrCache) \
V(MathAbsFloat, kStatic, kNeedsEnvironmentOrCache) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index be076cd3ff..c5d88d2b25 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -141,11 +141,10 @@ class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
Location::RegisterLocation(EAX));
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
- UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
- UNREACHABLE();
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), Location::RegisterLocation(EAX));
}
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -1957,6 +1956,12 @@ UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1f35b597fe..258ae9a55f 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -132,11 +132,10 @@ class IntrinsicSlowPathX86_64 : public SlowPathCodeX86_64 {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(
invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
- UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
- UNREACHABLE();
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), Location::RegisterLocation(RDI));
}
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -1775,6 +1774,12 @@ void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSE
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 4332d7ed02..650c8e5fed 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -207,7 +207,7 @@ void HGraph::SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor) {
// Insert a new node between `block` and `successor` to split the
// critical edge.
HBasicBlock* new_block = SplitEdge(block, successor);
- new_block->AddInstruction(new (arena_) HGoto());
+ new_block->AddInstruction(new (arena_) HGoto(successor->GetDexPc()));
if (successor->IsLoopHeader()) {
// If we split at a back edge boundary, make the new block the back edge.
HLoopInformation* info = successor->GetLoopInformation();
@@ -228,7 +228,7 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
if (number_of_incomings != 1) {
HBasicBlock* pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
AddBlock(pre_header);
- pre_header->AddInstruction(new (arena_) HGoto());
+ pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc()));
for (size_t pred = 0; pred < header->GetPredecessors().Size(); ++pred) {
HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
@@ -409,12 +409,12 @@ void HGraph::InsertConstant(HConstant* constant) {
}
}
-HNullConstant* HGraph::GetNullConstant() {
+HNullConstant* HGraph::GetNullConstant(uint32_t dex_pc) {
// For simplicity, don't bother reviving the cached null constant if it is
// not null and not in a block. Otherwise, we need to clear the instruction
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) {
- cached_null_constant_ = new (arena_) HNullConstant();
+ cached_null_constant_ = new (arena_) HNullConstant(dex_pc);
InsertConstant(cached_null_constant_);
}
return cached_null_constant_;
@@ -426,7 +426,8 @@ HCurrentMethod* HGraph::GetCurrentMethod() {
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) {
cached_current_method_ = new (arena_) HCurrentMethod(
- Is64BitInstructionSet(instruction_set_) ? Primitive::kPrimLong : Primitive::kPrimInt);
+ Is64BitInstructionSet(instruction_set_) ? Primitive::kPrimLong : Primitive::kPrimInt,
+ entry_block_->GetDexPc());
if (entry_block_->GetFirstInstruction() == nullptr) {
entry_block_->AddInstruction(cached_current_method_);
} else {
@@ -437,7 +438,7 @@ HCurrentMethod* HGraph::GetCurrentMethod() {
return cached_current_method_;
}
-HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value) {
+HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value, uint32_t dex_pc) {
switch (type) {
case Primitive::Type::kPrimBoolean:
DCHECK(IsUint<1>(value));
@@ -447,10 +448,10 @@ HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value) {
case Primitive::Type::kPrimShort:
case Primitive::Type::kPrimInt:
DCHECK(IsInt(Primitive::ComponentSize(type) * kBitsPerByte, value));
- return GetIntConstant(static_cast<int32_t>(value));
+ return GetIntConstant(static_cast<int32_t>(value), dex_pc);
case Primitive::Type::kPrimLong:
- return GetLongConstant(value);
+ return GetLongConstant(value, dex_pc);
default:
LOG(FATAL) << "Unsupported constant type";
@@ -944,11 +945,11 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
int32_t value = GetInput()->AsIntConstant()->GetValue();
switch (GetResultType()) {
case Primitive::kPrimLong:
- return graph->GetLongConstant(static_cast<int64_t>(value));
+ return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc());
case Primitive::kPrimFloat:
- return graph->GetFloatConstant(static_cast<float>(value));
+ return graph->GetFloatConstant(static_cast<float>(value), GetDexPc());
case Primitive::kPrimDouble:
- return graph->GetDoubleConstant(static_cast<double>(value));
+ return graph->GetDoubleConstant(static_cast<double>(value), GetDexPc());
default:
return nullptr;
}
@@ -956,11 +957,11 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
int64_t value = GetInput()->AsLongConstant()->GetValue();
switch (GetResultType()) {
case Primitive::kPrimInt:
- return graph->GetIntConstant(static_cast<int32_t>(value));
+ return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc());
case Primitive::kPrimFloat:
- return graph->GetFloatConstant(static_cast<float>(value));
+ return graph->GetFloatConstant(static_cast<float>(value), GetDexPc());
case Primitive::kPrimDouble:
- return graph->GetDoubleConstant(static_cast<double>(value));
+ return graph->GetDoubleConstant(static_cast<double>(value), GetDexPc());
default:
return nullptr;
}
@@ -969,22 +970,22 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
switch (GetResultType()) {
case Primitive::kPrimInt:
if (std::isnan(value))
- return graph->GetIntConstant(0);
+ return graph->GetIntConstant(0, GetDexPc());
if (value >= kPrimIntMax)
- return graph->GetIntConstant(kPrimIntMax);
+ return graph->GetIntConstant(kPrimIntMax, GetDexPc());
if (value <= kPrimIntMin)
- return graph->GetIntConstant(kPrimIntMin);
- return graph->GetIntConstant(static_cast<int32_t>(value));
+ return graph->GetIntConstant(kPrimIntMin, GetDexPc());
+ return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc());
case Primitive::kPrimLong:
if (std::isnan(value))
- return graph->GetLongConstant(0);
+ return graph->GetLongConstant(0, GetDexPc());
if (value >= kPrimLongMax)
- return graph->GetLongConstant(kPrimLongMax);
+ return graph->GetLongConstant(kPrimLongMax, GetDexPc());
if (value <= kPrimLongMin)
- return graph->GetLongConstant(kPrimLongMin);
- return graph->GetLongConstant(static_cast<int64_t>(value));
+ return graph->GetLongConstant(kPrimLongMin, GetDexPc());
+ return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc());
case Primitive::kPrimDouble:
- return graph->GetDoubleConstant(static_cast<double>(value));
+ return graph->GetDoubleConstant(static_cast<double>(value), GetDexPc());
default:
return nullptr;
}
@@ -993,22 +994,22 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
switch (GetResultType()) {
case Primitive::kPrimInt:
if (std::isnan(value))
- return graph->GetIntConstant(0);
+ return graph->GetIntConstant(0, GetDexPc());
if (value >= kPrimIntMax)
- return graph->GetIntConstant(kPrimIntMax);
+ return graph->GetIntConstant(kPrimIntMax, GetDexPc());
if (value <= kPrimLongMin)
- return graph->GetIntConstant(kPrimIntMin);
- return graph->GetIntConstant(static_cast<int32_t>(value));
+ return graph->GetIntConstant(kPrimIntMin, GetDexPc());
+ return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc());
case Primitive::kPrimLong:
if (std::isnan(value))
- return graph->GetLongConstant(0);
+ return graph->GetLongConstant(0, GetDexPc());
if (value >= kPrimLongMax)
- return graph->GetLongConstant(kPrimLongMax);
+ return graph->GetLongConstant(kPrimLongMax, GetDexPc());
if (value <= kPrimLongMin)
- return graph->GetLongConstant(kPrimLongMin);
- return graph->GetLongConstant(static_cast<int64_t>(value));
+ return graph->GetLongConstant(kPrimLongMin, GetDexPc());
+ return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc());
case Primitive::kPrimFloat:
- return graph->GetFloatConstant(static_cast<float>(value));
+ return graph->GetFloatConstant(static_cast<float>(value), GetDexPc());
default:
return nullptr;
}
@@ -1122,7 +1123,8 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) {
DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented";
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
+ HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
+ cursor->GetDexPc());
new_block->instructions_.first_instruction_ = cursor;
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
instructions_.last_instruction_ = cursor->previous_;
@@ -1134,7 +1136,7 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) {
}
new_block->instructions_.SetBlockOfInstructions(new_block);
- AddInstruction(new (GetGraph()->GetArena()) HGoto());
+ AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc()));
for (size_t i = 0, e = GetSuccessors().Size(); i < e; ++i) {
HBasicBlock* successor = GetSuccessors().Get(i);
@@ -1309,7 +1311,7 @@ void HBasicBlock::DisconnectAndDelete() {
predecessor->RemoveSuccessor(this);
if (predecessor->GetSuccessors().Size() == 1u) {
DCHECK(last_instruction->IsIf());
- predecessor->AddInstruction(new (graph_->GetArena()) HGoto());
+ predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc()));
} else {
// The predecessor has no remaining successors and therefore must be dead.
// We deliberately leave it without a control-flow instruction so that the
@@ -1562,13 +1564,13 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
if (!returns_void) {
return_value = last->InputAt(0);
}
- predecessor->AddInstruction(new (allocator) HGoto());
+ predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
predecessor->RemoveInstruction(last);
} else {
if (!returns_void) {
// There will be multiple returns.
return_value = new (allocator) HPhi(
- allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()));
+ allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
to->AddPhi(return_value->AsPhi());
}
for (size_t i = 0, e = to->GetPredecessors().Size(); i < e; ++i) {
@@ -1577,7 +1579,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
if (!returns_void) {
return_value->AsPhi()->AddInput(last->InputAt(0));
}
- predecessor->AddInstruction(new (allocator) HGoto());
+ predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
predecessor->RemoveInstruction(last);
}
}
@@ -1659,15 +1661,19 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
for (HInstructionIterator it(entry_block_->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->IsNullConstant()) {
- current->ReplaceWith(outer_graph->GetNullConstant());
+ current->ReplaceWith(outer_graph->GetNullConstant(current->GetDexPc()));
} else if (current->IsIntConstant()) {
- current->ReplaceWith(outer_graph->GetIntConstant(current->AsIntConstant()->GetValue()));
+ current->ReplaceWith(outer_graph->GetIntConstant(
+ current->AsIntConstant()->GetValue(), current->GetDexPc()));
} else if (current->IsLongConstant()) {
- current->ReplaceWith(outer_graph->GetLongConstant(current->AsLongConstant()->GetValue()));
+ current->ReplaceWith(outer_graph->GetLongConstant(
+ current->AsLongConstant()->GetValue(), current->GetDexPc()));
} else if (current->IsFloatConstant()) {
- current->ReplaceWith(outer_graph->GetFloatConstant(current->AsFloatConstant()->GetValue()));
+ current->ReplaceWith(outer_graph->GetFloatConstant(
+ current->AsFloatConstant()->GetValue(), current->GetDexPc()));
} else if (current->IsDoubleConstant()) {
- current->ReplaceWith(outer_graph->GetDoubleConstant(current->AsDoubleConstant()->GetValue()));
+ current->ReplaceWith(outer_graph->GetDoubleConstant(
+ current->AsDoubleConstant()->GetValue(), current->GetDexPc()));
} else if (current->IsParameterValue()) {
if (kIsDebugBuild
&& invoke->IsInvokeStaticOrDirect()
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fef6f21b46..23d605b7b5 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -77,6 +77,8 @@ static constexpr uint32_t kUnknownFieldIndex = static_cast<uint32_t>(-1);
static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
+static constexpr uint32_t kNoDexPc = static_cast<uint32_t>(-1);
+
enum IfCondition {
kCondEQ,
kCondNE,
@@ -316,24 +318,24 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// Returns a constant of the given type and value. If it does not exist
// already, it is created and inserted into the graph. This method is only for
// integral types.
- HConstant* GetConstant(Primitive::Type type, int64_t value);
+ HConstant* GetConstant(Primitive::Type type, int64_t value, uint32_t dex_pc = kNoDexPc);
// TODO: This is problematic for the consistency of reference type propagation
// because it can be created anytime after the pass and thus it will be left
// with an invalid type.
- HNullConstant* GetNullConstant();
+ HNullConstant* GetNullConstant(uint32_t dex_pc = kNoDexPc);
- HIntConstant* GetIntConstant(int32_t value) {
- return CreateConstant(value, &cached_int_constants_);
+ HIntConstant* GetIntConstant(int32_t value, uint32_t dex_pc = kNoDexPc) {
+ return CreateConstant(value, &cached_int_constants_, dex_pc);
}
- HLongConstant* GetLongConstant(int64_t value) {
- return CreateConstant(value, &cached_long_constants_);
+ HLongConstant* GetLongConstant(int64_t value, uint32_t dex_pc = kNoDexPc) {
+ return CreateConstant(value, &cached_long_constants_, dex_pc);
}
- HFloatConstant* GetFloatConstant(float value) {
- return CreateConstant(bit_cast<int32_t, float>(value), &cached_float_constants_);
+ HFloatConstant* GetFloatConstant(float value, uint32_t dex_pc = kNoDexPc) {
+ return CreateConstant(bit_cast<int32_t, float>(value), &cached_float_constants_, dex_pc);
}
- HDoubleConstant* GetDoubleConstant(double value) {
- return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_);
+ HDoubleConstant* GetDoubleConstant(double value, uint32_t dex_pc = kNoDexPc) {
+ return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_, dex_pc);
}
HCurrentMethod* GetCurrentMethod();
@@ -372,7 +374,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
template <class InstructionType, typename ValueType>
InstructionType* CreateConstant(ValueType value,
- ArenaSafeMap<ValueType, InstructionType*>* cache) {
+ ArenaSafeMap<ValueType, InstructionType*>* cache,
+ uint32_t dex_pc = kNoDexPc) {
// Try to find an existing constant of the given value.
InstructionType* constant = nullptr;
auto cached_constant = cache->find(value);
@@ -383,7 +386,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// If not found or previously deleted, create and cache a new instruction.
// Don't bother reviving a previously deleted instruction, for simplicity.
if (constant == nullptr || constant->GetBlock() == nullptr) {
- constant = new (arena_) InstructionType(value);
+ constant = new (arena_) InstructionType(value, dex_pc);
cache->Overwrite(value, constant);
InsertConstant(constant);
}
@@ -618,7 +621,6 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
};
static constexpr size_t kNoLifetime = -1;
-static constexpr uint32_t kNoDexPc = -1;
// A block in a method. Contains the list of instructions represented
// as a double linked list. Each block knows its predecessors and
@@ -626,7 +628,7 @@ static constexpr uint32_t kNoDexPc = -1;
class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
public:
- explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
+  explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
predecessors_(graph->GetArena(), kDefaultNumberOfPredecessors),
successors_(graph->GetArena(), kDefaultNumberOfSuccessors),
@@ -683,6 +685,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
int GetBlockId() const { return block_id_; }
void SetBlockId(int id) { block_id_ = id; }
+ uint32_t GetDexPc() const { return dex_pc_; }
HBasicBlock* GetDominator() const { return dominator_; }
void SetDominator(HBasicBlock* dominator) { dominator_ = dominator; }
@@ -943,7 +946,6 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
void SetLifetimeStart(size_t start) { lifetime_start_ = start; }
void SetLifetimeEnd(size_t end) { lifetime_end_ = end; }
- uint32_t GetDexPc() const { return dex_pc_; }
bool EndsWithControlFlowInstruction() const;
bool EndsWithIf() const;
@@ -1076,7 +1078,9 @@ class HLoopInformationOutwardIterator : public ValueObject {
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
-#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
+ M(X86ComputeBaseMethodAddress, Instruction) \
+ M(X86LoadFromConstantTable, Instruction)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1689,10 +1693,11 @@ std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
class HInstruction : public ArenaObject<kArenaAllocInstruction> {
public:
- explicit HInstruction(SideEffects side_effects)
+ HInstruction(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
: previous_(nullptr),
next_(nullptr),
block_(nullptr),
+ dex_pc_(dex_pc),
id_(-1),
ssa_index_(-1),
environment_(nullptr),
@@ -1735,9 +1740,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
}
virtual bool NeedsEnvironment() const { return false; }
- virtual uint32_t GetDexPc() const {
- return kNoDexPc;
- }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
virtual bool IsControlFlow() const { return false; }
virtual bool CanThrow() const { return false; }
@@ -1940,6 +1945,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
HInstruction* previous_;
HInstruction* next_;
HBasicBlock* block_;
+ const uint32_t dex_pc_;
// An instruction gets an id when it is added to the graph.
// It reflects creation order. A negative id means the instruction
@@ -2044,8 +2050,8 @@ class HBackwardInstructionIterator : public ValueObject {
template<size_t N>
class HTemplateInstruction: public HInstruction {
public:
- HTemplateInstruction<N>(SideEffects side_effects)
- : HInstruction(side_effects), inputs_() {}
+ HTemplateInstruction<N>(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ : HInstruction(side_effects, dex_pc), inputs_() {}
virtual ~HTemplateInstruction() {}
size_t InputCount() const OVERRIDE { return N; }
@@ -2071,7 +2077,9 @@ class HTemplateInstruction: public HInstruction {
template<>
class HTemplateInstruction<0>: public HInstruction {
public:
- explicit HTemplateInstruction(SideEffects side_effects) : HInstruction(side_effects) {}
+ explicit HTemplateInstruction<0>(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ : HInstruction(side_effects, dex_pc) {}
+
virtual ~HTemplateInstruction() {}
size_t InputCount() const OVERRIDE { return 0; }
@@ -2095,8 +2103,8 @@ class HTemplateInstruction<0>: public HInstruction {
template<intptr_t N>
class HExpression : public HTemplateInstruction<N> {
public:
- HExpression<N>(Primitive::Type type, SideEffects side_effects)
- : HTemplateInstruction<N>(side_effects), type_(type) {}
+ HExpression<N>(Primitive::Type type, SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction<N>(side_effects, dex_pc), type_(type) {}
virtual ~HExpression() {}
Primitive::Type GetType() const OVERRIDE { return type_; }
@@ -2109,7 +2117,8 @@ class HExpression : public HTemplateInstruction<N> {
// instruction that branches to the exit block.
class HReturnVoid : public HTemplateInstruction<0> {
public:
- HReturnVoid() : HTemplateInstruction(SideEffects::None()) {}
+ explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc) {}
bool IsControlFlow() const OVERRIDE { return true; }
@@ -2123,7 +2132,8 @@ class HReturnVoid : public HTemplateInstruction<0> {
// instruction that branches to the exit block.
class HReturn : public HTemplateInstruction<1> {
public:
- explicit HReturn(HInstruction* value) : HTemplateInstruction(SideEffects::None()) {
+ explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -2140,7 +2150,7 @@ class HReturn : public HTemplateInstruction<1> {
// exit block.
class HExit : public HTemplateInstruction<0> {
public:
- HExit() : HTemplateInstruction(SideEffects::None()) {}
+ explicit HExit(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {}
bool IsControlFlow() const OVERRIDE { return true; }
@@ -2153,7 +2163,7 @@ class HExit : public HTemplateInstruction<0> {
// Jumps from one block to another.
class HGoto : public HTemplateInstruction<0> {
public:
- HGoto() : HTemplateInstruction(SideEffects::None()) {}
+ explicit HGoto(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {}
bool IsControlFlow() const OVERRIDE { return true; }
@@ -2169,7 +2179,8 @@ class HGoto : public HTemplateInstruction<0> {
class HConstant : public HExpression<0> {
public:
- explicit HConstant(Primitive::Type type) : HExpression(type, SideEffects::None()) {}
+ explicit HConstant(Primitive::Type type, uint32_t dex_pc = kNoDexPc)
+ : HExpression(type, SideEffects::None(), dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
@@ -2194,7 +2205,7 @@ class HNullConstant : public HConstant {
DECLARE_INSTRUCTION(NullConstant);
private:
- HNullConstant() : HConstant(Primitive::kPrimNot) {}
+ explicit HNullConstant(uint32_t dex_pc = kNoDexPc) : HConstant(Primitive::kPrimNot, dex_pc) {}
friend class HGraph;
DISALLOW_COPY_AND_ASSIGN(HNullConstant);
@@ -2220,8 +2231,10 @@ class HIntConstant : public HConstant {
DECLARE_INSTRUCTION(IntConstant);
private:
- explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {}
- explicit HIntConstant(bool value) : HConstant(Primitive::kPrimInt), value_(value ? 1 : 0) {}
+ explicit HIntConstant(int32_t value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimInt, dex_pc), value_(value) {}
+ explicit HIntConstant(bool value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimInt, dex_pc), value_(value ? 1 : 0) {}
const int32_t value_;
@@ -2249,7 +2262,8 @@ class HLongConstant : public HConstant {
DECLARE_INSTRUCTION(LongConstant);
private:
- explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {}
+ explicit HLongConstant(int64_t value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimLong, dex_pc), value_(value) {}
const int64_t value_;
@@ -2261,7 +2275,8 @@ class HLongConstant : public HConstant {
// two successors.
class HIf : public HTemplateInstruction<1> {
public:
- explicit HIf(HInstruction* input) : HTemplateInstruction(SideEffects::None()) {
+ explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc) {
SetRawInputAt(0, input);
}
@@ -2294,8 +2309,8 @@ class HTryBoundary : public HTemplateInstruction<0> {
kExit,
};
- explicit HTryBoundary(BoundaryKind kind)
- : HTemplateInstruction(SideEffects::None()), kind_(kind) {}
+ explicit HTryBoundary(BoundaryKind kind, uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc), kind_(kind) {}
bool IsControlFlow() const OVERRIDE { return true; }
@@ -2352,21 +2367,17 @@ class HExceptionHandlerIterator : public ValueObject {
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize : public HTemplateInstruction<1> {
public:
- HDeoptimize(HInstruction* cond, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None()),
- dex_pc_(dex_pc) {
+  HDeoptimize(HInstruction* cond, uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc) {
SetRawInputAt(0, cond);
}
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(Deoptimize);
private:
- uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
@@ -2375,7 +2386,8 @@ class HDeoptimize : public HTemplateInstruction<1> {
// instructions that work with the dex cache.
class HCurrentMethod : public HExpression<0> {
public:
- explicit HCurrentMethod(Primitive::Type type) : HExpression(type, SideEffects::None()) {}
+ explicit HCurrentMethod(Primitive::Type type, uint32_t dex_pc = kNoDexPc)
+ : HExpression(type, SideEffects::None(), dex_pc) {}
DECLARE_INSTRUCTION(CurrentMethod);
@@ -2385,8 +2397,8 @@ class HCurrentMethod : public HExpression<0> {
class HUnaryOperation : public HExpression<1> {
public:
- HUnaryOperation(Primitive::Type result_type, HInstruction* input)
- : HExpression(result_type, SideEffects::None()) {
+ HUnaryOperation(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
+ : HExpression(result_type, SideEffects::None(), dex_pc) {
SetRawInputAt(0, input);
}
@@ -2419,8 +2431,9 @@ class HBinaryOperation : public HExpression<2> {
HBinaryOperation(Primitive::Type result_type,
HInstruction* left,
HInstruction* right,
- SideEffects side_effects = SideEffects::None())
- : HExpression(result_type, side_effects) {
+ SideEffects side_effects = SideEffects::None(),
+ uint32_t dex_pc = kNoDexPc)
+ : HExpression(result_type, side_effects, dex_pc) {
SetRawInputAt(0, left);
SetRawInputAt(1, right);
}
@@ -2512,8 +2525,8 @@ enum class ComparisonBias {
class HCondition : public HBinaryOperation {
public:
- HCondition(HInstruction* first, HInstruction* second)
- : HBinaryOperation(Primitive::kPrimBoolean, first, second),
+ HCondition(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(Primitive::kPrimBoolean, first, second, SideEffects::None(), dex_pc),
needs_materialization_(true),
bias_(ComparisonBias::kNoBias) {}
@@ -2564,18 +2577,20 @@ class HCondition : public HBinaryOperation {
// Instruction to check if two inputs are equal to each other.
class HEqual : public HCondition {
public:
- HEqual(HInstruction* first, HInstruction* second)
- : HCondition(first, second) {}
+ HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
bool IsCommutative() const OVERRIDE { return true; }
template <typename T> bool Compute(T x, T y) const { return x == y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Equal);
@@ -2594,18 +2609,20 @@ class HEqual : public HCondition {
class HNotEqual : public HCondition {
public:
- HNotEqual(HInstruction* first, HInstruction* second)
- : HCondition(first, second) {}
+ HNotEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
bool IsCommutative() const OVERRIDE { return true; }
template <typename T> bool Compute(T x, T y) const { return x != y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(NotEqual);
@@ -2624,16 +2641,18 @@ class HNotEqual : public HCondition {
class HLessThan : public HCondition {
public:
- HLessThan(HInstruction* first, HInstruction* second)
- : HCondition(first, second) {}
+ HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
template <typename T> bool Compute(T x, T y) const { return x < y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(LessThan);
@@ -2652,16 +2671,18 @@ class HLessThan : public HCondition {
class HLessThanOrEqual : public HCondition {
public:
- HLessThanOrEqual(HInstruction* first, HInstruction* second)
- : HCondition(first, second) {}
+ HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
template <typename T> bool Compute(T x, T y) const { return x <= y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(LessThanOrEqual);
@@ -2680,16 +2701,18 @@ class HLessThanOrEqual : public HCondition {
class HGreaterThan : public HCondition {
public:
- HGreaterThan(HInstruction* first, HInstruction* second)
- : HCondition(first, second) {}
+ HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
template <typename T> bool Compute(T x, T y) const { return x > y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThan);
@@ -2708,16 +2731,18 @@ class HGreaterThan : public HCondition {
class HGreaterThanOrEqual : public HCondition {
public:
- HGreaterThanOrEqual(HInstruction* first, HInstruction* second)
- : HCondition(first, second) {}
+ HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
template <typename T> bool Compute(T x, T y) const { return x >= y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThanOrEqual);
@@ -2744,9 +2769,12 @@ class HCompare : public HBinaryOperation {
HInstruction* second,
ComparisonBias bias,
uint32_t dex_pc)
- : HBinaryOperation(Primitive::kPrimInt, first, second, SideEffectsForArchRuntimeCalls(type)),
- bias_(bias),
- dex_pc_(dex_pc) {
+ : HBinaryOperation(Primitive::kPrimInt,
+ first,
+ second,
+ SideEffectsForArchRuntimeCalls(type),
+ dex_pc),
+ bias_(bias) {
DCHECK_EQ(type, first->GetType());
DCHECK_EQ(type, second->GetType());
}
@@ -2755,10 +2783,12 @@ class HCompare : public HBinaryOperation {
int32_t Compute(T x, T y) const { return x == y ? 0 : x > y ? 1 : -1; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -2769,7 +2799,6 @@ class HCompare : public HBinaryOperation {
bool IsGtBias() { return bias_ == ComparisonBias::kGtBias; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type type) {
// MIPS64 uses a runtime call for FP comparisons.
@@ -2780,7 +2809,6 @@ class HCompare : public HBinaryOperation {
private:
const ComparisonBias bias_;
- const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
@@ -2789,7 +2817,7 @@ class HCompare : public HBinaryOperation {
class HLocal : public HTemplateInstruction<0> {
public:
explicit HLocal(uint16_t reg_number)
- : HTemplateInstruction(SideEffects::None()), reg_number_(reg_number) {}
+ : HTemplateInstruction(SideEffects::None(), kNoDexPc), reg_number_(reg_number) {}
DECLARE_INSTRUCTION(Local);
@@ -2805,8 +2833,8 @@ class HLocal : public HTemplateInstruction<0> {
// Load a given local. The local is an input of this instruction.
class HLoadLocal : public HExpression<1> {
public:
- HLoadLocal(HLocal* local, Primitive::Type type)
- : HExpression(type, SideEffects::None()) {
+ HLoadLocal(HLocal* local, Primitive::Type type, uint32_t dex_pc = kNoDexPc)
+ : HExpression(type, SideEffects::None(), dex_pc) {
SetRawInputAt(0, local);
}
@@ -2822,7 +2850,8 @@ class HLoadLocal : public HExpression<1> {
// and the local.
class HStoreLocal : public HTemplateInstruction<2> {
public:
- HStoreLocal(HLocal* local, HInstruction* value) : HTemplateInstruction(SideEffects::None()) {
+ HStoreLocal(HLocal* local, HInstruction* value, uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc) {
SetRawInputAt(0, local);
SetRawInputAt(1, value);
}
@@ -2863,9 +2892,10 @@ class HFloatConstant : public HConstant {
DECLARE_INSTRUCTION(FloatConstant);
private:
- explicit HFloatConstant(float value) : HConstant(Primitive::kPrimFloat), value_(value) {}
- explicit HFloatConstant(int32_t value)
- : HConstant(Primitive::kPrimFloat), value_(bit_cast<float, int32_t>(value)) {}
+ explicit HFloatConstant(float value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimFloat, dex_pc), value_(value) {}
+ explicit HFloatConstant(int32_t value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimFloat, dex_pc), value_(bit_cast<float, int32_t>(value)) {}
const float value_;
@@ -2903,9 +2933,10 @@ class HDoubleConstant : public HConstant {
DECLARE_INSTRUCTION(DoubleConstant);
private:
- explicit HDoubleConstant(double value) : HConstant(Primitive::kPrimDouble), value_(value) {}
- explicit HDoubleConstant(int64_t value)
- : HConstant(Primitive::kPrimDouble), value_(bit_cast<double, int64_t>(value)) {}
+ explicit HDoubleConstant(double value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimDouble, dex_pc), value_(value) {}
+ explicit HDoubleConstant(int64_t value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimDouble, dex_pc), value_(bit_cast<double, int64_t>(value)) {}
const double value_;
@@ -2952,7 +2983,6 @@ class HInvoke : public HInstruction {
Primitive::Type GetType() const OVERRIDE { return return_type_; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
const DexFile& GetDexFile() const { return GetEnvironment()->GetDexFile(); }
@@ -2985,11 +3015,10 @@ class HInvoke : public HInstruction {
uint32_t dex_method_index,
InvokeType original_invoke_type)
: HInstruction(
- SideEffects::AllExceptGCDependency()), // Assume write/read on all fields/arrays.
+ SideEffects::AllExceptGCDependency(), dex_pc), // Assume write/read on all fields/arrays.
number_of_arguments_(number_of_arguments),
inputs_(arena, number_of_arguments),
return_type_(return_type),
- dex_pc_(dex_pc),
dex_method_index_(dex_method_index),
original_invoke_type_(original_invoke_type),
intrinsic_(Intrinsics::kNone),
@@ -3006,7 +3035,6 @@ class HInvoke : public HInstruction {
uint32_t number_of_arguments_;
GrowableArray<HUserRecord<HInstruction*> > inputs_;
const Primitive::Type return_type_;
- const uint32_t dex_pc_;
const uint32_t dex_method_index_;
const InvokeType original_invoke_type_;
Intrinsics intrinsic_;
@@ -3307,15 +3335,13 @@ class HNewInstance : public HExpression<1> {
uint16_t type_index,
const DexFile& dex_file,
QuickEntrypointEnum entrypoint)
- : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC()),
- dex_pc_(dex_pc),
+ : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
entrypoint_(entrypoint) {
SetRawInputAt(0, current_method);
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
@@ -3334,7 +3360,6 @@ class HNewInstance : public HExpression<1> {
DECLARE_INSTRUCTION(NewInstance);
private:
- const uint32_t dex_pc_;
const uint16_t type_index_;
const DexFile& dex_file_;
const QuickEntrypointEnum entrypoint_;
@@ -3344,16 +3369,16 @@ class HNewInstance : public HExpression<1> {
class HNeg : public HUnaryOperation {
public:
- HNeg(Primitive::Type result_type, HInstruction* input)
- : HUnaryOperation(result_type, input) {}
+ HNeg(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
+ : HUnaryOperation(result_type, input, dex_pc) {}
template <typename T> T Compute(T x) const { return -x; }
HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Neg);
@@ -3370,8 +3395,7 @@ class HNewArray : public HExpression<2> {
uint16_t type_index,
const DexFile& dex_file,
QuickEntrypointEnum entrypoint)
- : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC()),
- dex_pc_(dex_pc),
+ : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
entrypoint_(entrypoint) {
@@ -3379,7 +3403,6 @@ class HNewArray : public HExpression<2> {
SetRawInputAt(1, current_method);
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
@@ -3396,7 +3419,6 @@ class HNewArray : public HExpression<2> {
DECLARE_INSTRUCTION(NewArray);
private:
- const uint32_t dex_pc_;
const uint16_t type_index_;
const DexFile& dex_file_;
const QuickEntrypointEnum entrypoint_;
@@ -3406,18 +3428,23 @@ class HNewArray : public HExpression<2> {
class HAdd : public HBinaryOperation {
public:
- HAdd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HAdd(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
bool IsCommutative() const OVERRIDE { return true; }
template <typename T> T Compute(T x, T y) const { return x + y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Add);
@@ -3428,16 +3455,21 @@ class HAdd : public HBinaryOperation {
class HSub : public HBinaryOperation {
public:
- HSub(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HSub(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T> T Compute(T x, T y) const { return x - y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Sub);
@@ -3448,18 +3480,23 @@ class HSub : public HBinaryOperation {
class HMul : public HBinaryOperation {
public:
- HMul(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HMul(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
bool IsCommutative() const OVERRIDE { return true; }
template <typename T> T Compute(T x, T y) const { return x * y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Mul);
@@ -3470,9 +3507,11 @@ class HMul : public HBinaryOperation {
class HDiv : public HBinaryOperation {
public:
- HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
- : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls()),
- dex_pc_(dex_pc) {}
+ HDiv(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc)
+ : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
template <typename T>
T Compute(T x, T y) const {
@@ -3484,14 +3523,14 @@ class HDiv : public HBinaryOperation {
}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
-
static SideEffects SideEffectsForArchRuntimeCalls() {
// The generated code can use a runtime call.
return SideEffects::CanTriggerGC();
@@ -3500,16 +3539,16 @@ class HDiv : public HBinaryOperation {
DECLARE_INSTRUCTION(Div);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HDiv);
};
class HRem : public HBinaryOperation {
public:
- HRem(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
- : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls()),
- dex_pc_(dex_pc) {}
+ HRem(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc)
+ : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
template <typename T>
T Compute(T x, T y) const {
@@ -3521,13 +3560,14 @@ class HRem : public HBinaryOperation {
}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
@@ -3536,15 +3576,13 @@ class HRem : public HBinaryOperation {
DECLARE_INSTRUCTION(Rem);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HRem);
};
class HDivZeroCheck : public HExpression<1> {
public:
HDivZeroCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(value->GetType(), SideEffects::None()), dex_pc_(dex_pc) {
+ : HExpression(value->GetType(), SideEffects::None(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -3560,20 +3598,19 @@ class HDivZeroCheck : public HExpression<1> {
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
-
DECLARE_INSTRUCTION(DivZeroCheck);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
};
class HShl : public HBinaryOperation {
public:
- HShl(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HShl(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T, typename U, typename V>
T Compute(T x, U y, V max_shift_value) const {
@@ -3584,17 +3621,17 @@ class HShl : public HBinaryOperation {
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue), GetDexPc());
}
// There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
// case is handled as `x << static_cast<int>(y)`.
HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
DECLARE_INSTRUCTION(Shl);
@@ -3605,8 +3642,11 @@ class HShl : public HBinaryOperation {
class HShr : public HBinaryOperation {
public:
- HShr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HShr(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T, typename U, typename V>
T Compute(T x, U y, V max_shift_value) const {
@@ -3617,17 +3657,17 @@ class HShr : public HBinaryOperation {
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue), GetDexPc());
}
// There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
// case is handled as `x >> static_cast<int>(y)`.
HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
DECLARE_INSTRUCTION(Shr);
@@ -3638,8 +3678,11 @@ class HShr : public HBinaryOperation {
class HUShr : public HBinaryOperation {
public:
- HUShr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HUShr(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T, typename U, typename V>
T Compute(T x, U y, V max_shift_value) const {
@@ -3651,17 +3694,17 @@ class HUShr : public HBinaryOperation {
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue), GetDexPc());
}
// There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this
// case is handled as `x >>> static_cast<int>(y)`.
HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue));
+ Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
DECLARE_INSTRUCTION(UShr);
@@ -3672,8 +3715,11 @@ class HUShr : public HBinaryOperation {
class HAnd : public HBinaryOperation {
public:
- HAnd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HAnd(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
bool IsCommutative() const OVERRIDE { return true; }
@@ -3681,16 +3727,20 @@ class HAnd : public HBinaryOperation {
auto Compute(T x, U y) const -> decltype(x & y) { return x & y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(And);
@@ -3701,8 +3751,11 @@ class HAnd : public HBinaryOperation {
class HOr : public HBinaryOperation {
public:
- HOr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HOr(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
bool IsCommutative() const OVERRIDE { return true; }
@@ -3710,16 +3763,20 @@ class HOr : public HBinaryOperation {
auto Compute(T x, U y) const -> decltype(x | y) { return x | y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Or);
@@ -3730,8 +3787,11 @@ class HOr : public HBinaryOperation {
class HXor : public HBinaryOperation {
public:
- HXor(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HXor(Primitive::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
bool IsCommutative() const OVERRIDE { return true; }
@@ -3739,16 +3799,20 @@ class HXor : public HBinaryOperation {
auto Compute(T x, U y) const -> decltype(x ^ y) { return x ^ y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Xor);
@@ -3761,8 +3825,10 @@ class HXor : public HBinaryOperation {
// the calling convention.
class HParameterValue : public HExpression<0> {
public:
- HParameterValue(uint8_t index, Primitive::Type parameter_type, bool is_this = false)
- : HExpression(parameter_type, SideEffects::None()),
+ HParameterValue(uint8_t index,
+ Primitive::Type parameter_type,
+ bool is_this = false)
+ : HExpression(parameter_type, SideEffects::None(), kNoDexPc),
index_(index),
is_this_(is_this),
can_be_null_(!is_this) {}
@@ -3791,8 +3857,8 @@ class HParameterValue : public HExpression<0> {
class HNot : public HUnaryOperation {
public:
- HNot(Primitive::Type result_type, HInstruction* input)
- : HUnaryOperation(result_type, input) {}
+ HNot(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
+ : HUnaryOperation(result_type, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -3803,10 +3869,10 @@ class HNot : public HUnaryOperation {
template <typename T> T Compute(T x) const { return ~x; }
HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
- return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()));
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
DECLARE_INSTRUCTION(Not);
@@ -3817,8 +3883,8 @@ class HNot : public HUnaryOperation {
class HBooleanNot : public HUnaryOperation {
public:
- explicit HBooleanNot(HInstruction* input)
- : HUnaryOperation(Primitive::Type::kPrimBoolean, input) {}
+ explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc)
+ : HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -3832,7 +3898,7 @@ class HBooleanNot : public HUnaryOperation {
}
HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()));
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
LOG(FATAL) << DebugName() << " is not defined for long values";
@@ -3849,8 +3915,9 @@ class HTypeConversion : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc)
- : HExpression(result_type, SideEffectsForArchRuntimeCalls(input->GetType(), result_type)),
- dex_pc_(dex_pc) {
+ : HExpression(result_type,
+ SideEffectsForArchRuntimeCalls(input->GetType(), result_type),
+ dex_pc) {
SetRawInputAt(0, input);
DCHECK_NE(input->GetType(), result_type);
}
@@ -3861,7 +3928,6 @@ class HTypeConversion : public HExpression<1> {
// Required by the x86 and ARM code generators when producing calls
// to the runtime.
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
@@ -3885,8 +3951,6 @@ class HTypeConversion : public HExpression<1> {
DECLARE_INSTRUCTION(TypeConversion);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HTypeConversion);
};
@@ -3894,8 +3958,12 @@ static constexpr uint32_t kNoRegNumber = -1;
class HPhi : public HInstruction {
public:
- HPhi(ArenaAllocator* arena, uint32_t reg_number, size_t number_of_inputs, Primitive::Type type)
- : HInstruction(SideEffects::None()),
+ HPhi(ArenaAllocator* arena,
+ uint32_t reg_number,
+ size_t number_of_inputs,
+ Primitive::Type type,
+ uint32_t dex_pc = kNoDexPc)
+ : HInstruction(SideEffects::None(), dex_pc),
inputs_(arena, number_of_inputs),
reg_number_(reg_number),
type_(type),
@@ -3973,7 +4041,7 @@ class HPhi : public HInstruction {
class HNullCheck : public HExpression<1> {
public:
HNullCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(value->GetType(), SideEffects::None()), dex_pc_(dex_pc) {
+ : HExpression(value->GetType(), SideEffects::None(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -3989,13 +4057,10 @@ class HNullCheck : public HExpression<1> {
bool CanBeNull() const OVERRIDE { return false; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(NullCheck);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HNullCheck);
};
@@ -4038,10 +4103,11 @@ class HInstanceFieldGet : public HExpression<1> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ uint32_t dex_pc = kNoDexPc)
: HExpression(
field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile)),
+ SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
SetRawInputAt(0, value);
}
@@ -4083,9 +4149,10 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(
- SideEffects::FieldWriteOfType(field_type, is_volatile)),
+ SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
value_can_be_null_(true) {
SetRawInputAt(0, object);
@@ -4115,8 +4182,11 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
class HArrayGet : public HExpression<2> {
public:
- HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type)
- : HExpression(type, SideEffects::ArrayReadOfType(type)) {
+ HArrayGet(HInstruction* array,
+ HInstruction* index,
+ Primitive::Type type,
+ uint32_t dex_pc = kNoDexPc)
+ : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
}
@@ -4156,8 +4226,7 @@ class HArraySet : public HTemplateInstruction<3> {
uint32_t dex_pc)
: HTemplateInstruction(
SideEffects::ArrayWriteOfType(expected_component_type).Union(
- SideEffectsForArchRuntimeCalls(value->GetType()))),
- dex_pc_(dex_pc),
+ SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
expected_component_type_(expected_component_type),
needs_type_check_(value->GetType() == Primitive::kPrimNot),
value_can_be_null_(true) {
@@ -4192,8 +4261,6 @@ class HArraySet : public HTemplateInstruction<3> {
bool GetValueCanBeNull() const { return value_can_be_null_; }
bool NeedsTypeCheck() const { return needs_type_check_; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
-
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
HInstruction* GetValue() const { return InputAt(2); }
@@ -4216,7 +4283,6 @@ class HArraySet : public HTemplateInstruction<3> {
DECLARE_INSTRUCTION(ArraySet);
private:
- const uint32_t dex_pc_;
const Primitive::Type expected_component_type_;
bool needs_type_check_;
bool value_can_be_null_;
@@ -4226,8 +4292,8 @@ class HArraySet : public HTemplateInstruction<3> {
class HArrayLength : public HExpression<1> {
public:
- explicit HArrayLength(HInstruction* array)
- : HExpression(Primitive::kPrimInt, SideEffects::None()) {
+ explicit HArrayLength(HInstruction* array, uint32_t dex_pc = kNoDexPc)
+ : HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
// Note that arrays do not change length, so the instruction does not
// depend on any write.
SetRawInputAt(0, array);
@@ -4251,7 +4317,7 @@ class HArrayLength : public HExpression<1> {
class HBoundsCheck : public HExpression<2> {
public:
HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc)
- : HExpression(index->GetType(), SideEffects::None()), dex_pc_(dex_pc) {
+ : HExpression(index->GetType(), SideEffects::None(), dex_pc) {
DCHECK(index->GetType() == Primitive::kPrimInt);
SetRawInputAt(0, index);
SetRawInputAt(1, length);
@@ -4267,13 +4333,10 @@ class HBoundsCheck : public HExpression<2> {
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(BoundsCheck);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
@@ -4286,7 +4349,8 @@ class HBoundsCheck : public HExpression<2> {
*/
class HTemporary : public HTemplateInstruction<0> {
public:
- explicit HTemporary(size_t index) : HTemplateInstruction(SideEffects::None()), index_(index) {}
+ explicit HTemporary(size_t index, uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc), index_(index) {}
size_t GetIndex() const { return index_; }
@@ -4300,28 +4364,24 @@ class HTemporary : public HTemplateInstruction<0> {
private:
const size_t index_;
-
DISALLOW_COPY_AND_ASSIGN(HTemporary);
};
class HSuspendCheck : public HTemplateInstruction<0> {
public:
explicit HSuspendCheck(uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::CanTriggerGC()), dex_pc_(dex_pc), slow_path_(nullptr) {}
+ : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc), slow_path_(nullptr) {}
bool NeedsEnvironment() const OVERRIDE {
return true;
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
void SetSlowPath(SlowPathCode* slow_path) { slow_path_ = slow_path; }
SlowPathCode* GetSlowPath() const { return slow_path_; }
DECLARE_INSTRUCTION(SuspendCheck);
private:
- const uint32_t dex_pc_;
-
// Only used for code generation, in order to share the same slow path between back edges
// of a same loop.
SlowPathCode* slow_path_;
@@ -4339,11 +4399,10 @@ class HLoadClass : public HExpression<1> {
const DexFile& dex_file,
bool is_referrers_class,
uint32_t dex_pc)
- : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls()),
+ : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
is_referrers_class_(is_referrers_class),
- dex_pc_(dex_pc),
generate_clinit_check_(false),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
SetRawInputAt(0, current_method);
@@ -4357,7 +4416,6 @@ class HLoadClass : public HExpression<1> {
size_t ComputeHashCode() const OVERRIDE { return type_index_; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
bool IsReferrersClass() const { return is_referrers_class_; }
bool CanBeNull() const OVERRIDE { return false; }
@@ -4410,7 +4468,6 @@ class HLoadClass : public HExpression<1> {
const uint16_t type_index_;
const DexFile& dex_file_;
const bool is_referrers_class_;
- const uint32_t dex_pc_;
// Whether this instruction must generate the initialization check.
// Used for code generation.
bool generate_clinit_check_;
@@ -4423,9 +4480,8 @@ class HLoadClass : public HExpression<1> {
class HLoadString : public HExpression<1> {
public:
HLoadString(HCurrentMethod* current_method, uint32_t string_index, uint32_t dex_pc)
- : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls()),
- string_index_(string_index),
- dex_pc_(dex_pc) {
+ : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls(), dex_pc),
+ string_index_(string_index) {
SetRawInputAt(0, current_method);
}
@@ -4437,7 +4493,6 @@ class HLoadString : public HExpression<1> {
size_t ComputeHashCode() const OVERRIDE { return string_index_; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint32_t GetStringIndex() const { return string_index_; }
// TODO: Can we deopt or debug when we resolve a string?
@@ -4453,7 +4508,6 @@ class HLoadString : public HExpression<1> {
private:
const uint32_t string_index_;
- const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
@@ -4466,8 +4520,8 @@ class HClinitCheck : public HExpression<1> {
HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
: HExpression(
Primitive::kPrimNot,
- SideEffects::AllChanges()), // Assume write/read on all fields/arrays.
- dex_pc_(dex_pc) {
+ SideEffects::AllChanges(), // Assume write/read on all fields/arrays.
+ dex_pc) {
SetRawInputAt(0, constant);
}
@@ -4482,15 +4536,12 @@ class HClinitCheck : public HExpression<1> {
return true;
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
HLoadClass* GetLoadClass() const { return InputAt(0)->AsLoadClass(); }
DECLARE_INSTRUCTION(ClinitCheck);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HClinitCheck);
};
@@ -4502,10 +4553,11 @@ class HStaticFieldGet : public HExpression<1> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ uint32_t dex_pc = kNoDexPc)
: HExpression(
field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile)),
+ SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
SetRawInputAt(0, cls);
}
@@ -4544,9 +4596,10 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(
- SideEffects::FieldWriteOfType(field_type, is_volatile)),
+ SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
value_can_be_null_(true) {
SetRawInputAt(0, cls);
@@ -4574,7 +4627,8 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
// Implement the move-exception DEX instruction.
class HLoadException : public HExpression<0> {
public:
- HLoadException() : HExpression(Primitive::kPrimNot, SideEffects::None()) {}
+ explicit HLoadException(uint32_t dex_pc = kNoDexPc)
+ : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc) {}
bool CanBeNull() const OVERRIDE { return false; }
@@ -4588,7 +4642,8 @@ class HLoadException : public HExpression<0> {
// Must not be removed because the runtime expects the TLS to get cleared.
class HClearException : public HTemplateInstruction<0> {
public:
- HClearException() : HTemplateInstruction(SideEffects::AllWrites()) {}
+ explicit HClearException(uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::AllWrites(), dex_pc) {}
DECLARE_INSTRUCTION(ClearException);
@@ -4599,7 +4654,7 @@ class HClearException : public HTemplateInstruction<0> {
class HThrow : public HTemplateInstruction<1> {
public:
HThrow(HInstruction* exception, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::CanTriggerGC()), dex_pc_(dex_pc) {
+ : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, exception);
}
@@ -4609,13 +4664,10 @@ class HThrow : public HTemplateInstruction<1> {
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(Throw);
private:
- const uint32_t dex_pc_;
-
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
@@ -4625,10 +4677,11 @@ class HInstanceOf : public HExpression<2> {
HLoadClass* constant,
bool class_is_final,
uint32_t dex_pc)
- : HExpression(Primitive::kPrimBoolean, SideEffectsForArchRuntimeCalls(class_is_final)),
+ : HExpression(Primitive::kPrimBoolean,
+ SideEffectsForArchRuntimeCalls(class_is_final),
+ dex_pc),
class_is_final_(class_is_final),
- must_do_null_check_(true),
- dex_pc_(dex_pc) {
+ must_do_null_check_(true) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
}
@@ -4643,8 +4696,6 @@ class HInstanceOf : public HExpression<2> {
return false;
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
-
bool IsClassFinal() const { return class_is_final_; }
// Used only in code generation.
@@ -4660,7 +4711,6 @@ class HInstanceOf : public HExpression<2> {
private:
const bool class_is_final_;
bool must_do_null_check_;
- const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
};
@@ -4669,8 +4719,11 @@ class HBoundType : public HExpression<1> {
public:
// Constructs an HBoundType with the given upper_bound.
// Ensures that the upper_bound is valid.
- HBoundType(HInstruction* input, ReferenceTypeInfo upper_bound, bool upper_can_be_null)
- : HExpression(Primitive::kPrimNot, SideEffects::None()),
+ HBoundType(HInstruction* input,
+ ReferenceTypeInfo upper_bound,
+ bool upper_can_be_null,
+ uint32_t dex_pc = kNoDexPc)
+ : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc),
upper_bound_(upper_bound),
upper_can_be_null_(upper_can_be_null),
can_be_null_(upper_can_be_null) {
@@ -4714,10 +4767,9 @@ class HCheckCast : public HTemplateInstruction<2> {
HLoadClass* constant,
bool class_is_final,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::CanTriggerGC()),
+ : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc),
class_is_final_(class_is_final),
- must_do_null_check_(true),
- dex_pc_(dex_pc) {
+ must_do_null_check_(true) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
}
@@ -4738,7 +4790,6 @@ class HCheckCast : public HTemplateInstruction<2> {
bool MustDoNullCheck() const { return must_do_null_check_; }
void ClearMustDoNullCheck() { must_do_null_check_ = false; }
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
bool IsClassFinal() const { return class_is_final_; }
@@ -4747,16 +4798,15 @@ class HCheckCast : public HTemplateInstruction<2> {
private:
const bool class_is_final_;
bool must_do_null_check_;
- const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HCheckCast);
};
class HMemoryBarrier : public HTemplateInstruction<0> {
public:
- explicit HMemoryBarrier(MemBarrierKind barrier_kind)
+ explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(
- SideEffects::AllWritesAndReads()), // Assume write/read on all fields/arrays.
+ SideEffects::AllWritesAndReads(), dex_pc), // Assume write/read on all fields/arrays.
barrier_kind_(barrier_kind) {}
MemBarrierKind GetBarrierKind() { return barrier_kind_; }
@@ -4778,8 +4828,8 @@ class HMonitorOperation : public HTemplateInstruction<1> {
HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
: HTemplateInstruction(
- SideEffects::AllExceptGCDependency()), // Assume write/read on all fields/arrays.
- kind_(kind), dex_pc_(dex_pc) {
+ SideEffects::AllExceptGCDependency(), dex_pc), // Assume write/read on all fields/arrays.
+ kind_(kind) {
SetRawInputAt(0, object);
}
@@ -4793,7 +4843,6 @@ class HMonitorOperation : public HTemplateInstruction<1> {
return IsEnter();
}
- uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
bool IsEnter() const { return kind_ == kEnter; }
@@ -4801,7 +4850,6 @@ class HMonitorOperation : public HTemplateInstruction<1> {
private:
const OperationKind kind_;
- const uint32_t dex_pc_;
private:
DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
@@ -4816,7 +4864,8 @@ class HMonitorOperation : public HTemplateInstruction<1> {
*/
class HFakeString : public HTemplateInstruction<0> {
public:
- HFakeString() : HTemplateInstruction(SideEffects::None()) {}
+ explicit HFakeString(uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc) {}
Primitive::Type GetType() const OVERRIDE { return Primitive::kPrimNot; }
@@ -4904,8 +4953,8 @@ static constexpr size_t kDefaultNumberOfMoves = 4;
class HParallelMove : public HTemplateInstruction<0> {
public:
- explicit HParallelMove(ArenaAllocator* arena)
- : HTemplateInstruction(SideEffects::None()), moves_(arena, kDefaultNumberOfMoves) {}
+ explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc), moves_(arena, kDefaultNumberOfMoves) {}
void AddMove(Location source,
Location destination,
@@ -4955,6 +5004,14 @@ class HParallelMove : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HParallelMove);
};
+} // namespace art
+
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "nodes_x86.h"
+#endif
+
+namespace art {
+
class HGraphVisitor : public ValueObject {
public:
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
new file mode 100644
index 0000000000..ddc5730215
--- /dev/null
+++ b/compiler/optimizing/nodes_x86.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_X86_H_
+#define ART_COMPILER_OPTIMIZING_NODES_X86_H_
+
+namespace art {
+
+// Compute the address of the method for X86 Constant area support.
+class HX86ComputeBaseMethodAddress : public HExpression<0> {
+ public:
+ // Treat the value as an int32_t, but it is really a 32 bit native pointer.
+ explicit HX86ComputeBaseMethodAddress(uint32_t dex_pc = kNoDexPc) : HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {}
+
+ DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HX86ComputeBaseMethodAddress);
+};
+
+// Load a constant value from the constant table.
+class HX86LoadFromConstantTable : public HExpression<2> {
+ public:
+ HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
+ HConstant* constant,
+ bool needs_materialization = true)
+ : HExpression(constant->GetType(), SideEffects::None()),
+ needs_materialization_(needs_materialization) {
+ SetRawInputAt(0, method_base);
+ SetRawInputAt(1, constant);
+ }
+
+ bool NeedsMaterialization() const { return needs_materialization_; }
+
+ HX86ComputeBaseMethodAddress* GetBaseMethodAddress() const {
+ return InputAt(0)->AsX86ComputeBaseMethodAddress();
+ }
+
+ HConstant* GetConstant() const {
+ return InputAt(1)->AsConstant();
+ }
+
+ DECLARE_INSTRUCTION(X86LoadFromConstantTable);
+
+ private:
+ const bool needs_materialization_;
+
+ DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 91b03d4bd1..f549ba8391 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -23,6 +23,10 @@
#include "instruction_simplifier_arm64.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "constant_area_fixups_x86.h"
+#endif
+
#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
@@ -424,6 +428,17 @@ static void RunArchOptimizations(InstructionSet instruction_set,
break;
}
#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case kX86: {
+ x86::ConstantAreaFixups* constant_area_fixups =
+ new (arena) x86::ConstantAreaFixups(graph, stats);
+ HOptimization* x86_optimizations[] = {
+ constant_area_fixups
+ };
+ RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
+ break;
+ }
+#endif
default:
break;
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 7825457d5c..a4d1837748 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -495,6 +495,7 @@ class ArmAssembler : public Assembler {
virtual void clz(Register rd, Register rm, Condition cond = AL) = 0;
virtual void movw(Register rd, uint16_t imm16, Condition cond = AL) = 0;
virtual void movt(Register rd, uint16_t imm16, Condition cond = AL) = 0;
+ virtual void rbit(Register rd, Register rm, Condition cond = AL) = 0;
// Multiply instructions.
virtual void mul(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
@@ -668,11 +669,14 @@ class ArmAssembler : public Assembler {
virtual void LoadLiteral(DRegister dd, Literal* literal) = 0;
// Add signed constant value to rd. May clobber IP.
- virtual void AddConstant(Register rd, int32_t value, Condition cond = AL) = 0;
virtual void AddConstant(Register rd, Register rn, int32_t value,
- Condition cond = AL) = 0;
- virtual void AddConstantSetFlags(Register rd, Register rn, int32_t value,
- Condition cond = AL) = 0;
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+ void AddConstantSetFlags(Register rd, Register rn, int32_t value, Condition cond = AL) {
+ AddConstant(rd, rn, value, cond, kCcSet);
+ }
+ void AddConstant(Register rd, int32_t value, Condition cond = AL, SetCc set_cc = kCcDontCare) {
+ AddConstant(rd, rd, value, cond, set_cc);
+ }
// Load and Store. May clobber IP.
virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index d91ddee9b9..f7772aea3d 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -735,6 +735,20 @@ void Arm32Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
}
+void Arm32Assembler::rbit(Register rd, Register rm, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_NE(rd, PC);
+ CHECK_NE(rm, PC);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 | B25 | B23 | B22 | B21 | B20 | (0xf << 16) |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ (0xf << 8) | B5 | B4 | static_cast<int32_t>(rm);
+ Emit(encoding);
+}
+
+
void Arm32Assembler::EmitMulOp(Condition cond, int32_t opcode,
Register rd, Register rn,
Register rm, Register rs) {
@@ -1321,16 +1335,12 @@ void Arm32Assembler::LoadLiteral(DRegister dd ATTRIBUTE_UNUSED,
UNREACHABLE();
}
-void Arm32Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
- AddConstant(rd, rd, value, cond);
-}
-
void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value,
- Condition cond) {
- if (value == 0) {
+ Condition cond, SetCc set_cc) {
+ if (value == 0 && set_cc != kCcSet) {
if (rd != rn) {
- mov(rd, ShifterOperand(rn), cond);
+ mov(rd, ShifterOperand(rn), cond, set_cc);
}
return;
}
@@ -1339,55 +1349,29 @@ void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value,
// the readability of generated code for some constants.
ShifterOperand shifter_op;
if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
- add(rd, rn, shifter_op, cond);
+ add(rd, rn, shifter_op, cond, set_cc);
} else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
- sub(rd, rn, shifter_op, cond);
+ sub(rd, rn, shifter_op, cond, set_cc);
} else {
CHECK(rn != IP);
if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
- mvn(IP, shifter_op, cond);
- add(rd, rn, ShifterOperand(IP), cond);
+ mvn(IP, shifter_op, cond, kCcKeep);
+ add(rd, rn, ShifterOperand(IP), cond, set_cc);
} else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
- mvn(IP, shifter_op, cond);
- sub(rd, rn, ShifterOperand(IP), cond);
+ mvn(IP, shifter_op, cond, kCcKeep);
+ sub(rd, rn, ShifterOperand(IP), cond, set_cc);
} else {
movw(IP, Low16Bits(value), cond);
uint16_t value_high = High16Bits(value);
if (value_high != 0) {
movt(IP, value_high, cond);
}
- add(rd, rn, ShifterOperand(IP), cond);
+ add(rd, rn, ShifterOperand(IP), cond, set_cc);
}
}
}
-void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
- Condition cond) {
- ShifterOperand shifter_op;
- if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
- add(rd, rn, shifter_op, cond, kCcSet);
- } else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
- sub(rd, rn, shifter_op, cond, kCcSet);
- } else {
- CHECK(rn != IP);
- if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
- mvn(IP, shifter_op, cond);
- add(rd, rn, ShifterOperand(IP), cond, kCcSet);
- } else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
- mvn(IP, shifter_op, cond);
- sub(rd, rn, ShifterOperand(IP), cond, kCcSet);
- } else {
- movw(IP, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(IP, value_high, cond);
- }
- add(rd, rn, ShifterOperand(IP), cond, kCcSet);
- }
- }
-}
-
void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index b96bb74182..3407369654 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -87,6 +87,7 @@ class Arm32Assembler FINAL : public ArmAssembler {
void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
+ void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
// Multiply instructions.
void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
@@ -254,11 +255,8 @@ class Arm32Assembler FINAL : public ArmAssembler {
void LoadLiteral(DRegister dd, Literal* literal) OVERRIDE;
// Add signed constant value to rd. May clobber IP.
- void AddConstant(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
void AddConstant(Register rd, Register rn, int32_t value,
- Condition cond = AL) OVERRIDE;
- void AddConstantSetFlags(Register rd, Register rn, int32_t value,
- Condition cond = AL) OVERRIDE;
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index e6412ac684..2a0912e02d 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -883,4 +883,8 @@ TEST_F(AssemblerArm32Test, strexd) {
DriverStr(expected, "strexd");
}
+TEST_F(AssemblerArm32Test, rbit) {
+ T3Helper(&arm::Arm32Assembler::rbit, true, "rbit{cond} {reg1}, {reg2}", "rbit");
+}
+
} // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 90ed10c498..0f6c4f5a34 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2426,6 +2426,25 @@ void Thumb2Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
}
+void Thumb2Assembler::rbit(Register rd, Register rm, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rm, kNoRegister);
+ CheckCondition(cond);
+ CHECK_NE(rd, PC);
+ CHECK_NE(rm, PC);
+ CHECK_NE(rd, SP);
+ CHECK_NE(rm, SP);
+ int32_t encoding = B31 | B30 | B29 | B28 | B27 |
+ B25 | B23 | B20 |
+ static_cast<uint32_t>(rm) << 16 |
+ 0xf << 12 |
+ static_cast<uint32_t>(rd) << 8 |
+ B7 | B5 |
+ static_cast<uint32_t>(rm);
+ Emit32(encoding);
+}
+
+
void Thumb2Assembler::ldrex(Register rt, Register rn, uint16_t imm, Condition cond) {
CHECK_NE(rn, kNoRegister);
CHECK_NE(rt, kNoRegister);
@@ -3192,14 +3211,10 @@ void Thumb2Assembler::LoadLiteral(DRegister dd, Literal* literal) {
DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
}
-void Thumb2Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
- AddConstant(rd, rd, value, cond);
-}
-
void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
- Condition cond) {
- if (value == 0) {
+ Condition cond, SetCc set_cc) {
+ if (value == 0 && set_cc != kCcSet) {
if (rd != rn) {
mov(rd, ShifterOperand(rn), cond);
}
@@ -3210,51 +3225,24 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
// the readability of generated code for some constants.
ShifterOperand shifter_op;
if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
- add(rd, rn, shifter_op, cond);
+ add(rd, rn, shifter_op, cond, set_cc);
} else if (ShifterOperandCanHold(rd, rn, SUB, -value, &shifter_op)) {
- sub(rd, rn, shifter_op, cond);
- } else {
- CHECK(rn != IP);
- if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
- mvn(IP, shifter_op, cond);
- add(rd, rn, ShifterOperand(IP), cond);
- } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
- mvn(IP, shifter_op, cond);
- sub(rd, rn, ShifterOperand(IP), cond);
- } else {
- movw(IP, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(IP, value_high, cond);
- }
- add(rd, rn, ShifterOperand(IP), cond);
- }
- }
-}
-
-
-void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
- Condition cond) {
- ShifterOperand shifter_op;
- if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
- add(rd, rn, shifter_op, cond, kCcSet);
- } else if (ShifterOperandCanHold(rd, rn, ADD, -value, &shifter_op)) {
- sub(rd, rn, shifter_op, cond, kCcSet);
+ sub(rd, rn, shifter_op, cond, set_cc);
} else {
CHECK(rn != IP);
if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
- mvn(IP, shifter_op, cond);
- add(rd, rn, ShifterOperand(IP), cond, kCcSet);
+ mvn(IP, shifter_op, cond, kCcKeep);
+ add(rd, rn, ShifterOperand(IP), cond, set_cc);
} else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
- mvn(IP, shifter_op, cond);
- sub(rd, rn, ShifterOperand(IP), cond, kCcSet);
+ mvn(IP, shifter_op, cond, kCcKeep);
+ sub(rd, rn, ShifterOperand(IP), cond, set_cc);
} else {
movw(IP, Low16Bits(value), cond);
uint16_t value_high = High16Bits(value);
if (value_high != 0) {
movt(IP, value_high, cond);
}
- add(rd, rn, ShifterOperand(IP), cond, kCcSet);
+ add(rd, rn, ShifterOperand(IP), cond, set_cc);
}
}
}
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index c802c27ea6..a1a8927f44 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -111,6 +111,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
+ void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
// Multiply instructions.
void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
@@ -297,11 +298,8 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void LoadLiteral(DRegister dd, Literal* literal) OVERRIDE;
// Add signed constant value to rd. May clobber IP.
- void AddConstant(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
void AddConstant(Register rd, Register rn, int32_t value,
- Condition cond = AL) OVERRIDE;
- void AddConstantSetFlags(Register rd, Register rn, int32_t value,
- Condition cond = AL) OVERRIDE;
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 84f5cb16fb..9c08ce017e 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -1019,4 +1019,12 @@ TEST_F(AssemblerThumb2Test, Clz) {
DriverStr(expected, "clz");
}
+TEST_F(AssemblerThumb2Test, rbit) {
+ __ rbit(arm::R1, arm::R0);
+
+ const char* expected = "rbit r1, r0\n";
+
+ DriverStr(expected, "rbit");
+}
+
} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index a03f857e88..e3962b4d69 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1750,6 +1750,10 @@ void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) {
for (int i = 1; i < length; i++) {
EmitUint8(operand.encoding_[i]);
}
+ AssemblerFixup* fixup = operand.GetFixup();
+ if (fixup != nullptr) {
+ EmitFixup(fixup);
+ }
}
@@ -2322,5 +2326,56 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) {
#undef __
}
+void X86Assembler::AddConstantArea() {
+ const std::vector<int32_t>& area = constant_area_.GetBuffer();
+ // Generate the data for the literal area.
+ for (size_t i = 0, e = area.size(); i < e; i++) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitInt32(area[i]);
+ }
+}
+
+int ConstantArea::AddInt32(int32_t v) {
+ for (size_t i = 0, e = buffer_.size(); i < e; i++) {
+ if (v == buffer_[i]) {
+ return i * kEntrySize;
+ }
+ }
+
+ // Didn't match anything.
+ int result = buffer_.size() * kEntrySize;
+ buffer_.push_back(v);
+ return result;
+}
+
+int ConstantArea::AddInt64(int64_t v) {
+ int32_t v_low = Low32Bits(v);
+ int32_t v_high = High32Bits(v);
+ if (buffer_.size() > 1) {
+ // Ensure we don't pass the end of the buffer.
+ for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) {
+ if (v_low == buffer_[i] && v_high == buffer_[i + 1]) {
+ return i * kEntrySize;
+ }
+ }
+ }
+
+ // Didn't match anything.
+ int result = buffer_.size() * kEntrySize;
+ buffer_.push_back(v_low);
+ buffer_.push_back(v_high);
+ return result;
+}
+
+int ConstantArea::AddDouble(double v) {
+ // Treat the value as a 64-bit integer value.
+ return AddInt64(bit_cast<int64_t, double>(v));
+}
+
+int ConstantArea::AddFloat(float v) {
+ // Treat the value as a 32-bit integer value.
+ return AddInt32(bit_cast<int32_t, float>(v));
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 0c90f2801a..7d7b3d347b 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -86,7 +86,7 @@ class Operand : public ValueObject {
protected:
// Operand can be sub classed (e.g: Address).
- Operand() : length_(0) { }
+ Operand() : length_(0), fixup_(nullptr) { }
void SetModRM(int mod_in, Register rm_in) {
CHECK_EQ(mod_in & ~3, 0);
@@ -113,11 +113,23 @@ class Operand : public ValueObject {
length_ += disp_size;
}
+ AssemblerFixup* GetFixup() const {
+ return fixup_;
+ }
+
+ void SetFixup(AssemblerFixup* fixup) {
+ fixup_ = fixup;
+ }
+
private:
uint8_t length_;
uint8_t encoding_[6];
- explicit Operand(Register reg) { SetModRM(3, reg); }
+ // A fixup can be associated with the operand, in order to be applied after the
+ // code has been generated. This is used for constant area fixups.
+ AssemblerFixup* fixup_;
+
+ explicit Operand(Register reg) : fixup_(nullptr) { SetModRM(3, reg); }
// Get the operand encoding byte at the given index.
uint8_t encoding_at(int index_in) const {
@@ -136,6 +148,11 @@ class Address : public Operand {
Init(base_in, disp);
}
+ Address(Register base_in, int32_t disp, AssemblerFixup* fixup) {
+ Init(base_in, disp);
+ SetFixup(fixup);
+ }
+
Address(Register base_in, Offset disp) {
Init(base_in, disp.Int32Value());
}
@@ -226,6 +243,50 @@ class NearLabel : private Label {
DISALLOW_COPY_AND_ASSIGN(NearLabel);
};
+/**
+ * Class to handle constant area values.
+ */
+class ConstantArea {
+ public:
+ ConstantArea() {}
+
+ // Add a double to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddDouble(double v);
+
+ // Add a float to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddFloat(float v);
+
+ // Add an int32_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddInt32(int32_t v);
+
+ // Add an int64_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddInt64(int64_t v);
+
+ bool IsEmpty() const {
+ return buffer_.size() == 0;
+ }
+
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
+ }
+
+ void AddFixup(AssemblerFixup* fixup) {
+ fixups_.push_back(fixup);
+ }
+
+ const std::vector<AssemblerFixup*>& GetFixups() const {
+ return fixups_;
+ }
+
+ private:
+ static constexpr size_t kEntrySize = sizeof(int32_t);
+ std::vector<int32_t> buffer_;
+ std::vector<AssemblerFixup*> fixups_;
+};
class X86Assembler FINAL : public Assembler {
public:
@@ -667,6 +728,29 @@ class X86Assembler FINAL : public Assembler {
}
}
+ // Add a double to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddDouble(double v) { return constant_area_.AddDouble(v); }
+
+ // Add a float to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddFloat(float v) { return constant_area_.AddFloat(v); }
+
+ // Add an int32_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+
+ // Add an int64_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+
+ // Add the contents of the constant area to the assembler buffer.
+ void AddConstantArea();
+
+ // Is the constant area empty? Return true if there are no literals in the constant area.
+ bool IsConstantAreaEmpty() const { return constant_area_.IsEmpty(); }
+ void AddConstantAreaFixup(AssemblerFixup* fixup) { constant_area_.AddFixup(fixup); }
+
private:
inline void EmitUint8(uint8_t value);
inline void EmitInt32(int32_t value);
@@ -685,6 +769,8 @@ class X86Assembler FINAL : public Assembler {
void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
void EmitGenericShift(int rm, const Operand& operand, Register shifter);
+ ConstantArea constant_area_;
+
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
};
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 963eecb84a..995a1d5c0d 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -99,6 +99,7 @@ LIBART_COMMON_SRC_FILES := \
jit/jit.cc \
jit/jit_code_cache.cc \
jit/jit_instrumentation.cc \
+ jit/profiling_info.cc \
lambda/art_lambda_method.cc \
lambda/box_table.cc \
lambda/closure.cc \
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index cfd7fcd0d6..a84c20a355 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -26,6 +26,7 @@
#include "dex_file.h"
#include "dex_file-inl.h"
#include "gc_root-inl.h"
+#include "jit/profiling_info.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
@@ -545,6 +546,10 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor) {
}
visitor.VisitRootIfNonNull(declaring_class_.AddressWithoutBarrier());
+ ProfilingInfo* profiling_info = GetProfilingInfo();
+ if (hotness_count_ != 0 && !IsNative() && profiling_info != nullptr) {
+ profiling_info->VisitRoots(visitor);
+ }
}
inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 64416d2137..5dbea529c5 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -30,6 +30,7 @@
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jit/profiling_info.h"
#include "jni_internal.h"
#include "mapping_table.h"
#include "mirror/abstract_method.h"
@@ -579,4 +580,16 @@ const uint8_t* ArtMethod::GetQuickenedInfo() {
return oat_method.GetVmapTable();
}
+ProfilingInfo* ArtMethod::CreateProfilingInfo() {
+ ProfilingInfo* info = ProfilingInfo::Create(this);
+ MemberOffset offset = ArtMethod::EntryPointFromJniOffset(sizeof(void*));
+ uintptr_t pointer = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
+ if (!reinterpret_cast<Atomic<ProfilingInfo*>*>(pointer)->
+ CompareExchangeStrongSequentiallyConsistent(nullptr, info)) {
+ return GetProfilingInfo();
+ } else {
+ return info;
+ }
+}
+
} // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index e0b11d0e56..3f2161f4ee 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -33,6 +33,7 @@
namespace art {
union JValue;
+class ProfilingInfo;
class ScopedObjectAccessAlreadyRunnable;
class StringPiece;
class ShadowFrame;
@@ -389,16 +390,25 @@ class ArtMethod FINAL {
PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
}
+ ProfilingInfo* CreateProfilingInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ProfilingInfo* GetProfilingInfo() {
+ return reinterpret_cast<ProfilingInfo*>(GetEntryPointFromJni());
+ }
+
void* GetEntryPointFromJni() {
return GetEntryPointFromJniPtrSize(sizeof(void*));
}
+
ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) {
return GetNativePointer<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
}
void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsNative());
SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
}
+
ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
SetNativePointer(EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
}
@@ -523,6 +533,10 @@ class ArtMethod FINAL {
ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t IncrementCounter() {
+ return ++hotness_count_;
+ }
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
@@ -544,7 +558,11 @@ class ArtMethod FINAL {
// Entry within a dispatch table for this method. For static/direct methods the index is into
// the declaringClass.directMethods, for virtual methods the vtable and for interface methods the
// ifTable.
- uint32_t method_index_;
+ uint16_t method_index_;
+
+ // The hotness we measure for this method. Incremented by the interpreter. Not atomic, as we allow
+ // missing increments: if the method is hot, we will see it eventually.
+ uint16_t hotness_count_;
// Fake padding field gets inserted here.
@@ -558,7 +576,8 @@ class ArtMethod FINAL {
// Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
GcRoot<mirror::Class>* dex_cache_resolved_types_;
- // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
+ // Pointer to JNI function registered to this method, or a function to resolve the JNI function,
+ // or the profiling data for non-native methods.
void* entry_point_from_jni_;
// Method dispatch from quick compiled code invokes this pointer which may cause bridging into
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 5f2c944838..73da2cbe5b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1295,7 +1295,8 @@ bool ClassLinker::ClassInClassTable(mirror::Class* klass) {
}
void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ Thread* const self = Thread::Current();
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootStickyClass));
if ((flags & kVisitRootFlagAllRoots) != 0) {
@@ -1315,9 +1316,13 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
// Need to make sure to not copy ArtMethods without doing read barriers since the roots are
// marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
boot_class_table_.VisitRoots(buffered_visitor);
- for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
- // May be null for boot ClassLoader.
- root.VisitRoot(visitor, RootInfo(kRootVMInternal));
+ // TODO: Avoid marking these to enable class unloading.
+ JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
+ for (jweak weak_root : class_loaders_) {
+ mirror::Object* class_loader =
+ down_cast<mirror::ClassLoader*>(vm->DecodeWeakGlobal(self, weak_root));
+ // Don't need to update anything since the class loaders will be updated by SweepSystemWeaks.
+ visitor->VisitRootIfNonNull(&class_loader, RootInfo(kRootVMInternal));
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
@@ -1353,14 +1358,31 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
}
}
+class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
+ public:
+ explicit VisitClassLoaderClassesVisitor(ClassVisitor* visitor)
+ : visitor_(visitor),
+ done_(false) {}
+
+ void Visit(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ ClassTable* const class_table = class_loader->GetClassTable();
+ if (!done_ && class_table != nullptr && !class_table->Visit(visitor_)) {
+ // If the visitor ClassTable returns false it means that we don't need to continue.
+ done_ = true;
+ }
+ }
+
+ private:
+ ClassVisitor* const visitor_;
+ // If done is true then we don't need to do any more visiting.
+ bool done_;
+};
+
void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) {
if (boot_class_table_.Visit(visitor)) {
- for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
- ClassTable* const class_table = root.Read()->GetClassTable();
- if (class_table != nullptr && !class_table->Visit(visitor)) {
- return;
- }
- }
+ VisitClassLoaderClassesVisitor loader_visitor(visitor);
+ VisitClassLoaders(&loader_visitor);
}
}
@@ -1479,10 +1501,17 @@ ClassLinker::~ClassLinker() {
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
STLDeleteElements(&oat_files_);
- for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
- ClassTable* const class_table = root.Read()->GetClassTable();
- delete class_table;
+ Thread* const self = Thread::Current();
+ JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
+ for (jweak weak_root : class_loaders_) {
+ auto* const class_loader = down_cast<mirror::ClassLoader*>(
+ vm->DecodeWeakGlobal(self, weak_root));
+ if (class_loader != nullptr) {
+ delete class_loader->GetClassTable();
+ }
+ vm->DeleteWeakGlobalRef(self, weak_root);
}
+ class_loaders_.clear();
}
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
@@ -2611,8 +2640,7 @@ mirror::DexCache* ClassLinker::FindDexCacheLocked(Thread* self,
bool allow_failure) {
// Search assuming unique-ness of dex file.
JavaVMExt* const vm = self->GetJniEnv()->vm;
- for (jobject weak_root : dex_caches_) {
- DCHECK_EQ(GetIndirectRefKind(weak_root), kWeakGlobal);
+ for (jweak weak_root : dex_caches_) {
mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
vm->DecodeWeakGlobal(self, weak_root));
if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
@@ -2985,15 +3013,25 @@ void ClassLinker::MoveImageClassesToClassTable() {
dex_cache_image_class_lookup_required_ = false;
}
-void ClassLinker::MoveClassTableToPreZygote() {
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- boot_class_table_.FreezeSnapshot();
- for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
- ClassTable* const class_table = root.Read()->GetClassTable();
+class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
+ public:
+ explicit MoveClassTableToPreZygoteVisitor() {}
+
+ void Visit(mirror::ClassLoader* class_loader)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+ ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
class_table->FreezeSnapshot();
}
}
+};
+
+void ClassLinker::MoveClassTableToPreZygote() {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ boot_class_table_.FreezeSnapshot();
+ MoveClassTableToPreZygoteVisitor visitor;
+ VisitClassLoadersAndRemoveClearedLoaders(&visitor);
}
mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
@@ -3019,25 +3057,43 @@ mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
return nullptr;
}
+// Look up classes by hash and descriptor and put all matching ones in the result array.
+class LookupClassesVisitor : public ClassLoaderVisitor {
+ public:
+ LookupClassesVisitor(const char* descriptor, size_t hash, std::vector<mirror::Class*>* result)
+ : descriptor_(descriptor),
+ hash_(hash),
+ result_(result) {}
+
+ void Visit(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ ClassTable* const class_table = class_loader->GetClassTable();
+ mirror::Class* klass = class_table->Lookup(descriptor_, hash_);
+ if (klass != nullptr) {
+ result_->push_back(klass);
+ }
+ }
+
+ private:
+ const char* const descriptor_;
+ const size_t hash_;
+ std::vector<mirror::Class*>* const result_;
+};
+
void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
result.clear();
if (dex_cache_image_class_lookup_required_) {
MoveImageClassesToClassTable();
}
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
const size_t hash = ComputeModifiedUtf8Hash(descriptor);
mirror::Class* klass = boot_class_table_.Lookup(descriptor, hash);
if (klass != nullptr) {
result.push_back(klass);
}
- for (GcRoot<mirror::ClassLoader>& root : class_loaders_) {
- // There can only be one class with the same descriptor per class loader.
- ClassTable* const class_table = root.Read()->GetClassTable();
- klass = class_table->Lookup(descriptor, hash);
- if (klass != nullptr) {
- result.push_back(klass);
- }
- }
+ LookupClassesVisitor visitor(descriptor, hash, &result);
+ VisitClassLoaders(&visitor);
}
void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
@@ -4109,7 +4165,8 @@ ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* cla
ClassTable* class_table = class_loader->GetClassTable();
if (class_table == nullptr) {
class_table = new ClassTable;
- class_loaders_.push_back(class_loader);
+ Thread* const self = Thread::Current();
+ class_loaders_.push_back(self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader));
// Don't already have a class table, add it to the class loader.
class_loader->SetClassTable(class_table);
}
@@ -5875,26 +5932,33 @@ void ClassLinker::DumpForSigQuit(std::ostream& os) {
<< NumNonZygoteClasses() << "\n";
}
-size_t ClassLinker::NumZygoteClasses() const {
- size_t sum = boot_class_table_.NumZygoteClasses();
- for (const GcRoot<mirror::ClassLoader>& root : class_loaders_) {
- ClassTable* const class_table = root.Read()->GetClassTable();
+class CountClassesVisitor : public ClassLoaderVisitor {
+ public:
+ CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
+
+ void Visit(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
- sum += class_table->NumZygoteClasses();
+ num_zygote_classes += class_table->NumZygoteClasses();
+ num_non_zygote_classes += class_table->NumNonZygoteClasses();
}
}
- return sum;
+
+ size_t num_zygote_classes;
+ size_t num_non_zygote_classes;
+};
+
+size_t ClassLinker::NumZygoteClasses() const {
+ CountClassesVisitor visitor;
+ VisitClassLoaders(&visitor);
+ return visitor.num_zygote_classes + boot_class_table_.NumZygoteClasses();
}
size_t ClassLinker::NumNonZygoteClasses() const {
- size_t sum = boot_class_table_.NumNonZygoteClasses();
- for (const GcRoot<mirror::ClassLoader>& root : class_loaders_) {
- ClassTable* const class_table = root.Read()->GetClassTable();
- if (class_table != nullptr) {
- sum += class_table->NumNonZygoteClasses();
- }
- }
- return sum;
+ CountClassesVisitor visitor;
+ VisitClassLoaders(&visitor);
+ return visitor.num_non_zygote_classes + boot_class_table_.NumNonZygoteClasses();
}
size_t ClassLinker::NumLoadedClasses() {
@@ -6107,4 +6171,35 @@ void ClassLinker::DropFindArrayClassCache() {
find_array_class_cache_next_victim_ = 0;
}
+void ClassLinker::VisitClassLoadersAndRemoveClearedLoaders(ClassLoaderVisitor* visitor) {
+ Thread* const self = Thread::Current();
+ Locks::classlinker_classes_lock_->AssertExclusiveHeld(self);
+ JavaVMExt* const vm = self->GetJniEnv()->vm;
+ for (auto it = class_loaders_.begin(); it != class_loaders_.end();) {
+ const jweak weak_root = *it;
+ mirror::ClassLoader* const class_loader = down_cast<mirror::ClassLoader*>(
+ vm->DecodeWeakGlobal(self, weak_root));
+ if (class_loader != nullptr) {
+ visitor->Visit(class_loader);
+ ++it;
+ } else {
+ // Remove the cleared weak reference from the array.
+ vm->DeleteWeakGlobalRef(self, weak_root);
+ it = class_loaders_.erase(it);
+ }
+ }
+}
+
+void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
+ Thread* const self = Thread::Current();
+ JavaVMExt* const vm = self->GetJniEnv()->vm;
+ for (jweak weak_root : class_loaders_) {
+ mirror::ClassLoader* const class_loader = down_cast<mirror::ClassLoader*>(
+ vm->DecodeWeakGlobal(self, weak_root));
+ if (class_loader != nullptr) {
+ visitor->Visit(class_loader);
+ }
+ }
+}
+
} // namespace art
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 17aa48a6f4..fee706625b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -59,6 +59,13 @@ template<size_t kNumReferences> class PACKED(4) StackHandleScope;
enum VisitRootFlags : uint8_t;
+class ClassLoaderVisitor {
+ public:
+ virtual ~ClassLoaderVisitor() {}
+ virtual void Visit(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
+};
+
class ClassLinker {
public:
// Well known mirror::Class roots accessed via GetClassRoot.
@@ -540,8 +547,18 @@ class ClassLinker {
void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
private:
+ // The RemoveClearedLoaders version removes cleared weak global class loaders and frees their
+ // class tables. This version can only be called with reader access to the
+ // classlinker_classes_lock_ since it modifies the class_loaders_ list.
+ void VisitClassLoadersAndRemoveClearedLoaders(ClassLoaderVisitor* visitor)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitClassLoaders(ClassLoaderVisitor* visitor) const
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+
+
void VisitClassesInternal(ClassVisitor* visitor)
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
// Returns the number of zygote and image classes.
size_t NumZygoteClasses() const
@@ -726,7 +743,7 @@ class ClassLinker {
size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
return dex_caches_.size();
}
- const std::list<jobject>& GetDexCaches() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
+ const std::list<jweak>& GetDexCaches() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
return dex_caches_;
}
@@ -805,12 +822,12 @@ class ClassLinker {
mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// JNI weak globals to allow dex caches to get unloaded. We lazily delete weak globals when we
// register new dex files.
- std::list<jobject> dex_caches_ GUARDED_BY(dex_lock_);
+ std::list<jweak> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
- // This contains the class laoders which have class tables. It is populated by
- // InsertClassTableForClassLoader.
- std::vector<GcRoot<mirror::ClassLoader>> class_loaders_
+ // This contains the class loaders which have class tables. It is populated by
+ // InsertClassTableForClassLoader. Weak roots to enable class unloading.
+ std::list<jweak> class_loaders_
GUARDED_BY(Locks::classlinker_classes_lock_);
// Boot class path table. Since the class loader for this is null.
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 6b18d9009d..727392eb6f 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -58,10 +58,10 @@ class ClassTable {
REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of classes in previous snapshots.
- size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+ size_t NumZygoteClasses() const SHARED_REQUIRES(Locks::classlinker_classes_lock_);
// Returns all off the classes in the lastest snapshot.
- size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+ size_t NumNonZygoteClasses() const SHARED_REQUIRES(Locks::classlinker_classes_lock_);
// Update a class in the table with the new class. Returns the existing class which was replaced.
mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
@@ -79,7 +79,7 @@ class ClassTable {
// Return false if the callback told us to exit.
bool Visit(ClassVisitor* visitor)
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
mirror::Class* Lookup(const char* descriptor, size_t hash)
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e1aca2fdaa..72226af76d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -218,6 +218,17 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
<< " " << dex_pc_offset;
}
+ // We only care about invokes in the Jit.
+ void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object*,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtMethod*)
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ LOG(ERROR) << "Unexpected invoke event in debugger " << PrettyMethod(method)
+ << " " << dex_pc;
+ }
+
private:
static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -3490,6 +3501,62 @@ bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
return instrumentation->IsDeoptimized(m);
}
+struct NeedsDeoptimizationVisitor : public StackVisitor {
+ public:
+ explicit NeedsDeoptimizationVisitor(Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ needs_deoptimization_(false) {}
+
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ // The visitor is meant to be used when handling exception from compiled code only.
+ CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: " << PrettyMethod(GetMethod());
+ ArtMethod* method = GetMethod();
+ if (method == nullptr) {
+ // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
+ // so we can stop the visit.
+ DCHECK(!needs_deoptimization_);
+ return false;
+ }
+ if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+ // We found a compiled frame in the stack but instrumentation is set to interpret
+ // everything: we need to deoptimize.
+ needs_deoptimization_ = true;
+ return false;
+ }
+ if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
+ // We found a deoptimized method in the stack.
+ needs_deoptimization_ = true;
+ return false;
+ }
+ return true;
+ }
+
+ bool NeedsDeoptimization() const {
+ return needs_deoptimization_;
+ }
+
+ private:
+ // Do we need to deoptimize the stack?
+ bool needs_deoptimization_;
+
+ DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
+};
+
+// Do we need to deoptimize the stack to handle an exception?
+bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
+ const SingleStepControl* const ssc = thread->GetSingleStepControl();
+ if (ssc != nullptr) {
+ // We deopt to step into the catch handler.
+ return true;
+ }
+ // Deoptimization is required if at least one method in the stack needs it. However we
+ // skip frames that will be unwound (thus not executed).
+ NeedsDeoptimizationVisitor visitor(thread);
+ visitor.WalkStack(true); // includes upcall.
+ return visitor.NeedsDeoptimization();
+}
+
// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
// cause suspension if the thread is the current thread.
class ScopedDebuggerThreadSuspension {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index a9fa6ce8cb..8278fc6e9e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -576,6 +576,19 @@ class Dbg {
return IsForcedInterpreterNeededForUpcallImpl(thread, m);
}
+ // Indicates whether we need to force the use of interpreter when handling an
+ // exception. This allows to deoptimize the stack and continue execution with
+ // the interpreter.
+ // Note: the interpreter will start by handling the exception when executing
+ // the deoptimized frames.
+ static bool IsForcedInterpreterNeededForException(Thread* thread)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!IsDebuggerActive()) {
+ return false;
+ }
+ return IsForcedInterpreterNeededForExceptionImpl(thread);
+ }
+
// Single-stepping.
static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
JDWP::JdwpStepDepth depth)
@@ -734,6 +747,9 @@ class Dbg {
static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
SHARED_REQUIRES(Locks::mutator_lock_);
+ static bool IsForcedInterpreterNeededForExceptionImpl(Thread* thread)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Indicates whether the debugger is making requests.
static bool gDebuggerActive;
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 98d4e59ec8..47e5c124ff 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1275,6 +1275,8 @@ class DexFile {
// pointer to the OatDexFile it was loaded from. Otherwise oat_dex_file_ is
// null.
const OatDexFile* oat_dex_file_;
+
+ friend class DexFileVerifierTest;
};
struct DexFileReference {
@@ -1459,6 +1461,9 @@ class ClassDataItemIterator {
uint32_t GetMethodCodeItemOffset() const {
return method_.code_off_;
}
+ const uint8_t* DataPointer() const {
+ return ptr_pos_;
+ }
const uint8_t* EndDataPointer() const {
CHECK(!HasNext());
return ptr_pos_;
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index eec49839ef..09416cc5c4 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -16,7 +16,9 @@
#include "dex_file_verifier.h"
+#include <inttypes.h>
#include <zlib.h>
+
#include <memory>
#include "base/stringprintf.h"
@@ -444,66 +446,86 @@ bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_it
return true;
}
-bool DexFileVerifier::CheckClassDataItemField(uint32_t idx, uint32_t access_flags,
+bool DexFileVerifier::CheckClassDataItemField(uint32_t idx,
+ uint32_t access_flags,
+ uint32_t class_access_flags,
+ uint16_t class_type_index,
bool expect_static) {
+ // Check for overflow.
if (!CheckIndex(idx, header_->field_ids_size_, "class_data_item field_idx")) {
return false;
}
+ // Check that it's the right class.
+ uint16_t my_class_index =
+ (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + idx)->
+ class_idx_;
+ if (class_type_index != my_class_index) {
+ ErrorStringPrintf("Field's class index unexpected, %" PRIu16 "vs %" PRIu16,
+ my_class_index,
+ class_type_index);
+ return false;
+ }
+
+ // Check that it falls into the right class-data list.
bool is_static = (access_flags & kAccStatic) != 0;
if (UNLIKELY(is_static != expect_static)) {
ErrorStringPrintf("Static/instance field not in expected list");
return false;
}
- if (UNLIKELY((access_flags & ~kAccJavaFlagsMask) != 0)) {
- ErrorStringPrintf("Bad class_data_item field access_flags %x", access_flags);
+ // Check field access flags.
+ std::string error_msg;
+ if (!CheckFieldAccessFlags(access_flags, class_access_flags, &error_msg)) {
+ ErrorStringPrintf("%s", error_msg.c_str());
return false;
}
return true;
}
-bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx, uint32_t access_flags,
+bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx,
+ uint32_t access_flags,
+ uint32_t class_access_flags,
+ uint16_t class_type_index,
uint32_t code_offset,
- std::unordered_set<uint32_t>& direct_method_indexes,
+ std::unordered_set<uint32_t>* direct_method_indexes,
bool expect_direct) {
+ DCHECK(direct_method_indexes != nullptr);
+ // Check for overflow.
if (!CheckIndex(idx, header_->method_ids_size_, "class_data_item method_idx")) {
return false;
}
- bool is_direct = (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0;
- bool expect_code = (access_flags & (kAccNative | kAccAbstract)) == 0;
- bool is_synchronized = (access_flags & kAccSynchronized) != 0;
- bool allow_synchronized = (access_flags & kAccNative) != 0;
-
- if (UNLIKELY(is_direct != expect_direct)) {
- ErrorStringPrintf("Direct/virtual method not in expected list");
+ // Check that it's the right class.
+ uint16_t my_class_index =
+ (reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + idx)->
+ class_idx_;
+ if (class_type_index != my_class_index) {
+ ErrorStringPrintf("Method's class index unexpected, %" PRIu16 "vs %" PRIu16,
+ my_class_index,
+ class_type_index);
return false;
}
+ // Check that it's not defined as both direct and virtual.
if (expect_direct) {
- direct_method_indexes.insert(idx);
- } else if (direct_method_indexes.find(idx) != direct_method_indexes.end()) {
+ direct_method_indexes->insert(idx);
+ } else if (direct_method_indexes->find(idx) != direct_method_indexes->end()) {
ErrorStringPrintf("Found virtual method with same index as direct method: %d", idx);
return false;
}
- constexpr uint32_t access_method_mask = kAccJavaFlagsMask | kAccConstructor |
- kAccDeclaredSynchronized;
- if (UNLIKELY(((access_flags & ~access_method_mask) != 0) ||
- (is_synchronized && !allow_synchronized))) {
- ErrorStringPrintf("Bad class_data_item method access_flags %x", access_flags);
- return false;
- }
-
- if (UNLIKELY(expect_code && (code_offset == 0))) {
- ErrorStringPrintf("Unexpected zero value for class_data_item method code_off with access "
- "flags %x", access_flags);
- return false;
- } else if (UNLIKELY(!expect_code && (code_offset != 0))) {
- ErrorStringPrintf("Unexpected non-zero value %x for class_data_item method code_off"
- " with access flags %x", code_offset, access_flags);
+ // Check method access flags.
+ bool has_code = (code_offset != 0);
+ std::string error_msg;
+ if (!CheckMethodAccessFlags(idx,
+ access_flags,
+ class_access_flags,
+ has_code,
+ expect_direct,
+ &error_msg)) {
+ ErrorStringPrintf("%s", error_msg.c_str());
return false;
}
@@ -689,62 +711,187 @@ bool DexFileVerifier::CheckEncodedAnnotation() {
return true;
}
-bool DexFileVerifier::CheckIntraClassDataItem() {
- ClassDataItemIterator it(*dex_file_, ptr_);
- std::unordered_set<uint32_t> direct_method_indexes;
+bool DexFileVerifier::FindClassFlags(uint32_t index,
+ bool is_field,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags) {
+ DCHECK(class_type_index != nullptr);
+ DCHECK(class_access_flags != nullptr);
- // These calls use the raw access flags to check whether the whole dex field is valid.
- uint32_t prev_index = 0;
- for (; it.HasNextStaticField(); it.Next()) {
- uint32_t curr_index = it.GetMemberIndex();
- if (curr_index < prev_index) {
- ErrorStringPrintf("out-of-order static field indexes %d and %d", prev_index, curr_index);
- return false;
- }
- prev_index = curr_index;
- if (!CheckClassDataItemField(curr_index, it.GetRawMemberAccessFlags(), true)) {
- return false;
- }
+ // First check if the index is valid.
+ if (index >= (is_field ? header_->field_ids_size_ : header_->method_ids_size_)) {
+ return false;
}
- prev_index = 0;
- for (; it.HasNextInstanceField(); it.Next()) {
- uint32_t curr_index = it.GetMemberIndex();
- if (curr_index < prev_index) {
- ErrorStringPrintf("out-of-order instance field indexes %d and %d", prev_index, curr_index);
- return false;
+
+ // Next get the type index.
+ if (is_field) {
+ *class_type_index =
+ (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + index)->
+ class_idx_;
+ } else {
+ *class_type_index =
+ (reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + index)->
+ class_idx_;
+ }
+
+ // Check if that is valid.
+ if (*class_type_index >= header_->type_ids_size_) {
+ return false;
+ }
+
+ // Now search for the class def. This is basically a specialized version of the DexFile code, as
+ // we should not trust that this is a valid DexFile just yet.
+ const DexFile::ClassDef* class_def_begin =
+ reinterpret_cast<const DexFile::ClassDef*>(begin_ + header_->class_defs_off_);
+ for (size_t i = 0; i < header_->class_defs_size_; ++i) {
+ const DexFile::ClassDef* class_def = class_def_begin + i;
+ if (class_def->class_idx_ == *class_type_index) {
+ *class_access_flags = class_def->access_flags_;
+ return true;
}
- prev_index = curr_index;
- if (!CheckClassDataItemField(curr_index, it.GetRawMemberAccessFlags(), false)) {
+ }
+
+ // Didn't find the class-def, not defined here...
+ return false;
+}
+
+bool DexFileVerifier::CheckOrderAndGetClassFlags(bool is_field,
+ const char* type_descr,
+ uint32_t curr_index,
+ uint32_t prev_index,
+ bool* have_class,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags) {
+ if (curr_index < prev_index) {
+ ErrorStringPrintf("out-of-order %s indexes %" PRIu32 " and %" PRIu32,
+ type_descr,
+ prev_index,
+ curr_index);
+ return false;
+ }
+
+ if (!*have_class) {
+ *have_class = FindClassFlags(curr_index, is_field, class_type_index, class_access_flags);
+ if (!*have_class) {
+ // Should have really found one.
+ ErrorStringPrintf("could not find declaring class for %s index %" PRIu32,
+ type_descr,
+ curr_index);
return false;
}
}
- prev_index = 0;
- for (; it.HasNextDirectMethod(); it.Next()) {
- uint32_t curr_index = it.GetMemberIndex();
- if (curr_index < prev_index) {
- ErrorStringPrintf("out-of-order direct method indexes %d and %d", prev_index, curr_index);
+ return true;
+}
+
+template <bool kStatic>
+bool DexFileVerifier::CheckIntraClassDataItemFields(ClassDataItemIterator* it,
+ bool* have_class,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags) {
+ DCHECK(it != nullptr);
+ // These calls use the raw access flags to check whether the whole dex field is valid.
+ uint32_t prev_index = 0;
+ for (; kStatic ? it->HasNextStaticField() : it->HasNextInstanceField(); it->Next()) {
+ uint32_t curr_index = it->GetMemberIndex();
+ if (!CheckOrderAndGetClassFlags(true,
+ kStatic ? "static field" : "instance field",
+ curr_index,
+ prev_index,
+ have_class,
+ class_type_index,
+ class_access_flags)) {
return false;
}
prev_index = curr_index;
- if (!CheckClassDataItemMethod(curr_index, it.GetRawMemberAccessFlags(),
- it.GetMethodCodeItemOffset(), direct_method_indexes, true)) {
+
+ if (!CheckClassDataItemField(curr_index,
+ it->GetRawMemberAccessFlags(),
+ *class_access_flags,
+ *class_type_index,
+ kStatic)) {
return false;
}
}
- prev_index = 0;
- for (; it.HasNextVirtualMethod(); it.Next()) {
- uint32_t curr_index = it.GetMemberIndex();
- if (curr_index < prev_index) {
- ErrorStringPrintf("out-of-order virtual method indexes %d and %d", prev_index, curr_index);
+
+ return true;
+}
+
+template <bool kDirect>
+bool DexFileVerifier::CheckIntraClassDataItemMethods(
+ ClassDataItemIterator* it,
+ std::unordered_set<uint32_t>* direct_method_indexes,
+ bool* have_class,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags) {
+ uint32_t prev_index = 0;
+ for (; kDirect ? it->HasNextDirectMethod() : it->HasNextVirtualMethod(); it->Next()) {
+ uint32_t curr_index = it->GetMemberIndex();
+ if (!CheckOrderAndGetClassFlags(false,
+ kDirect ? "direct method" : "virtual method",
+ curr_index,
+ prev_index,
+ have_class,
+ class_type_index,
+ class_access_flags)) {
return false;
}
prev_index = curr_index;
- if (!CheckClassDataItemMethod(curr_index, it.GetRawMemberAccessFlags(),
- it.GetMethodCodeItemOffset(), direct_method_indexes, false)) {
+
+ if (!CheckClassDataItemMethod(curr_index,
+ it->GetRawMemberAccessFlags(),
+ *class_access_flags,
+ *class_type_index,
+ it->GetMethodCodeItemOffset(),
+ direct_method_indexes,
+ kDirect)) {
return false;
}
}
+ return true;
+}
+
+bool DexFileVerifier::CheckIntraClassDataItem() {
+ ClassDataItemIterator it(*dex_file_, ptr_);
+ std::unordered_set<uint32_t> direct_method_indexes;
+
+ // This code is complicated by the fact that we don't directly know which class this belongs to.
+ // So we need to explicitly search with the first item we find (either field or method), and then,
+ // as the lookup is expensive, cache the result.
+ bool have_class = false;
+ uint16_t class_type_index;
+ uint32_t class_access_flags;
+
+ // Check fields.
+ if (!CheckIntraClassDataItemFields<true>(&it,
+ &have_class,
+ &class_type_index,
+ &class_access_flags)) {
+ return false;
+ }
+ if (!CheckIntraClassDataItemFields<false>(&it,
+ &have_class,
+ &class_type_index,
+ &class_access_flags)) {
+ return false;
+ }
+
+ // Check methods.
+ if (!CheckIntraClassDataItemMethods<true>(&it,
+ &direct_method_indexes,
+ &have_class,
+ &class_type_index,
+ &class_access_flags)) {
+ return false;
+ }
+ if (!CheckIntraClassDataItemMethods<false>(&it,
+ &direct_method_indexes,
+ &have_class,
+ &class_type_index,
+ &class_access_flags)) {
+ return false;
+ }
+
ptr_ = it.EndDataPointer();
return true;
}
@@ -2149,4 +2296,259 @@ void DexFileVerifier::ErrorStringPrintf(const char* fmt, ...) {
va_end(ap);
}
+// Fields and methods may have only one of public/protected/private.
+static bool CheckAtMostOneOfPublicProtectedPrivate(uint32_t flags) {
+ size_t count = (((flags & kAccPublic) == 0) ? 0 : 1) +
+ (((flags & kAccProtected) == 0) ? 0 : 1) +
+ (((flags & kAccPrivate) == 0) ? 0 : 1);
+ return count <= 1;
+}
+
+bool DexFileVerifier::CheckFieldAccessFlags(uint32_t field_access_flags,
+ uint32_t class_access_flags,
+ std::string* error_msg) {
+ // Generally sort out >16-bit flags.
+ if ((field_access_flags & ~kAccJavaFlagsMask) != 0) {
+ *error_msg = StringPrintf("Bad class_data_item field access_flags %x", field_access_flags);
+ return false;
+ }
+
+ // Flags allowed on fields, in general. Other lower-16-bit flags are to be ignored.
+ constexpr uint32_t kFieldAccessFlags = kAccPublic |
+ kAccPrivate |
+ kAccProtected |
+ kAccStatic |
+ kAccFinal |
+ kAccVolatile |
+ kAccTransient |
+ kAccSynthetic |
+ kAccEnum;
+
+  // Fields may have only one of public/protected/private.
+ if (!CheckAtMostOneOfPublicProtectedPrivate(field_access_flags)) {
+ *error_msg = StringPrintf("Field may have only one of public/protected/private, %x",
+ field_access_flags);
+ return false;
+ }
+
+ // Interfaces have a pretty restricted list.
+ if ((class_access_flags & kAccInterface) != 0) {
+ // Interface fields must be public final static.
+ constexpr uint32_t kPublicFinalStatic = kAccPublic | kAccFinal | kAccStatic;
+ if ((field_access_flags & kPublicFinalStatic) != kPublicFinalStatic) {
+ *error_msg = StringPrintf("Interface field is not public final static: %x",
+ field_access_flags);
+ return false;
+ }
+ // Interface fields may be synthetic, but may not have other flags.
+ constexpr uint32_t kDisallowed = ~(kPublicFinalStatic | kAccSynthetic);
+ if ((field_access_flags & kFieldAccessFlags & kDisallowed) != 0) {
+ *error_msg = StringPrintf("Interface field has disallowed flag: %x", field_access_flags);
+ return false;
+ }
+ return true;
+ }
+
+ // Volatile fields may not be final.
+ constexpr uint32_t kVolatileFinal = kAccVolatile | kAccFinal;
+ if ((field_access_flags & kVolatileFinal) == kVolatileFinal) {
+ *error_msg = "Fields may not be volatile and final";
+ return false;
+ }
+
+ return true;
+}
+
+// Try to find the name of the method with the given index. We do not want to rely on DexFile
+// infrastructure at this point, so do it all by hand. begin and header correspond to begin_ and
+// header_ of the DexFileVerifier. str will contain the pointer to the method name on success
+// (flagged by the return value), otherwise error_msg will contain an error string.
+static bool FindMethodName(uint32_t method_index,
+ const uint8_t* begin,
+ const DexFile::Header* header,
+ const char** str,
+ std::string* error_msg) {
+ if (method_index >= header->method_ids_size_) {
+ *error_msg = "Method index not available for method flags verification";
+ return false;
+ }
+ uint32_t string_idx =
+ (reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) +
+ method_index)->name_idx_;
+ if (string_idx >= header->string_ids_size_) {
+ *error_msg = "String index not available for method flags verification";
+ return false;
+ }
+ uint32_t string_off =
+ (reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_) + string_idx)->
+ string_data_off_;
+ if (string_off >= header->file_size_) {
+ *error_msg = "String offset out of bounds for method flags verification";
+ return false;
+ }
+ const uint8_t* str_data_ptr = begin + string_off;
+ DecodeUnsignedLeb128(&str_data_ptr);
+ *str = reinterpret_cast<const char*>(str_data_ptr);
+ return true;
+}
+
+bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
+ uint32_t method_access_flags,
+ uint32_t class_access_flags,
+ bool has_code,
+ bool expect_direct,
+ std::string* error_msg) {
+ // Generally sort out >16-bit flags, except dex knows Constructor and DeclaredSynchronized.
+ constexpr uint32_t kAllMethodFlags =
+ kAccJavaFlagsMask | kAccConstructor | kAccDeclaredSynchronized;
+ if ((method_access_flags & ~kAllMethodFlags) != 0) {
+ *error_msg = StringPrintf("Bad class_data_item method access_flags %x", method_access_flags);
+ return false;
+ }
+
+  // Flags allowed on methods, in general. Other lower-16-bit flags are to be ignored.
+ constexpr uint32_t kMethodAccessFlags = kAccPublic |
+ kAccPrivate |
+ kAccProtected |
+ kAccStatic |
+ kAccFinal |
+ kAccSynthetic |
+ kAccSynchronized |
+ kAccBridge |
+ kAccVarargs |
+ kAccNative |
+ kAccAbstract |
+ kAccStrict;
+
+  // Methods may have only one of public/protected/private.
+ if (!CheckAtMostOneOfPublicProtectedPrivate(method_access_flags)) {
+ *error_msg = StringPrintf("Method may have only one of public/protected/private, %x",
+ method_access_flags);
+ return false;
+ }
+
+ // Try to find the name, to check for constructor properties.
+ const char* str;
+ if (!FindMethodName(method_index, begin_, header_, &str, error_msg)) {
+ return false;
+ }
+ bool is_init_by_name = false;
+ constexpr const char* kInitName = "<init>";
+ size_t str_offset = (reinterpret_cast<const uint8_t*>(str) - begin_);
+ if (header_->file_size_ - str_offset >= sizeof(kInitName)) {
+ is_init_by_name = strcmp(kInitName, str) == 0;
+ }
+ bool is_clinit_by_name = false;
+ constexpr const char* kClinitName = "<clinit>";
+ if (header_->file_size_ - str_offset >= sizeof(kClinitName)) {
+ is_clinit_by_name = strcmp(kClinitName, str) == 0;
+ }
+ bool is_constructor = is_init_by_name || is_clinit_by_name;
+
+ // Only methods named "<clinit>" or "<init>" may be marked constructor. Note: we cannot enforce
+ // the reverse for backwards compatibility reasons.
+ if (((method_access_flags & kAccConstructor) != 0) && !is_constructor) {
+ *error_msg = StringPrintf("Method %" PRIu32 " is marked constructor, but doesn't match name",
+ method_index);
+ return false;
+ }
+ // Check that the static constructor (= static initializer) is named "<clinit>" and that the
+ // instance constructor is called "<init>".
+ if (is_constructor) {
+ bool is_static = (method_access_flags & kAccStatic) != 0;
+ if (is_static ^ is_clinit_by_name) {
+ *error_msg = StringPrintf("Constructor %" PRIu32 " is not flagged correctly wrt/ static.",
+ method_index);
+ return false;
+ }
+ }
+ // Check that static and private methods, as well as constructors, are in the direct methods list,
+ // and other methods in the virtual methods list.
+ bool is_direct = (method_access_flags & (kAccStatic | kAccPrivate)) != 0 || is_constructor;
+ if (is_direct != expect_direct) {
+ *error_msg = StringPrintf("Direct/virtual method %" PRIu32 " not in expected list %d",
+ method_index,
+ expect_direct);
+ return false;
+ }
+
+
+ // From here on out it is easier to mask out the bits we're supposed to ignore.
+ method_access_flags &= kMethodAccessFlags;
+
+ // If there aren't any instructions, make sure that's expected.
+ if (!has_code) {
+ // Only native or abstract methods may not have code.
+ if ((method_access_flags & (kAccNative | kAccAbstract)) == 0) {
+ *error_msg = StringPrintf("Method %" PRIu32 " has no code, but is not marked native or "
+ "abstract",
+ method_index);
+ return false;
+ }
+ // Constructors must always have code.
+ if (is_constructor) {
+ *error_msg = StringPrintf("Constructor %u must not be abstract or native", method_index);
+ return false;
+ }
+ if ((method_access_flags & kAccAbstract) != 0) {
+ // Abstract methods are not allowed to have the following flags.
+ constexpr uint32_t kForbidden =
+ kAccPrivate | kAccStatic | kAccFinal | kAccNative | kAccStrict | kAccSynchronized;
+ if ((method_access_flags & kForbidden) != 0) {
+ *error_msg = StringPrintf("Abstract method %" PRIu32 " has disallowed access flags %x",
+ method_index,
+ method_access_flags);
+ return false;
+ }
+ // Abstract methods must be in an abstract class or interface.
+ if ((class_access_flags & (kAccInterface | kAccAbstract)) == 0) {
+ *error_msg = StringPrintf("Method %" PRIu32 " is abstract, but the declaring class "
+ "is neither abstract nor an interface", method_index);
+ return false;
+ }
+ }
+ // Interfaces are special.
+ if ((class_access_flags & kAccInterface) != 0) {
+ // Interface methods must be public and abstract.
+ if ((method_access_flags & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
+ *error_msg = StringPrintf("Interface method %" PRIu32 " is not public and abstract",
+ method_index);
+ return false;
+ }
+ // At this point, we know the method is public and abstract. This means that all the checks
+ // for invalid combinations above applies. In addition, interface methods must not be
+ // protected. This is caught by the check for only-one-of-public-protected-private.
+ }
+ return true;
+ }
+
+ // When there's code, the method must not be native or abstract.
+ if ((method_access_flags & (kAccNative | kAccAbstract)) != 0) {
+ *error_msg = StringPrintf("Method %" PRIu32 " has code, but is marked native or abstract",
+ method_index);
+ return false;
+ }
+
+ // Only the static initializer may have code in an interface.
+ if (((class_access_flags & kAccInterface) != 0) && !is_clinit_by_name) {
+ *error_msg = StringPrintf("Non-clinit interface method %" PRIu32 " should not have code",
+ method_index);
+ return false;
+ }
+
+ // Instance constructors must not be synchronized and a few other flags.
+ if (is_init_by_name) {
+ static constexpr uint32_t kInitAllowed =
+ kAccPrivate | kAccProtected | kAccPublic | kAccStrict | kAccVarargs | kAccSynthetic;
+ if ((method_access_flags & ~kInitAllowed) != 0) {
+ *error_msg = StringPrintf("Constructor %" PRIu32 " flagged inappropriately %x",
+ method_index,
+ method_access_flags);
+ return false;
+ }
+ }
+
+ return true;
+}
+
} // namespace art
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index ccc40d4442..4f15357ea0 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -57,16 +57,48 @@ class DexFileVerifier {
uint32_t ReadUnsignedLittleEndian(uint32_t size);
bool CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
uint32_t* handler_offsets, uint32_t handlers_size);
- bool CheckClassDataItemField(uint32_t idx, uint32_t access_flags, bool expect_static);
- bool CheckClassDataItemMethod(uint32_t idx, uint32_t access_flags, uint32_t code_offset,
- std::unordered_set<uint32_t>& direct_method_indexes,
+ bool CheckClassDataItemField(uint32_t idx,
+ uint32_t access_flags,
+ uint32_t class_access_flags,
+ uint16_t class_type_index,
+ bool expect_static);
+ bool CheckClassDataItemMethod(uint32_t idx,
+ uint32_t access_flags,
+ uint32_t class_access_flags,
+ uint16_t class_type_index,
+ uint32_t code_offset,
+ std::unordered_set<uint32_t>* direct_method_indexes,
bool expect_direct);
+ bool CheckOrderAndGetClassFlags(bool is_field,
+ const char* type_descr,
+ uint32_t curr_index,
+ uint32_t prev_index,
+ bool* have_class,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags);
+
bool CheckPadding(size_t offset, uint32_t aligned_offset);
bool CheckEncodedValue();
bool CheckEncodedArray();
bool CheckEncodedAnnotation();
bool CheckIntraClassDataItem();
+ // Check all fields of the given type from the given iterator. Load the class data from the first
+ // field, if necessary (and return it), or use the given values.
+ template <bool kStatic>
+ bool CheckIntraClassDataItemFields(ClassDataItemIterator* it,
+ bool* have_class,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags);
+ // Check all methods of the given type from the given iterator. Load the class data from the first
+ // method, if necessary (and return it), or use the given values.
+ template <bool kDirect>
+ bool CheckIntraClassDataItemMethods(ClassDataItemIterator* it,
+ std::unordered_set<uint32_t>* direct_method_indexes,
+ bool* have_class,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags);
+
bool CheckIntraCodeItem();
bool CheckIntraStringDataItem();
bool CheckIntraDebugInfoItem();
@@ -112,6 +144,31 @@ class DexFileVerifier {
void ErrorStringPrintf(const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
+ // Retrieve class index and class access flag from the given member. index is the member index,
+ // which is taken as either a field or a method index (as designated by is_field). The result,
+ // if the member and declaring class could be found, is stored in class_type_index and
+ // class_access_flags.
+ // This is an expensive lookup, as we have to find the class-def by type index, which is a
+ // linear search. The output values should thus be cached by the caller.
+ bool FindClassFlags(uint32_t index,
+ bool is_field,
+ uint16_t* class_type_index,
+ uint32_t* class_access_flags);
+
+ // Check validity of the given access flags, interpreted for a field in the context of a class
+ // with the given second access flags.
+ static bool CheckFieldAccessFlags(uint32_t field_access_flags,
+ uint32_t class_access_flags,
+ std::string* error_msg);
+ // Check validity of the given method and access flags, in the context of a class with the given
+ // second access flags.
+ bool CheckMethodAccessFlags(uint32_t method_index,
+ uint32_t method_access_flags,
+ uint32_t class_access_flags,
+ bool has_code,
+ bool expect_direct,
+ std::string* error_msg);
+
const DexFile* const dex_file_;
const uint8_t* const begin_;
const size_t size_;
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 9f1ffec35f..1b529c9240 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -18,18 +18,20 @@
#include "sys/mman.h"
#include "zlib.h"
+#include <functional>
#include <memory>
#include "base/unix_file/fd_file.h"
+#include "base/bit_utils.h"
#include "base/macros.h"
#include "common_runtime_test.h"
+#include "dex_file-inl.h"
+#include "leb128.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
namespace art {
-class DexFileVerifierTest : public CommonRuntimeTest {};
-
static const uint8_t kBase64Map[256] = {
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
@@ -101,6 +103,64 @@ static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
return dst.release();
}
+static void FixUpChecksum(uint8_t* dex_file) {
+ DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
+ uint32_t expected_size = header->file_size_;
+ uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
+ const uint32_t non_sum = sizeof(DexFile::Header::magic_) + sizeof(DexFile::Header::checksum_);
+ const uint8_t* non_sum_ptr = dex_file + non_sum;
+ adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
+ header->checksum_ = adler_checksum;
+}
+
+// Custom deleter. Necessary to clean up the memory we use (to be able to mutate).
+struct DexFileDeleter {
+ void operator()(DexFile* in) {
+ if (in != nullptr) {
+ delete in->Begin();
+ delete in;
+ }
+ }
+};
+
+using DexFileUniquePtr = std::unique_ptr<DexFile, DexFileDeleter>;
+
+class DexFileVerifierTest : public CommonRuntimeTest {
+ protected:
+ void VerifyModification(const char* dex_file_base64_content,
+ const char* location,
+ std::function<void(DexFile*)> f,
+ const char* expected_error) {
+ DexFileUniquePtr dex_file(WrapAsDexFile(dex_file_base64_content));
+ f(dex_file.get());
+ FixUpChecksum(const_cast<uint8_t*>(dex_file->Begin()));
+
+ std::string error_msg;
+ bool success = DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ location,
+ &error_msg);
+ if (expected_error == nullptr) {
+ EXPECT_TRUE(success) << error_msg;
+ } else {
+ EXPECT_FALSE(success) << "Expected " << expected_error;
+ if (!success) {
+ EXPECT_NE(error_msg.find(expected_error), std::string::npos) << error_msg;
+ }
+ }
+ }
+
+ private:
+ static DexFile* WrapAsDexFile(const char* dex_file_content_in_base_64) {
+ // Decode base64.
+ size_t length;
+ uint8_t* dex_bytes = DecodeBase64(dex_file_content_in_base_64, &length);
+ CHECK(dex_bytes != nullptr);
+ return new DexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr);
+ }
+};
+
static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
const char* location,
std::string* error_msg) {
@@ -133,7 +193,6 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
return dex_file;
}
-
// For reference.
static const char kGoodTestDex[] =
"ZGV4CjAzNQDrVbyVkxX1HljTznNf95AglkUAhQuFtmKkAgAAcAAAAHhWNBIAAAAAAAAAAAQCAAAN"
@@ -157,94 +216,1005 @@ TEST_F(DexFileVerifierTest, GoodDex) {
ASSERT_TRUE(raw.get() != nullptr) << error_msg;
}
-static void FixUpChecksum(uint8_t* dex_file) {
- DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
- uint32_t expected_size = header->file_size_;
- uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
- const uint32_t non_sum = sizeof(DexFile::Header::magic_) + sizeof(DexFile::Header::checksum_);
- const uint8_t* non_sum_ptr = dex_file + non_sum;
- adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
- header->checksum_ = adler_checksum;
+TEST_F(DexFileVerifierTest, MethodId) {
+ // Class idx error.
+ VerifyModification(
+ kGoodTestDex,
+ "method_id_class_idx",
+ [](DexFile* dex_file) {
+ DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+ method_id->class_idx_ = 0xFF;
+ },
+ "could not find declaring class for direct method index 0");
+
+ // Proto idx error.
+ VerifyModification(
+ kGoodTestDex,
+ "method_id_proto_idx",
+ [](DexFile* dex_file) {
+ DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+ method_id->proto_idx_ = 0xFF;
+ },
+ "inter_method_id_item proto_idx");
+
+ // Name idx error.
+ VerifyModification(
+ kGoodTestDex,
+ "method_id_name_idx",
+ [](DexFile* dex_file) {
+ DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+ method_id->name_idx_ = 0xFF;
+ },
+ "String index not available for method flags verification");
}
-static std::unique_ptr<const DexFile> FixChecksumAndOpen(uint8_t* bytes, size_t length,
- const char* location,
- std::string* error_msg) {
- // Check data.
- CHECK(bytes != nullptr);
+// Method flags test class generated from the following smali code. The declared-synchronized
+// flags are there to enforce a 3-byte uLEB128 encoding so we don't have to relayout
+// the code, but we need to remove them before doing tests.
+//
+// .class public LMethodFlags;
+// .super Ljava/lang/Object;
+//
+// .method public static constructor <clinit>()V
+// .registers 1
+// return-void
+// .end method
+//
+// .method public constructor <init>()V
+// .registers 1
+// return-void
+// .end method
+//
+// .method private declared-synchronized foo()V
+// .registers 1
+// return-void
+// .end method
+//
+// .method public declared-synchronized bar()V
+// .registers 1
+// return-void
+// .end method
- // Fixup of checksum.
- FixUpChecksum(bytes);
+static const char kMethodFlagsTestDex[] =
+ "ZGV4CjAzNQCyOQrJaDBwiIWv5MIuYKXhxlLLsQcx5SwgAgAAcAAAAHhWNBIAAAAAAAAAAJgBAAAH"
+ "AAAAcAAAAAMAAACMAAAAAQAAAJgAAAAAAAAAAAAAAAQAAACkAAAAAQAAAMQAAAA8AQAA5AAAAOQA"
+ "AADuAAAA9gAAAAUBAAAZAQAAHAEAACEBAAACAAAAAwAAAAQAAAAEAAAAAgAAAAAAAAAAAAAAAAAA"
+ "AAAAAAABAAAAAAAAAAUAAAAAAAAABgAAAAAAAAABAAAAAQAAAAAAAAD/////AAAAAHoBAAAAAAAA"
+ "CDxjbGluaXQ+AAY8aW5pdD4ADUxNZXRob2RGbGFnczsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgAD"
+ "YmFyAANmb28AAAAAAAAAAQAAAAAAAAAAAAAAAQAAAA4AAAABAAEAAAAAAAAAAAABAAAADgAAAAEA"
+ "AQAAAAAAAAAAAAEAAAAOAAAAAQABAAAAAAAAAAAAAQAAAA4AAAADAQCJgASsAgGBgATAAgKCgAjU"
+ "AgKBgAjoAgAACwAAAAAAAAABAAAAAAAAAAEAAAAHAAAAcAAAAAIAAAADAAAAjAAAAAMAAAABAAAA"
+ "mAAAAAUAAAAEAAAApAAAAAYAAAABAAAAxAAAAAIgAAAHAAAA5AAAAAMQAAABAAAAKAEAAAEgAAAE"
+ "AAAALAEAAAAgAAABAAAAegEAAAAQAAABAAAAmAEAAA==";
- // write to provided file
- std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != nullptr);
- if (!file->WriteFully(bytes, length)) {
- PLOG(FATAL) << "Failed to write base64 as dex file";
+// Find the method data for the first method with the given name (from class 0). Note: the pointer
+// is to the access flags, so that the caller doesn't have to handle the leb128-encoded method-index
+// delta.
+static const uint8_t* FindMethodData(const DexFile* dex_file, const char* name) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+
+ ClassDataItemIterator it(*dex_file, class_data);
+
+ const uint8_t* trailing = class_data;
+ // Need to manually decode the four entries. DataPointer() doesn't work for this, as the first
+ // element has already been loaded into the iterator.
+ DecodeUnsignedLeb128(&trailing);
+ DecodeUnsignedLeb128(&trailing);
+ DecodeUnsignedLeb128(&trailing);
+ DecodeUnsignedLeb128(&trailing);
+
+ // Skip all fields.
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ trailing = it.DataPointer();
+ it.Next();
}
- if (file->FlushCloseOrErase() != 0) {
- PLOG(FATAL) << "Could not flush and close test file.";
+
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ uint32_t method_index = it.GetMemberIndex();
+ uint32_t name_index = dex_file->GetMethodId(method_index).name_idx_;
+ const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+ const char* str = dex_file->GetStringData(string_id);
+ if (strcmp(name, str) == 0) {
+ DecodeUnsignedLeb128(&trailing);
+ return trailing;
+ }
+
+ trailing = it.DataPointer();
+ it.Next();
}
- file.reset();
- // read dex file
- ScopedObjectAccess soa(Thread::Current());
- std::vector<std::unique_ptr<const DexFile>> tmp;
- if (!DexFile::Open(location, location, error_msg, &tmp)) {
- return nullptr;
+ return nullptr;
+}
+
+// Set the method flags to the given value.
+static void SetMethodFlags(DexFile* dex_file, const char* method, uint32_t mask) {
+ uint8_t* method_flags_ptr = const_cast<uint8_t*>(FindMethodData(dex_file, method));
+ CHECK(method_flags_ptr != nullptr) << method;
+
+ // Unroll this, as we only have three bytes, anyways.
+ uint8_t base1 = static_cast<uint8_t>(mask & 0x7F);
+ *(method_flags_ptr++) = (base1 | 0x80);
+ mask >>= 7;
+
+ uint8_t base2 = static_cast<uint8_t>(mask & 0x7F);
+ *(method_flags_ptr++) = (base2 | 0x80);
+ mask >>= 7;
+
+ uint8_t base3 = static_cast<uint8_t>(mask & 0x7F);
+ *method_flags_ptr = base3;
+}
+
+static uint32_t GetMethodFlags(DexFile* dex_file, const char* method) {
+ const uint8_t* method_flags_ptr = const_cast<uint8_t*>(FindMethodData(dex_file, method));
+ CHECK(method_flags_ptr != nullptr) << method;
+ return DecodeUnsignedLeb128(&method_flags_ptr);
+}
+
+// Apply the given mask to method flags.
+static void ApplyMaskToMethodFlags(DexFile* dex_file, const char* method, uint32_t mask) {
+ uint32_t value = GetMethodFlags(dex_file, method);
+ value &= mask;
+ SetMethodFlags(dex_file, method, value);
+}
+
+// Or the given mask into the method flags.
+static void OrMaskToMethodFlags(DexFile* dex_file, const char* method, uint32_t mask) {
+ uint32_t value = GetMethodFlags(dex_file, method);
+ value |= mask;
+ SetMethodFlags(dex_file, method, value);
+}
+
+// Set code_off to 0 for the method.
+static void RemoveCode(DexFile* dex_file, const char* method) {
+ const uint8_t* ptr = FindMethodData(dex_file, method);
+ // Next is flags, pass.
+ DecodeUnsignedLeb128(&ptr);
+
+ // Figure out how many bytes the code_off is.
+ const uint8_t* tmp = ptr;
+ DecodeUnsignedLeb128(&tmp);
+ size_t bytes = tmp - ptr;
+
+ uint8_t* mod = const_cast<uint8_t*>(ptr);
+ for (size_t i = 1; i < bytes; ++i) {
+ *(mod++) = 0x80;
}
- EXPECT_EQ(1U, tmp.size());
- std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
- EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
- EXPECT_TRUE(dex_file->IsReadOnly());
- return dex_file;
+ *mod = 0x00;
}
-static bool ModifyAndLoad(const char* dex_file_content, const char* location, size_t offset,
- uint8_t new_val, std::string* error_msg) {
- // Decode base64.
- size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_content, &length));
- CHECK(dex_bytes.get() != nullptr);
+TEST_F(DexFileVerifierTest, MethodAccessFlagsBase) {
+ // Check that it's OK when the wrong declared-synchronized flag is removed from "foo."
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_ok",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+ },
+ nullptr);
+}
+
+TEST_F(DexFileVerifierTest, MethodAccessFlagsConstructors) {
+ // Make sure we still accept constructors without their flags.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_missing_constructor_tag_ok",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccConstructor);
+ ApplyMaskToMethodFlags(dex_file, "<clinit>", ~kAccConstructor);
+ },
+ nullptr);
- // Make modifications.
- dex_bytes.get()[offset] = new_val;
+ constexpr const char* kConstructors[] = { "<clinit>", "<init>"};
+ for (size_t i = 0; i < 2; ++i) {
+ // Constructor with code marked native.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_constructor_native",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
- // Fixup and load.
- std::unique_ptr<const DexFile> file(FixChecksumAndOpen(dex_bytes.get(), length, location,
- error_msg));
- return file.get() != nullptr;
+ OrMaskToMethodFlags(dex_file, kConstructors[i], kAccNative);
+ },
+ "has code, but is marked native or abstract");
+ // Constructor with code marked abstract.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_constructor_abstract",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, kConstructors[i], kAccAbstract);
+ },
+ "has code, but is marked native or abstract");
+ // Constructor as-is without code.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_constructor_nocode",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ RemoveCode(dex_file, kConstructors[i]);
+ },
+ "has no code, but is not marked native or abstract");
+ // Constructor without code marked native.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_constructor_native_nocode",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, kConstructors[i], kAccNative);
+ RemoveCode(dex_file, kConstructors[i]);
+ },
+ "must not be abstract or native");
+ // Constructor without code marked abstract.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_constructor_abstract_nocode",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, kConstructors[i], kAccAbstract);
+ RemoveCode(dex_file, kConstructors[i]);
+ },
+ "must not be abstract or native");
+ }
+ // <init> may only have (modulo ignored):
+ // kAccPrivate | kAccProtected | kAccPublic | kAccStrict | kAccVarargs | kAccSynthetic
+ static constexpr uint32_t kInitAllowed[] = {
+ 0,
+ kAccPrivate,
+ kAccProtected,
+ kAccPublic,
+ kAccStrict,
+ kAccVarargs,
+ kAccSynthetic
+ };
+ for (size_t i = 0; i < arraysize(kInitAllowed); ++i) {
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "init_allowed_flags",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+ OrMaskToMethodFlags(dex_file, "<init>", kInitAllowed[i]);
+ },
+ nullptr);
+ }
+ // Only one of public-private-protected.
+ for (size_t i = 1; i < 8; ++i) {
+ if (POPCOUNT(i) < 2) {
+ continue;
+ }
+ // Technically the flags match, but just be defensive here.
+ uint32_t mask = ((i & 1) != 0 ? kAccPrivate : 0) |
+ ((i & 2) != 0 ? kAccProtected : 0) |
+ ((i & 4) != 0 ? kAccPublic : 0);
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "init_one_of_ppp",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+ OrMaskToMethodFlags(dex_file, "<init>", mask);
+ },
+ "Method may have only one of public/protected/private");
+ }
+ // <init> doesn't allow
+ // kAccStatic | kAccFinal | kAccSynchronized | kAccBridge
+ // Need to handle static separately as it has its own error message.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "init_not_allowed_flags",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+ OrMaskToMethodFlags(dex_file, "<init>", kAccStatic);
+ },
+ "Constructor 1 is not flagged correctly wrt/ static");
+ static constexpr uint32_t kInitNotAllowed[] = {
+ kAccFinal,
+ kAccSynchronized,
+ kAccBridge
+ };
+ for (size_t i = 0; i < arraysize(kInitNotAllowed); ++i) {
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "init_not_allowed_flags",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+ OrMaskToMethodFlags(dex_file, "<init>", kInitNotAllowed[i]);
+ },
+ "Constructor 1 flagged inappropriately");
+ }
}
-TEST_F(DexFileVerifierTest, MethodId) {
- {
- // Class error.
- ScratchFile tmp;
- std::string error_msg;
- bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 220, 0xFFU, &error_msg);
- ASSERT_TRUE(success);
- ASSERT_NE(error_msg.find("inter_method_id_item class_idx"), std::string::npos) << error_msg;
+TEST_F(DexFileVerifierTest, MethodAccessFlagsMethods) {
+ constexpr const char* kMethods[] = { "foo", "bar"};
+ for (size_t i = 0; i < arraysize(kMethods); ++i) {
+ // Make sure we reject non-constructors marked as constructors.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_non_constructor",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, kMethods[i], kAccConstructor);
+ },
+ "is marked constructor, but doesn't match name");
+
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_native_with_code",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, kMethods[i], kAccNative);
+ },
+ "has code, but is marked native or abstract");
+
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_abstract_with_code",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, kMethods[i], kAccAbstract);
+ },
+ "has code, but is marked native or abstract");
+
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_non_abstract_native_no_code",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ RemoveCode(dex_file, kMethods[i]);
+ },
+ "has no code, but is not marked native or abstract");
+
+ // Abstract methods may not have the following flags.
+ constexpr uint32_t kAbstractDisallowed[] = {
+ kAccPrivate,
+ kAccStatic,
+ kAccFinal,
+ kAccNative,
+ kAccStrict,
+ kAccSynchronized,
+ };
+ for (size_t j = 0; j < arraysize(kAbstractDisallowed); ++j) {
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_abstract_and_disallowed_no_code",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ RemoveCode(dex_file, kMethods[i]);
+
+ // Can't check private and static with foo, as it's in the virtual list and gives a
+ // different error.
+ if (((GetMethodFlags(dex_file, kMethods[i]) & kAccPublic) != 0) &&
+ ((kAbstractDisallowed[j] & (kAccPrivate | kAccStatic)) != 0)) {
+ // Use another breaking flag.
+ OrMaskToMethodFlags(dex_file, kMethods[i], kAccAbstract | kAccFinal);
+ } else {
+ OrMaskToMethodFlags(dex_file, kMethods[i], kAccAbstract | kAbstractDisallowed[j]);
+ }
+ },
+ "has disallowed access flags");
+ }
+
+ // Only one of public-private-protected.
+ for (size_t j = 1; j < 8; ++j) {
+ if (POPCOUNT(j) < 2) {
+ continue;
+ }
+ // Technically the flags match, but just be defensive here.
+ uint32_t mask = ((j & 1) != 0 ? kAccPrivate : 0) |
+ ((j & 2) != 0 ? kAccProtected : 0) |
+ ((j & 4) != 0 ? kAccPublic : 0);
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_one_of_ppp",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, kMethods[i], ~kAccPublic);
+ OrMaskToMethodFlags(dex_file, kMethods[i], mask);
+ },
+ "Method may have only one of public/protected/private");
+ }
}
+}
- {
- // Proto error.
- ScratchFile tmp;
- std::string error_msg;
- bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 222, 0xFFU, &error_msg);
- ASSERT_TRUE(success);
- ASSERT_NE(error_msg.find("inter_method_id_item proto_idx"), std::string::npos) << error_msg;
+TEST_F(DexFileVerifierTest, MethodAccessFlagsIgnoredOK) {
+ constexpr const char* kMethods[] = { "<clinit>", "<init>", "foo", "bar"};
+ for (size_t i = 0; i < arraysize(kMethods); ++i) {
+ // All interesting method flags, other flags are to be ignored.
+ constexpr uint32_t kAllMethodFlags =
+ kAccPublic |
+ kAccPrivate |
+ kAccProtected |
+ kAccStatic |
+ kAccFinal |
+ kAccSynchronized |
+ kAccBridge |
+ kAccVarargs |
+ kAccNative |
+ kAccAbstract |
+ kAccStrict |
+ kAccSynthetic;
+ constexpr uint32_t kIgnoredMask = ~kAllMethodFlags & 0xFFFF;
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "method_flags_ignored",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, kMethods[i], kIgnoredMask);
+ },
+ nullptr);
}
+}
- {
- // Name error.
- ScratchFile tmp;
- std::string error_msg;
- bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 224, 0xFFU, &error_msg);
- ASSERT_TRUE(success);
- ASSERT_NE(error_msg.find("inter_method_id_item name_idx"), std::string::npos) << error_msg;
+// Set of dex files for interface method tests. As it's not as easy to mutate method names, it's
+// just easier to break up bad cases.
+
+// Interface with an instance constructor.
+//
+// .class public interface LInterfaceMethodFlags;
+// .super Ljava/lang/Object;
+//
+// .method public static constructor <clinit>()V
+// .registers 1
+// return-void
+// .end method
+//
+// .method public constructor <init>()V
+// .registers 1
+// return-void
+// .end method
+static const char kMethodFlagsInterfaceWithInit[] =
+ "ZGV4CjAzNQDRNt+hZ6X3I+xe66iVlCW7h9I38HmN4SvUAQAAcAAAAHhWNBIAAAAAAAAAAEwBAAAF"
+ "AAAAcAAAAAMAAACEAAAAAQAAAJAAAAAAAAAAAAAAAAIAAACcAAAAAQAAAKwAAAAIAQAAzAAAAMwA"
+ "AADWAAAA3gAAAPYAAAAKAQAAAgAAAAMAAAAEAAAABAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAQAA"
+ "AAAAAAABAgAAAQAAAAAAAAD/////AAAAADoBAAAAAAAACDxjbGluaXQ+AAY8aW5pdD4AFkxJbnRl"
+ "cmZhY2VNZXRob2RGbGFnczsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgAAAAAAAAAAAQAAAAAAAAAA"
+ "AAAAAQAAAA4AAAABAAEAAAAAAAAAAAABAAAADgAAAAIAAImABJQCAYGABKgCAAALAAAAAAAAAAEA"
+ "AAAAAAAAAQAAAAUAAABwAAAAAgAAAAMAAACEAAAAAwAAAAEAAACQAAAABQAAAAIAAACcAAAABgAA"
+ "AAEAAACsAAAAAiAAAAUAAADMAAAAAxAAAAEAAAAQAQAAASAAAAIAAAAUAQAAACAAAAEAAAA6AQAA"
+ "ABAAAAEAAABMAQAA";
+
+// Standard interface. Use declared-synchronized again for 3B encoding.
+//
+// .class public interface LInterfaceMethodFlags;
+// .super Ljava/lang/Object;
+//
+// .method public static constructor <clinit>()V
+// .registers 1
+// return-void
+// .end method
+//
+// .method public abstract declared-synchronized foo()V
+// .end method
+static const char kMethodFlagsInterface[] =
+ "ZGV4CjAzNQCOM0odZ5bws1d9GSmumXaK5iE/7XxFpOm8AQAAcAAAAHhWNBIAAAAAAAAAADQBAAAF"
+ "AAAAcAAAAAMAAACEAAAAAQAAAJAAAAAAAAAAAAAAAAIAAACcAAAAAQAAAKwAAADwAAAAzAAAAMwA"
+ "AADWAAAA7gAAAAIBAAAFAQAAAQAAAAIAAAADAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAAAABAAA"
+ "AAAAAAABAgAAAQAAAAAAAAD/////AAAAACIBAAAAAAAACDxjbGluaXQ+ABZMSW50ZXJmYWNlTWV0"
+ "aG9kRmxhZ3M7ABJMamF2YS9sYW5nL09iamVjdDsAAVYAA2ZvbwAAAAAAAAABAAAAAAAAAAAAAAAB"
+ "AAAADgAAAAEBAImABJACAYGICAAAAAALAAAAAAAAAAEAAAAAAAAAAQAAAAUAAABwAAAAAgAAAAMA"
+ "AACEAAAAAwAAAAEAAACQAAAABQAAAAIAAACcAAAABgAAAAEAAACsAAAAAiAAAAUAAADMAAAAAxAA"
+ "AAEAAAAMAQAAASAAAAEAAAAQAQAAACAAAAEAAAAiAQAAABAAAAEAAAA0AQAA";
+
+// To simplify generation of interesting "sub-states" of src_value, allow a "simple" mask to apply
+// to a src_value, such that mask bit 0 applies to the lowest set bit in src_value, and so on.
+static uint32_t ApplyMaskShifted(uint32_t src_value, uint32_t mask) {
+ uint32_t result = 0;
+ uint32_t mask_index = 0;
+ while (src_value != 0) {
+ uint32_t index = CTZ(src_value);
+ if (((src_value & (1 << index)) != 0) &&
+ ((mask & (1 << mask_index)) != 0)) {
+ result |= (1 << index);
+ }
+ src_value &= ~(1 << index);
+ mask_index++;
+ }
+ return result;
+}
+
+TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
+ // Reject interface with <init>.
+ VerifyModification(
+ kMethodFlagsInterfaceWithInit,
+ "method_flags_interface_with_init",
+ [](DexFile* dex_file ATTRIBUTE_UNUSED) {},
+ "Non-clinit interface method 1 should not have code");
+
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_ok",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ },
+ nullptr);
+
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_non_public",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+ },
+ "Interface method 1 is not public and abstract");
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_non_abstract",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccAbstract);
+ },
+ "Method 1 has no code, but is not marked native or abstract");
+
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_static",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ OrMaskToMethodFlags(dex_file, "foo", kAccStatic);
+ },
+ "Direct/virtual method 1 not in expected list 0");
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_private",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+ OrMaskToMethodFlags(dex_file, "foo", kAccPrivate);
+ },
+ "Direct/virtual method 1 not in expected list 0");
+
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_non_public",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+ },
+ "Interface method 1 is not public and abstract");
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_protected",
+ [](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+ OrMaskToMethodFlags(dex_file, "foo", kAccProtected);
+ },
+ "Interface method 1 is not public and abstract");
+
+ constexpr uint32_t kAllMethodFlags =
+ kAccPublic |
+ kAccPrivate |
+ kAccProtected |
+ kAccStatic |
+ kAccFinal |
+ kAccSynchronized |
+ kAccBridge |
+ kAccVarargs |
+ kAccNative |
+ kAccAbstract |
+ kAccStrict |
+ kAccSynthetic;
+ constexpr uint32_t kInterfaceMethodFlags =
+ kAccPublic | kAccAbstract | kAccVarargs | kAccBridge | kAccSynthetic;
+ constexpr uint32_t kInterfaceDisallowed = kAllMethodFlags &
+ ~kInterfaceMethodFlags &
+ // Already tested, needed to be separate.
+ ~kAccStatic &
+ ~kAccPrivate &
+ ~kAccProtected;
+ static_assert(kInterfaceDisallowed != 0, "There should be disallowed flags.");
+
+ uint32_t bits = POPCOUNT(kInterfaceDisallowed);
+ for (uint32_t i = 1; i < (1u << bits); ++i) {
+ VerifyModification(
+ kMethodFlagsInterface,
+ "method_flags_interface_non_abstract",
+ [&](DexFile* dex_file) {
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ uint32_t mask = ApplyMaskShifted(kInterfaceDisallowed, i);
+ if ((mask & kAccProtected) != 0) {
+ mask &= ~kAccProtected;
+ ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+ }
+ OrMaskToMethodFlags(dex_file, "foo", mask);
+ },
+ "Abstract method 1 has disallowed access flags");
+ }
+}
+
+///////////////////////////////////////////////////////////////////
+
+// Field flags.
+
+// Find the field data for the first field with the given name (from class 0). Note: the pointer
+// is to the access flags, so that the caller doesn't have to handle the leb128-encoded field-index
+// delta.
+static const uint8_t* FindFieldData(const DexFile* dex_file, const char* name) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+
+ ClassDataItemIterator it(*dex_file, class_data);
+
+ const uint8_t* trailing = class_data;
+ // Need to manually decode the four entries. DataPointer() doesn't work for this, as the first
+ // element has already been loaded into the iterator.
+ DecodeUnsignedLeb128(&trailing);
+ DecodeUnsignedLeb128(&trailing);
+ DecodeUnsignedLeb128(&trailing);
+ DecodeUnsignedLeb128(&trailing);
+
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ uint32_t field_index = it.GetMemberIndex();
+ uint32_t name_index = dex_file->GetFieldId(field_index).name_idx_;
+ const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+ const char* str = dex_file->GetStringData(string_id);
+ if (strcmp(name, str) == 0) {
+ DecodeUnsignedLeb128(&trailing);
+ return trailing;
+ }
+
+ trailing = it.DataPointer();
+ it.Next();
+ }
+
+ return nullptr;
+}
+
+// Set the field flags to the given value.
+static void SetFieldFlags(DexFile* dex_file, const char* field, uint32_t mask) {
+ uint8_t* field_flags_ptr = const_cast<uint8_t*>(FindFieldData(dex_file, field));
+ CHECK(field_flags_ptr != nullptr) << field;
+
+ // Unroll this, as we only have three bytes, anyways.
+ uint8_t base1 = static_cast<uint8_t>(mask & 0x7F);
+ *(field_flags_ptr++) = (base1 | 0x80);
+ mask >>= 7;
+
+ uint8_t base2 = static_cast<uint8_t>(mask & 0x7F);
+ *(field_flags_ptr++) = (base2 | 0x80);
+ mask >>= 7;
+
+ uint8_t base3 = static_cast<uint8_t>(mask & 0x7F);
+ *field_flags_ptr = base3;
+}
+
+static uint32_t GetFieldFlags(DexFile* dex_file, const char* field) {
+ const uint8_t* field_flags_ptr = const_cast<uint8_t*>(FindFieldData(dex_file, field));
+ CHECK(field_flags_ptr != nullptr) << field;
+ return DecodeUnsignedLeb128(&field_flags_ptr);
+}
+
+// Apply the given mask to the field's access flags (bitwise AND).
+static void ApplyMaskToFieldFlags(DexFile* dex_file, const char* field, uint32_t mask) {
+ uint32_t value = GetFieldFlags(dex_file, field);
+ value &= mask;
+ SetFieldFlags(dex_file, field, value);
+}
+
+// Or the given mask into the field's access flags (bitwise OR).
+static void OrMaskToFieldFlags(DexFile* dex_file, const char* field, uint32_t mask) {
+ uint32_t value = GetFieldFlags(dex_file, field);
+ value |= mask;
+ SetFieldFlags(dex_file, field, value);
+}
+
+// Standard class. Use declared-synchronized again for 3B encoding.
+//
+// .class public LFieldFlags;
+// .super Ljava/lang/Object;
+//
+// .field declared-synchronized public foo:I
+//
+// .field declared-synchronized public static bar:I
+
+static const char kFieldFlagsTestDex[] =
+ "ZGV4CjAzNQBtLw7hydbfv4TdXidZyzAB70W7w3vnYJRwAQAAcAAAAHhWNBIAAAAAAAAAAAABAAAF"
+ "AAAAcAAAAAMAAACEAAAAAAAAAAAAAAACAAAAkAAAAAAAAAAAAAAAAQAAAKAAAACwAAAAwAAAAMAA"
+ "AADDAAAA0QAAAOUAAADqAAAAAAAAAAEAAAACAAAAAQAAAAMAAAABAAAABAAAAAEAAAABAAAAAgAA"
+ "AAAAAAD/////AAAAAPQAAAAAAAAAAUkADExGaWVsZEZsYWdzOwASTGphdmEvbGFuZy9PYmplY3Q7"
+ "AANiYXIAA2ZvbwAAAAAAAAEBAAAAiYAIAYGACAkAAAAAAAAAAQAAAAAAAAABAAAABQAAAHAAAAAC"
+ "AAAAAwAAAIQAAAAEAAAAAgAAAJAAAAAGAAAAAQAAAKAAAAACIAAABQAAAMAAAAADEAAAAQAAAPAA"
+ "AAAAIAAAAQAAAPQAAAAAEAAAAQAAAAABAAA=";
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsBase) {
+ // Check that it's OK when the wrong declared-synchronized flag is removed from "foo."
+ VerifyModification(
+ kFieldFlagsTestDex,
+ "field_flags_ok",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+ },
+ nullptr);
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsWrongList) {
+ // Mark the field so that it should appear in the opposite list (instance vs static).
+ VerifyModification(
+ kFieldFlagsTestDex,
+ "field_flags_wrong_list",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToFieldFlags(dex_file, "foo", kAccStatic);
+ },
+ "Static/instance field not in expected list");
+ VerifyModification(
+ kFieldFlagsTestDex,
+ "field_flags_wrong_list",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccStatic);
+ },
+ "Static/instance field not in expected list");
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsPPP) {
+ static const char* kFields[] = { "foo", "bar" };
+ for (size_t i = 0; i < arraysize(kFields); ++i) {
+ // Should be OK to remove public.
+ VerifyModification(
+ kFieldFlagsTestDex,
+ "field_flags_non_public",
+ [&](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToFieldFlags(dex_file, kFields[i], ~kAccPublic);
+ },
+ nullptr);
+ constexpr uint32_t kAccFlags = kAccPublic | kAccPrivate | kAccProtected;
+ uint32_t bits = POPCOUNT(kAccFlags);
+ for (uint32_t j = 1; j < (1u << bits); ++j) {
+ if (POPCOUNT(j) < 2) {
+ continue;
+ }
+ VerifyModification(
+ kFieldFlagsTestDex,
+ "field_flags_ppp",
+ [&](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToFieldFlags(dex_file, kFields[i], ~kAccPublic);
+ uint32_t mask = ApplyMaskShifted(kAccFlags, j);
+ OrMaskToFieldFlags(dex_file, kFields[i], mask);
+ },
+ "Field may have only one of public/protected/private");
+ }
+ }
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsIgnoredOK) {
+ constexpr const char* kFields[] = { "foo", "bar"};
+ for (size_t i = 0; i < arraysize(kFields); ++i) {
+    // All interesting field flags, other flags are to be ignored.
+ constexpr uint32_t kAllFieldFlags =
+ kAccPublic |
+ kAccPrivate |
+ kAccProtected |
+ kAccStatic |
+ kAccFinal |
+ kAccVolatile |
+ kAccTransient |
+ kAccSynthetic |
+ kAccEnum;
+ constexpr uint32_t kIgnoredMask = ~kAllFieldFlags & 0xFFFF;
+ VerifyModification(
+ kFieldFlagsTestDex,
+ "field_flags_ignored",
+ [&](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToFieldFlags(dex_file, kFields[i], kIgnoredMask);
+ },
+ nullptr);
+ }
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsVolatileFinal) {
+ constexpr const char* kFields[] = { "foo", "bar"};
+ for (size_t i = 0; i < arraysize(kFields); ++i) {
+ VerifyModification(
+ kFieldFlagsTestDex,
+ "field_flags_final_and_volatile",
+ [&](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+ OrMaskToFieldFlags(dex_file, kFields[i], kAccVolatile | kAccFinal);
+ },
+ "Fields may not be volatile and final");
}
}
+// Standard interface. Needs to be separate from class as interfaces do not allow instance fields.
+// Use declared-synchronized again for 3B encoding.
+//
+// .class public interface LInterfaceFieldFlags;
+// .super Ljava/lang/Object;
+//
+// .field declared-synchronized public static final foo:I
+
+static const char kFieldFlagsInterfaceTestDex[] =
+ "ZGV4CjAzNQCVMHfEimR1zZPk6hl6O9GPAYqkl3u0umFkAQAAcAAAAHhWNBIAAAAAAAAAAPQAAAAE"
+ "AAAAcAAAAAMAAACAAAAAAAAAAAAAAAABAAAAjAAAAAAAAAAAAAAAAQAAAJQAAACwAAAAtAAAALQA"
+ "AAC3AAAAzgAAAOIAAAAAAAAAAQAAAAIAAAABAAAAAwAAAAEAAAABAgAAAgAAAAAAAAD/////AAAA"
+ "AOwAAAAAAAAAAUkAFUxJbnRlcmZhY2VGaWVsZEZsYWdzOwASTGphdmEvbGFuZy9PYmplY3Q7AANm"
+ "b28AAAAAAAABAAAAAJmACAkAAAAAAAAAAQAAAAAAAAABAAAABAAAAHAAAAACAAAAAwAAAIAAAAAE"
+ "AAAAAQAAAIwAAAAGAAAAAQAAAJQAAAACIAAABAAAALQAAAADEAAAAQAAAOgAAAAAIAAAAQAAAOwA"
+ "AAAAEAAAAQAAAPQAAAA=";
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsInterface) {
+ VerifyModification(
+ kFieldFlagsInterfaceTestDex,
+ "field_flags_interface",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ },
+ nullptr);
+
+ VerifyModification(
+ kFieldFlagsInterfaceTestDex,
+ "field_flags_interface_non_public",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+ },
+ "Interface field is not public final static");
+ VerifyModification(
+ kFieldFlagsInterfaceTestDex,
+ "field_flags_interface_non_final",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccFinal);
+ },
+ "Interface field is not public final static");
+ VerifyModification(
+ kFieldFlagsInterfaceTestDex,
+ "field_flags_interface_protected",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+ OrMaskToFieldFlags(dex_file, "foo", kAccProtected);
+ },
+ "Interface field is not public final static");
+ VerifyModification(
+ kFieldFlagsInterfaceTestDex,
+ "field_flags_interface_private",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+ OrMaskToFieldFlags(dex_file, "foo", kAccPrivate);
+ },
+ "Interface field is not public final static");
+
+ VerifyModification(
+ kFieldFlagsInterfaceTestDex,
+ "field_flags_interface_synthetic",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ OrMaskToFieldFlags(dex_file, "foo", kAccSynthetic);
+ },
+ nullptr);
+
+ constexpr uint32_t kAllFieldFlags =
+ kAccPublic |
+ kAccPrivate |
+ kAccProtected |
+ kAccStatic |
+ kAccFinal |
+ kAccVolatile |
+ kAccTransient |
+ kAccSynthetic |
+ kAccEnum;
+ constexpr uint32_t kInterfaceFieldFlags = kAccPublic | kAccStatic | kAccFinal | kAccSynthetic;
+ constexpr uint32_t kInterfaceDisallowed = kAllFieldFlags &
+ ~kInterfaceFieldFlags &
+ ~kAccProtected &
+ ~kAccPrivate;
+ static_assert(kInterfaceDisallowed != 0, "There should be disallowed flags.");
+
+ uint32_t bits = POPCOUNT(kInterfaceDisallowed);
+ for (uint32_t i = 1; i < (1u << bits); ++i) {
+ VerifyModification(
+ kFieldFlagsInterfaceTestDex,
+ "field_flags_interface_disallowed",
+ [&](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+ uint32_t mask = ApplyMaskShifted(kInterfaceDisallowed, i);
+ if ((mask & kAccProtected) != 0) {
+ mask &= ~kAccProtected;
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+ }
+ OrMaskToFieldFlags(dex_file, "foo", mask);
+ },
+ "Interface field has disallowed flag");
+ }
+}
+
+// Standard bad interface. Needs to be separate from class as interfaces do not allow instance
+// fields. Use declared-synchronized again for 3B encoding.
+//
+// .class public interface LInterfaceFieldFlags;
+// .super Ljava/lang/Object;
+//
+// .field declared-synchronized public final foo:I
+
+static const char kFieldFlagsInterfaceBadTestDex[] =
+ "ZGV4CjAzNQByMUnqYKHBkUpvvNp+9CnZ2VyDkKnRN6VkAQAAcAAAAHhWNBIAAAAAAAAAAPQAAAAE"
+ "AAAAcAAAAAMAAACAAAAAAAAAAAAAAAABAAAAjAAAAAAAAAAAAAAAAQAAAJQAAACwAAAAtAAAALQA"
+ "AAC3AAAAzgAAAOIAAAAAAAAAAQAAAAIAAAABAAAAAwAAAAEAAAABAgAAAgAAAAAAAAD/////AAAA"
+ "AOwAAAAAAAAAAUkAFUxJbnRlcmZhY2VGaWVsZEZsYWdzOwASTGphdmEvbGFuZy9PYmplY3Q7AANm"
+ "b28AAAAAAAAAAQAAAJGACAkAAAAAAAAAAQAAAAAAAAABAAAABAAAAHAAAAACAAAAAwAAAIAAAAAE"
+ "AAAAAQAAAIwAAAAGAAAAAQAAAJQAAAACIAAABAAAALQAAAADEAAAAQAAAOgAAAAAIAAAAQAAAOwA"
+ "AAAAEAAAAQAAAPQAAAA=";
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsInterfaceNonStatic) {
+ VerifyModification(
+ kFieldFlagsInterfaceBadTestDex,
+ "field_flags_interface_non_static",
+ [](DexFile* dex_file) {
+ ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+ },
+ "Interface field is not public final static");
+}
+
// Generated from:
//
// .class public LTest;
@@ -305,15 +1275,14 @@ TEST_F(DexFileVerifierTest, DebugInfoTypeIdxTest) {
ASSERT_TRUE(raw.get() != nullptr) << error_msg;
}
- {
- // Modify the debug information entry.
- ScratchFile tmp;
- std::string error_msg;
- bool success = !ModifyAndLoad(kDebugInfoTestDex, tmp.GetFilename().c_str(), 416, 0x14U,
- &error_msg);
- ASSERT_TRUE(success);
- ASSERT_NE(error_msg.find("DBG_START_LOCAL type_idx"), std::string::npos) << error_msg;
- }
+ // Modify the debug information entry.
+ VerifyModification(
+ kDebugInfoTestDex,
+ "debug_start_type_idx",
+ [](DexFile* dex_file) {
+ *(const_cast<uint8_t*>(dex_file->Begin()) + 416) = 0x14U;
+ },
+ "DBG_START_LOCAL type_idx");
}
} // namespace art
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 83471e6b96..477e67b3c2 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -90,16 +90,16 @@ class RootVisitor {
virtual ~RootVisitor() { }
// Single root version, not overridable.
- ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
+ ALWAYS_INLINE void VisitRoot(mirror::Object** root, const RootInfo& info)
SHARED_REQUIRES(Locks::mutator_lock_) {
- VisitRoots(&roots, 1, info);
+ VisitRoots(&root, 1, info);
}
// Single root version, not overridable.
- ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
+ ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** root, const RootInfo& info)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (*roots != nullptr) {
- VisitRoot(roots, info);
+ if (*root != nullptr) {
+ VisitRoot(root, info);
}
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 63c02ed686..973cd7d790 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -407,6 +407,10 @@ void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t ev
backward_branch_listeners_.push_back(listener);
have_backward_branch_listeners_ = true;
}
+ if (HasEvent(kInvokeVirtualOrInterface, events)) {
+ invoke_virtual_or_interface_listeners_.push_back(listener);
+ have_invoke_virtual_or_interface_listeners_ = true;
+ }
if (HasEvent(kDexPcMoved, events)) {
std::list<InstrumentationListener*>* modified;
if (have_dex_pc_listeners_) {
@@ -466,13 +470,17 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
have_method_exit_listeners_ = !method_exit_listeners_.empty();
}
if (HasEvent(kMethodUnwind, events) && have_method_unwind_listeners_) {
- method_unwind_listeners_.remove(listener);
- have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
+ method_unwind_listeners_.remove(listener);
+ have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
}
if (HasEvent(kBackwardBranch, events) && have_backward_branch_listeners_) {
- backward_branch_listeners_.remove(listener);
- have_backward_branch_listeners_ = !backward_branch_listeners_.empty();
- }
+ backward_branch_listeners_.remove(listener);
+ have_backward_branch_listeners_ = !backward_branch_listeners_.empty();
+ }
+ if (HasEvent(kInvokeVirtualOrInterface, events) && have_invoke_virtual_or_interface_listeners_) {
+ invoke_virtual_or_interface_listeners_.remove(listener);
+ have_invoke_virtual_or_interface_listeners_ = !invoke_virtual_or_interface_listeners_.empty();
+ }
if (HasEvent(kDexPcMoved, events) && have_dex_pc_listeners_) {
std::list<InstrumentationListener*>* modified =
new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
@@ -908,6 +916,16 @@ void Instrumentation::BackwardBranchImpl(Thread* thread, ArtMethod* method,
}
}
+void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* caller,
+ uint32_t dex_pc,
+ ArtMethod* callee) const {
+ for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) {
+ listener->InvokeVirtualOrInterface(thread, this_object, caller, dex_pc, callee);
+ }
+}
+
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 93ff567dc3..6711ac3eb1 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -97,6 +97,14 @@ struct InstrumentationListener {
// Call-back for when we get a backward branch.
virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+
+ // Call-back for when we get an invokevirtual or an invokeinterface.
+ virtual void InvokeVirtualOrInterface(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* caller,
+ uint32_t dex_pc,
+ ArtMethod* callee)
+ SHARED_REQUIRES(Locks::mutator_lock_) = 0;
};
// Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -114,6 +122,7 @@ class Instrumentation {
kFieldWritten = 0x20,
kExceptionCaught = 0x40,
kBackwardBranch = 0x80,
+ kInvokeVirtualOrInterface = 0x100,
};
enum class InstrumentationLevel {
@@ -257,6 +266,10 @@ class Instrumentation {
return have_backward_branch_listeners_;
}
+ bool HasInvokeVirtualOrInterfaceListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return have_invoke_virtual_or_interface_listeners_;
+ }
+
bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
@@ -325,6 +338,17 @@ class Instrumentation {
}
}
+ void InvokeVirtualOrInterface(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* caller,
+ uint32_t dex_pc,
+ ArtMethod* callee) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (UNLIKELY(HasInvokeVirtualOrInterfaceListeners())) {
+ InvokeVirtualOrInterfaceImpl(thread, this_object, caller, dex_pc, callee);
+ }
+ }
+
// Inform listeners that an exception was caught.
void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -385,6 +409,12 @@ class Instrumentation {
SHARED_REQUIRES(Locks::mutator_lock_);
void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ void InvokeVirtualOrInterfaceImpl(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* caller,
+ uint32_t dex_pc,
+ ArtMethod* callee) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
@@ -451,6 +481,9 @@ class Instrumentation {
// Do we have any backward branch listeners? Short-cut to avoid taking the instrumentation_lock_.
bool have_backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ // Do we have any invoke listeners? Short-cut to avoid taking the instrumentation_lock_.
+ bool have_invoke_virtual_or_interface_listeners_ GUARDED_BY(Locks::mutator_lock_);
+
// Contains the instrumentation level required by each client of the instrumentation identified
// by a string key.
typedef SafeMap<const char*, InstrumentationLevel> InstrumentationLevelTable;
@@ -461,6 +494,8 @@ class Instrumentation {
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::list<InstrumentationListener*> invoke_virtual_or_interface_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 56fe9ef8ae..c7cc68adf8 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -36,7 +36,8 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
: received_method_enter_event(false), received_method_exit_event(false),
received_method_unwind_event(false), received_dex_pc_moved_event(false),
received_field_read_event(false), received_field_written_event(false),
- received_exception_caught_event(false), received_backward_branch_event(false) {}
+ received_exception_caught_event(false), received_backward_branch_event(false),
+ received_invoke_virtual_or_interface_event(false) {}
virtual ~TestInstrumentationListener() {}
@@ -105,6 +106,15 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
received_backward_branch_event = true;
}
+ void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* caller ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ ArtMethod* callee ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ received_invoke_virtual_or_interface_event = true;
+ }
+
void Reset() {
received_method_enter_event = false;
received_method_exit_event = false;
@@ -114,6 +124,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
received_field_written_event = false;
received_exception_caught_event = false;
received_backward_branch_event = false;
+ received_invoke_virtual_or_interface_event = false;
}
bool received_method_enter_event;
@@ -124,6 +135,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
bool received_field_written_event;
bool received_exception_caught_event;
bool received_backward_branch_event;
+ bool received_invoke_virtual_or_interface_event;
private:
DISALLOW_COPY_AND_ASSIGN(TestInstrumentationListener);
@@ -287,6 +299,8 @@ class InstrumentationTest : public CommonRuntimeTest {
return instr->HasExceptionCaughtListeners();
case instrumentation::Instrumentation::kBackwardBranch:
return instr->HasBackwardBranchListeners();
+ case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
+ return instr->HasInvokeVirtualOrInterfaceListeners();
default:
LOG(FATAL) << "Unknown instrumentation event " << event_type;
UNREACHABLE();
@@ -330,6 +344,9 @@ class InstrumentationTest : public CommonRuntimeTest {
case instrumentation::Instrumentation::kBackwardBranch:
instr->BackwardBranch(self, method, dex_pc);
break;
+ case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
+ instr->InvokeVirtualOrInterface(self, obj, method, dex_pc, method);
+ break;
default:
LOG(FATAL) << "Unknown instrumentation event " << event_type;
UNREACHABLE();
@@ -355,6 +372,8 @@ class InstrumentationTest : public CommonRuntimeTest {
return listener.received_exception_caught_event;
case instrumentation::Instrumentation::kBackwardBranch:
return listener.received_backward_branch_event;
+ case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
+ return listener.received_invoke_virtual_or_interface_event;
default:
LOG(FATAL) << "Unknown instrumentation event " << event_type;
UNREACHABLE();
@@ -418,6 +437,10 @@ TEST_F(InstrumentationTest, BackwardBranchEvent) {
TestEvent(instrumentation::Instrumentation::kBackwardBranch);
}
+TEST_F(InstrumentationTest, InvokeVirtualOrInterfaceEvent) {
+ TestEvent(instrumentation::Instrumentation::kInvokeVirtualOrInterface);
+}
+
TEST_F(InstrumentationTest, DeoptimizeDirectMethod) {
ScopedObjectAccess soa(Thread::Current());
jobject class_loader = LoadDex("Instrumentation");
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 6c6232c437..3ac80c6642 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -399,14 +399,19 @@ void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JVa
JValue value;
// Set value to last known result in case the shadow frame chain is empty.
value.SetJ(ret_val->GetJ());
+ // Are we executing the first shadow frame?
+ bool first = true;
while (shadow_frame != nullptr) {
self->SetTopOfShadowStack(shadow_frame);
const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
const uint32_t dex_pc = shadow_frame->GetDexPC();
uint32_t new_dex_pc;
if (UNLIKELY(self->IsExceptionPending())) {
+ // If we deoptimize from the QuickExceptionHandler, we already reported the exception to
+ // the instrumentation. To avoid reporting it a second time, we simply pass a
+ // null Instrumentation*.
const instrumentation::Instrumentation* const instrumentation =
- Runtime::Current()->GetInstrumentation();
+ first ? nullptr : Runtime::Current()->GetInstrumentation();
uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame, dex_pc,
instrumentation);
new_dex_pc = found_dex_pc; // the dex pc of a matching catch handler
@@ -424,6 +429,7 @@ void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JVa
ShadowFrame* old_frame = shadow_frame;
shadow_frame = shadow_frame->GetLink();
ShadowFrame::DeleteDeoptimizedFrame(old_frame);
+ first = false;
}
ret_val->SetJ(value.GetJ());
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index af67379375..6602840ed0 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -414,20 +414,21 @@ EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot) // iput-objec
#undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL
#undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL
+// We accept a null Instrumentation* meaning we must not report anything to the instrumentation.
uint32_t FindNextInstructionFollowingException(
Thread* self, ShadowFrame& shadow_frame, uint32_t dex_pc,
const instrumentation::Instrumentation* instrumentation) {
self->VerifyStack();
StackHandleScope<2> hs(self);
Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException()));
- if (instrumentation->HasExceptionCaughtListeners()
+ if (instrumentation != nullptr && instrumentation->HasExceptionCaughtListeners()
&& self->IsExceptionThrownByCurrentMethod(exception.Get())) {
instrumentation->ExceptionCaughtEvent(self, exception.Get());
}
bool clear_exception = false;
uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(
hs.NewHandle(exception->GetClass()), dex_pc, &clear_exception);
- if (found_dex_pc == DexFile::kDexNoIndex) {
+ if (found_dex_pc == DexFile::kDexNoIndex && instrumentation != nullptr) {
// Exception is not caught by the current method. We will unwind to the
// caller. Notify any instrumentation listener.
instrumentation->MethodUnwindEvent(self, shadow_frame.GetThisObject(),
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index fdefb9f74c..7398778d15 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -265,6 +265,13 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr
result->SetJ(0);
return false;
} else {
+ if (type == kVirtual || type == kInterface) {
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
+ instrumentation->InvokeVirtualOrInterface(
+ self, receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+ }
+ }
return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
result);
}
@@ -297,6 +304,11 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
result->SetJ(0);
return false;
} else {
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
+ instrumentation->InvokeVirtualOrInterface(
+ self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
+ }
// No need to check since we've been quickened.
return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index d6c798a863..63e8887ff3 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -580,9 +580,12 @@ inline bool JavaVMExt::MayAccessWeakGlobals(Thread* self) const {
}
inline bool JavaVMExt::MayAccessWeakGlobalsUnlocked(Thread* self) const {
- return kUseReadBarrier
- ? self->GetWeakRefAccessEnabled()
- : allow_accessing_weak_globals_.LoadSequentiallyConsistent();
+ if (kUseReadBarrier) {
+ // self can be null during a runtime shutdown. ~Runtime()->~ClassLinker()->DecodeWeakGlobal().
+ return self != nullptr ? self->GetWeakRefAccessEnabled() : true;
+ } else {
+ return allow_accessing_weak_globals_.LoadSequentiallyConsistent();
+ }
}
mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
@@ -592,6 +595,7 @@ mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
// This only applies in the case where MayAccessWeakGlobals goes from false to true. In the other
// case, it may be racy, this is benign since DecodeWeakGlobalLocked does the correct behavior
// if MayAccessWeakGlobals is false.
+ DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
return weak_globals_.SynchronizedGet(ref);
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 26a4fe49f1..683b2cfa89 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -39,6 +39,8 @@ JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& opt
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
jit_options->compile_threshold_ =
options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+ jit_options->warmup_threshold_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITWarmupThreshold);
jit_options->dump_info_on_shutdown_ =
options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
return jit_options;
@@ -160,17 +162,19 @@ Jit::~Jit() {
}
}
-void Jit::CreateInstrumentationCache(size_t compile_threshold) {
+void Jit::CreateInstrumentationCache(size_t compile_threshold, size_t warmup_threshold) {
CHECK_GT(compile_threshold, 0U);
Runtime* const runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll(__FUNCTION__);
// Add Jit interpreter instrumentation, tells the interpreter when to notify the jit to compile
// something.
- instrumentation_cache_.reset(new jit::JitInstrumentationCache(compile_threshold));
+ instrumentation_cache_.reset(
+ new jit::JitInstrumentationCache(compile_threshold, warmup_threshold));
runtime->GetInstrumentation()->AddListener(
new jit::JitInstrumentationListener(instrumentation_cache_.get()),
instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kBackwardBranch);
+ instrumentation::Instrumentation::kBackwardBranch |
+ instrumentation::Instrumentation::kInvokeVirtualOrInterface);
runtime->GetThreadList()->ResumeAll();
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index ca6e7ea1f8..643bc23da3 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -43,13 +43,14 @@ class JitOptions;
class Jit {
public:
static constexpr bool kStressMode = kIsDebugBuild;
- static constexpr size_t kDefaultCompileThreshold = kStressMode ? 1 : 1000;
+ static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 1000;
+ static constexpr size_t kDefaultWarmupThreshold = kDefaultCompileThreshold / 2;
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
bool CompileMethod(ArtMethod* method, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CreateInstrumentationCache(size_t compile_threshold);
+ void CreateInstrumentationCache(size_t compile_threshold, size_t warmup_threshold);
void CreateThreadPool();
CompilerCallbacks* GetCompilerCallbacks() {
return compiler_callbacks_;
@@ -95,6 +96,9 @@ class JitOptions {
size_t GetCompileThreshold() const {
return compile_threshold_;
}
+ size_t GetWarmupThreshold() const {
+ return warmup_threshold_;
+ }
size_t GetCodeCacheCapacity() const {
return code_cache_capacity_;
}
@@ -112,6 +116,7 @@ class JitOptions {
bool use_jit_;
size_t code_cache_capacity_;
size_t compile_threshold_;
+ size_t warmup_threshold_;
bool dump_info_on_shutdown_;
JitOptions() : use_jit_(false), code_cache_capacity_(0), compile_threshold_(0),
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index cd5f4cb529..4c5316227c 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -82,9 +82,19 @@ uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
return code_cache_ptr_ - size;
}
+uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
+ MutexLock mu(self, lock_);
+ size = RoundUp(size, sizeof(void*));
+ if (size > DataCacheRemain()) {
+ return nullptr;
+ }
+ data_cache_ptr_ += size;
+ return data_cache_ptr_ - size;
+}
+
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
MutexLock mu(self, lock_);
- const size_t size = end - begin;
+ const size_t size = RoundUp(end - begin, sizeof(void*));
if (size > DataCacheRemain()) {
return nullptr; // Out of space in the data cache.
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 9707f6f29d..f485e4aded 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -86,6 +86,9 @@ class JitCodeCache {
// Reserve a region of code of size at least "size". Returns null if there is no more room.
uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
+ // Reserve a region of data of size at least "size". Returns null if there is no more room.
+ uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
+
// Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 258c29dd20..f48568271d 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -26,16 +26,12 @@ namespace jit {
class JitCompileTask : public Task {
public:
- JitCompileTask(ArtMethod* method, JitInstrumentationCache* cache)
- : method_(method), cache_(cache) {
- }
+ explicit JitCompileTask(ArtMethod* method) : method_(method) {}
virtual void Run(Thread* self) OVERRIDE {
ScopedObjectAccess soa(self);
VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
- if (Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
- cache_->SignalCompiled(self, method_);
- } else {
+ if (!Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
}
}
@@ -46,13 +42,14 @@ class JitCompileTask : public Task {
private:
ArtMethod* const method_;
- JitInstrumentationCache* const cache_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
-JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold)
- : lock_("jit instrumentation lock"), hot_method_threshold_(hot_method_threshold) {
+JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold,
+ size_t warm_method_threshold)
+ : hot_method_threshold_(hot_method_threshold),
+ warm_method_threshold_(warm_method_threshold) {
}
void JitInstrumentationCache::CreateThreadPool() {
@@ -60,20 +57,11 @@ void JitInstrumentationCache::CreateThreadPool() {
}
void JitInstrumentationCache::DeleteThreadPool() {
+ DCHECK(Runtime::Current()->IsShuttingDown(Thread::Current()));
thread_pool_.reset();
}
-void JitInstrumentationCache::SignalCompiled(Thread* self, ArtMethod* method) {
- ScopedObjectAccessUnchecked soa(self);
- jmethodID method_id = soa.EncodeMethod(method);
- MutexLock mu(self, lock_);
- auto it = samples_.find(method_id);
- if (it != samples_.end()) {
- samples_.erase(it);
- }
-}
-
-void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t count) {
+void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t) {
ScopedObjectAccessUnchecked soa(self);
// Since we don't have on-stack replacement, some methods can remain in the interpreter longer
// than we want resulting in samples even after the method is compiled.
@@ -81,35 +69,22 @@ void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t
Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
return;
}
- jmethodID method_id = soa.EncodeMethod(method);
- bool is_hot = false;
- {
- MutexLock mu(self, lock_);
- size_t sample_count = 0;
- auto it = samples_.find(method_id);
- if (it != samples_.end()) {
- it->second += count;
- sample_count = it->second;
- } else {
- sample_count = count;
- samples_.insert(std::make_pair(method_id, count));
- }
- // If we have enough samples, mark as hot and request Jit compilation.
- if (sample_count >= hot_method_threshold_ && sample_count - count < hot_method_threshold_) {
- is_hot = true;
- }
+ if (thread_pool_.get() == nullptr) {
+ DCHECK(Runtime::Current()->IsShuttingDown(self));
+ return;
}
- if (is_hot) {
- if (thread_pool_.get() != nullptr) {
- thread_pool_->AddTask(self, new JitCompileTask(
- method->GetInterfaceMethodIfProxy(sizeof(void*)), this));
- thread_pool_->StartWorkers(self);
- } else {
- VLOG(jit) << "Compiling hot method " << PrettyMethod(method);
- Runtime::Current()->GetJit()->CompileMethod(
- method->GetInterfaceMethodIfProxy(sizeof(void*)), self);
+ uint16_t sample_count = method->IncrementCounter();
+ if (sample_count == warm_method_threshold_) {
+ ProfilingInfo* info = method->CreateProfilingInfo();
+ if (info != nullptr) {
+ VLOG(jit) << "Start profiling " << PrettyMethod(method);
}
}
+ if (sample_count == hot_method_threshold_) {
+ thread_pool_->AddTask(self, new JitCompileTask(
+ method->GetInterfaceMethodIfProxy(sizeof(void*))));
+ thread_pool_->StartWorkers(self);
+ }
}
JitInstrumentationListener::JitInstrumentationListener(JitInstrumentationCache* cache)
@@ -117,5 +92,17 @@ JitInstrumentationListener::JitInstrumentationListener(JitInstrumentationCache*
CHECK(instrumentation_cache_ != nullptr);
}
+void JitInstrumentationListener::InvokeVirtualOrInterface(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* caller,
+ uint32_t dex_pc,
+ ArtMethod* callee ATTRIBUTE_UNUSED) {
+ DCHECK(this_object != nullptr);
+ ProfilingInfo* info = caller->GetProfilingInfo();
+ if (info != nullptr) {
+ info->AddInvokeInfo(thread, dex_pc, this_object->GetClass());
+ }
+}
+
} // namespace jit
} // namespace art
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 0deaf8ad02..6fdef6585d 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -45,18 +45,15 @@ namespace jit {
// Keeps track of which methods are hot.
class JitInstrumentationCache {
public:
- explicit JitInstrumentationCache(size_t hot_method_threshold);
+ JitInstrumentationCache(size_t hot_method_threshold, size_t warm_method_threshold);
void AddSamples(Thread* self, ArtMethod* method, size_t samples)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
- void SignalCompiled(Thread* self, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CreateThreadPool();
void DeleteThreadPool();
private:
- Mutex lock_;
- std::unordered_map<jmethodID, size_t> samples_;
size_t hot_method_threshold_;
+ size_t warm_method_threshold_;
std::unique_ptr<ThreadPool> thread_pool_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitInstrumentationCache);
@@ -66,37 +63,43 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen
public:
explicit JitInstrumentationListener(JitInstrumentationCache* cache);
- virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
- ArtMethod* method, uint32_t /*dex_pc*/)
+ void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
+ ArtMethod* method, uint32_t /*dex_pc*/)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
instrumentation_cache_->AddSamples(thread, method, 1);
}
- virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
- ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- const JValue& /*return_value*/)
+ void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ const JValue& /*return_value*/)
OVERRIDE { }
- virtual void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
- ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
- virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
- ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- ArtField* /*field*/) OVERRIDE { }
- virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
- ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- ArtField* /*field*/, const JValue& /*field_value*/)
+ void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
+ void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ ArtField* /*field*/) OVERRIDE { }
+ void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ ArtField* /*field*/, const JValue& /*field_value*/)
OVERRIDE { }
- virtual void ExceptionCaught(Thread* /*thread*/,
- mirror::Throwable* /*exception_object*/) OVERRIDE { }
+ void ExceptionCaught(Thread* /*thread*/,
+ mirror::Throwable* /*exception_object*/) OVERRIDE { }
- virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
- ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
+ void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
+ ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
- // We only care about how many dex instructions were executed in the Jit.
- virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
+ void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK_LE(dex_pc_offset, 0);
instrumentation_cache_->AddSamples(thread, method, 1);
}
+ void InvokeVirtualOrInterface(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* caller,
+ uint32_t dex_pc,
+ ArtMethod* callee)
+ OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
JitInstrumentationCache* const instrumentation_cache_;
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
new file mode 100644
index 0000000000..0c039f2bbd
--- /dev/null
+++ b/runtime/jit/profiling_info.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profiling_info.h"
+
+#include "art_method-inl.h"
+#include "dex_instruction.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+ProfilingInfo* ProfilingInfo::Create(ArtMethod* method) {
+ // Walk over the dex instructions of the method and keep track of
+ // instructions we are interested in profiling.
+ const uint16_t* code_ptr = nullptr;
+ const uint16_t* code_end = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(!method->IsNative());
+ const DexFile::CodeItem& code_item = *method->GetCodeItem();
+ code_ptr = code_item.insns_;
+ code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
+ }
+
+ uint32_t dex_pc = 0;
+ std::vector<uint32_t> entries;
+ while (code_ptr < code_end) {
+ const Instruction& instruction = *Instruction::At(code_ptr);
+ switch (instruction.Opcode()) {
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ entries.push_back(dex_pc);
+ break;
+
+ default:
+ break;
+ }
+ dex_pc += instruction.SizeInCodeUnits();
+ code_ptr += instruction.SizeInCodeUnits();
+ }
+
+ // If there is no instruction we are interested in, no need to create a `ProfilingInfo`
+ // object, as it will never be filled.
+ if (entries.empty()) {
+ return nullptr;
+ }
+
+ // Allocate the `ProfilingInfo` object in the JIT's data space.
+ jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
+ size_t profile_info_size = sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size();
+ uint8_t* data = code_cache->ReserveData(Thread::Current(), profile_info_size);
+
+ if (data == nullptr) {
+ VLOG(jit) << "Cannot allocate profiling info anymore";
+ return nullptr;
+ }
+
+ return new (data) ProfilingInfo(entries);
+}
+
+void ProfilingInfo::AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls) {
+ InlineCache* cache = nullptr;
+ // TODO: binary search if array is too long.
+ for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+ if (cache_[i].dex_pc == dex_pc) {
+ cache = &cache_[i];
+ break;
+ }
+ }
+ DCHECK(cache != nullptr);
+
+ ScopedObjectAccess soa(self);
+ for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
+ mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
+ if (existing == cls) {
+ // Receiver type is already in the cache, nothing else to do.
+ return;
+ } else if (existing == nullptr) {
+ // Cache entry is empty, try to put `cls` in it.
+ GcRoot<mirror::Class> expected_root(nullptr);
+ GcRoot<mirror::Class> desired_root(cls);
+ if (!reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i])->
+ CompareExchangeStrongSequentiallyConsistent(expected_root, desired_root)) {
+ // Some other thread put a class in the cache, continue iteration starting at this
+ // entry in case the entry contains `cls`.
+ --i;
+ } else {
+ // We successfully set `cls`, just return.
+ return;
+ }
+ }
+ }
+ // Unsuccessful - cache is full, making it megamorphic.
+ DCHECK(cache->IsMegamorphic());
+}
+
+} // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
new file mode 100644
index 0000000000..73ca41a9a1
--- /dev/null
+++ b/runtime/jit/profiling_info.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_PROFILING_INFO_H_
+#define ART_RUNTIME_JIT_PROFILING_INFO_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "gc_root.h"
+
+namespace art {
+
+class ArtMethod;
+
+namespace mirror {
+class Class;
+}
+
+/**
+ * Profiling info for a method, created and filled by the interpreter once the
+ * method is warm, and used by the compiler to drive optimizations.
+ */
+class ProfilingInfo {
+ public:
+ static ProfilingInfo* Create(ArtMethod* method);
+
+ // Add information from an executed INVOKE instruction to the profile.
+ void AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls);
+
+ // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
+ template<typename RootVisitorType>
+ void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
+ for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+ InlineCache* cache = &cache_[i];
+ for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
+ visitor.VisitRootIfNonNull(cache->classes_[j].AddressWithoutBarrier());
+ }
+ }
+ }
+
+ private:
+ // Structure to store the classes seen at runtime for a specific instruction.
+ // Once the classes_ array is full, we consider the INVOKE to be megamorphic.
+ struct InlineCache {
+ bool IsMonomorphic() const {
+ DCHECK_GE(kIndividualCacheSize, 2);
+ return !classes_[0].IsNull() && classes_[1].IsNull();
+ }
+
+ bool IsMegamorphic() const {
+ for (size_t i = 0; i < kIndividualCacheSize; ++i) {
+ if (classes_[i].IsNull()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool IsUnitialized() const {
+ return classes_[0].IsNull();
+ }
+
+ bool IsPolymorphic() const {
+ DCHECK_GE(kIndividualCacheSize, 3);
+ return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
+ }
+
+ static constexpr uint16_t kIndividualCacheSize = 5;
+ uint32_t dex_pc;
+ GcRoot<mirror::Class> classes_[kIndividualCacheSize];
+ };
+
+ explicit ProfilingInfo(const std::vector<uint32_t>& entries)
+ : number_of_inline_caches_(entries.size()) {
+ memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+ for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+ cache_[i].dex_pc = entries[i];
+ }
+ }
+
+ // Number of instructions we are profiling in the ArtMethod.
+ const uint32_t number_of_inline_caches_;
+
+ // Dynamically allocated array of size `number_of_inline_caches_`.
+ InlineCache cache_[0];
+
+ DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_PROFILING_INFO_H_
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
index 56bb9fb805..9c37db8fcc 100644
--- a/runtime/lambda/closure_builder.cc
+++ b/runtime/lambda/closure_builder.cc
@@ -62,7 +62,6 @@ void ClosureBuilder::CaptureVariableObject(mirror::Object* object) {
if (kIsDebugBuild) {
if (kUseReadBarrier) {
UNIMPLEMENTED(FATAL) << "can't yet safely capture objects with read barrier";
- UNREACHABLE();
}
}
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 1520a9bb02..b8b8d30f99 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '6', '9', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '7', '0', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 25b5e49b3d..50e2053c73 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -158,6 +158,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define("-Xjitthreshold:_")
.WithType<unsigned int>()
.IntoKey(M::JITCompileThreshold)
+ .Define("-Xjitwarmupthreshold:_")
+ .WithType<unsigned int>()
+ .IntoKey(M::JITWarmupThreshold)
.Define("-XX:HspaceCompactForOOMMinIntervalMs=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::HSpaceCompactForOOMMinIntervalsMs)
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index dd3703cad5..64c2249925 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -40,6 +40,9 @@ enum InlineMethodOpcode : uint16_t {
kIntrinsicReverseBits,
kIntrinsicReverseBytes,
kIntrinsicNumberOfLeadingZeros,
+ kIntrinsicNumberOfTrailingZeros,
+ kIntrinsicRotateRight,
+ kIntrinsicRotateLeft,
kIntrinsicAbsInt,
kIntrinsicAbsLong,
kIntrinsicAbsFloat,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 9d5ce9f385..60defbaaa3 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -40,7 +40,7 @@ QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimizatio
handler_dex_pc_(0), clear_exception_(false), handler_frame_depth_(kInvalidFrameDepth) {
}
-// Finds catch handler or prepares for deoptimization.
+// Finds catch handler.
class CatchBlockStackVisitor FINAL : public StackVisitor {
public:
CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
@@ -125,7 +125,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
StackHandleScope<1> hs(self_);
Handle<mirror::Throwable> exception_ref(hs.NewHandle(exception));
- // Walk the stack to find catch handler or prepare for deoptimization.
+ // Walk the stack to find catch handler.
CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this);
visitor.WalkStack(true);
@@ -146,16 +146,6 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
// Put exception back in root set with clear throw location.
self_->SetException(exception_ref.Get());
}
- // The debugger may suspend this thread and walk its stack. Let's do this before popping
- // instrumentation frames.
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- if (instrumentation->HasExceptionCaughtListeners()
- && self_->IsExceptionThrownByCurrentMethod(exception)) {
- instrumentation->ExceptionCaughtEvent(self_, exception_ref.Get());
- // Instrumentation may have been updated.
- method_tracing_active_ = is_deoptimization_ ||
- Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
- }
}
// Prepares deoptimization.
@@ -189,6 +179,12 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
// Ignore callee save method.
DCHECK(method->IsCalleeSaveMethod());
return true;
+ } else if (method->IsNative()) {
+ // If we return from JNI with a pending exception and want to deoptimize, we need to skip
+ // the native method.
+ // The top method is a runtime method, the native method comes next.
+ CHECK_EQ(GetFrameDepth(), 1U);
+ return true;
} else {
return HandleDeoptimization(method);
}
@@ -201,7 +197,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
bool HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != nullptr);
+ CHECK(code_item != nullptr) << "No code item for " << PrettyMethod(m);
uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
StackHandleScope<2> hs(self_); // Dex cache, class loader and method.
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index e934834e3c..4db95a87ec 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -43,9 +43,18 @@ class QuickExceptionHandler {
UNREACHABLE();
}
+ // Find the catch handler for the given exception.
void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Deoptimize the stack to the upcall. For every compiled frame, we create a "copy"
+ // shadow frame that will be executed with the interpreter.
void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Update the instrumentation stack by removing all methods that will be unwound
+ // by the exception being thrown.
void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Long jump either to a catch handler or to the upcall.
NO_RETURN void DoLongJump() SHARED_REQUIRES(Locks::mutator_lock_);
void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
@@ -83,9 +92,10 @@ class QuickExceptionHandler {
private:
Thread* const self_;
Context* const context_;
+ // Should we deoptimize the stack?
const bool is_deoptimization_;
// Is method tracing active?
- bool method_tracing_active_;
+ const bool method_tracing_active_;
// Quick frame with found handler or last frame if no handler found.
ArtMethod** handler_quick_frame_;
// PC to branch to for the handler.
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 019917c077..324bd9f580 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -459,7 +459,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
- const char* shorty = method->GetShorty(&shorty_len);
+ const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
@@ -489,7 +489,7 @@ JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
- const char* shorty = method->GetShorty(&shorty_len);
+ const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
@@ -520,7 +520,7 @@ JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnab
receiver = nullptr;
}
uint32_t shorty_len = 0;
- const char* shorty = method->GetShorty(&shorty_len);
+ const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
@@ -551,7 +551,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
receiver = nullptr;
}
uint32_t shorty_len = 0;
- const char* shorty = method->GetShorty(&shorty_len);
+ const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4797564237..7c71e1376d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1749,7 +1749,8 @@ void Runtime::CreateJit() {
jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
if (jit_.get() != nullptr) {
compiler_callbacks_ = jit_->GetCompilerCallbacks();
- jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
+ jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold(),
+ jit_options_->GetWarmupThreshold());
jit_->CreateThreadPool();
} else {
LOG(WARNING) << "Failed to create JIT " << error_msg;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 02ed3a2553..d88e84b602 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -68,6 +68,7 @@ RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseT
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
+RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold, jit::Jit::kDefaultWarmupThreshold)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheCapacity, jit::JitCodeCache::kDefaultCapacity)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
HSpaceCompactForOOMMinIntervalsMs,\
diff --git a/runtime/thread.cc b/runtime/thread.cc
index af5830aafb..86ac1407da 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2344,10 +2344,31 @@ void Thread::QuickDeliverException() {
// Get exception from thread.
mirror::Throwable* exception = GetException();
CHECK(exception != nullptr);
+ bool is_deoptimization = (exception == GetDeoptimizationException());
+ if (!is_deoptimization) {
+ // This is a real exception: let the instrumentation know about it.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (instrumentation->HasExceptionCaughtListeners() &&
+ IsExceptionThrownByCurrentMethod(exception)) {
+ // Instrumentation may cause GC so keep the exception object safe.
+ StackHandleScope<1> hs(this);
+ HandleWrapper<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
+ instrumentation->ExceptionCaughtEvent(this, exception);
+ }
+ // Does instrumentation need to deoptimize the stack?
+ // Note: we do this *after* reporting the exception to instrumentation in case it
+ // now requires deoptimization. It may happen if a debugger is attached and requests
+ // new events (single-step, breakpoint, ...) when the exception is reported.
+ is_deoptimization = Dbg::IsForcedInterpreterNeededForException(this);
+ if (is_deoptimization) {
+ // Save the exception into the deoptimization context so it can be restored
+ // before entering the interpreter.
+ PushDeoptimizationContext(JValue(), false, exception);
+ }
+ }
// Don't leave exception visible while we try to find the handler, which may cause class
// resolution.
ClearException();
- bool is_deoptimization = (exception == GetDeoptimizationException());
QuickExceptionHandler exception_handler(this, is_deoptimization);
if (is_deoptimization) {
exception_handler.DeoptimizeStack();
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4ab5c0efe7..d629ce66a8 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -806,6 +806,15 @@ void Trace::BackwardBranch(Thread* /*thread*/, ArtMethod* method,
LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method);
}
+void Trace::InvokeVirtualOrInterface(Thread*,
+ mirror::Object*,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtMethod*) {
+ LOG(ERROR) << "Unexpected invoke event in tracing" << PrettyMethod(method)
+ << " " << dex_pc;
+}
+
void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
if (UseThreadCpuClock()) {
uint64_t clock_base = thread->GetTraceClockBase();
diff --git a/runtime/trace.h b/runtime/trace.h
index 04be3ddeab..87a691d553 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -166,6 +166,12 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ void InvokeVirtualOrInterface(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* caller,
+ uint32_t dex_pc,
+ ArtMethod* callee)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index d768afda54..1ed6980c96 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -424,6 +424,7 @@ MethodVerifier::MethodVerifier(Thread* self,
has_virtual_or_interface_invokes_(false),
verify_to_dump_(verify_to_dump),
allow_thread_suspension_(allow_thread_suspension),
+ is_constructor_(false),
link_(nullptr) {
self->PushVerifier(this);
DCHECK(class_def != nullptr);
@@ -555,15 +556,124 @@ SafeMap<uint32_t, std::set<uint32_t>>& MethodVerifier::FindStringInitMap() {
}
bool MethodVerifier::Verify() {
+ // Some older code doesn't correctly mark constructors as such. Test for this case by looking at
+ // the name.
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const char* method_name = dex_file_->StringDataByIdx(method_id.name_idx_);
+ bool instance_constructor_by_name = strcmp("<init>", method_name) == 0;
+ bool static_constructor_by_name = strcmp("<clinit>", method_name) == 0;
+ bool constructor_by_name = instance_constructor_by_name || static_constructor_by_name;
+ // Check that only constructors are tagged, and check for bad code that doesn't tag constructors.
+ if ((method_access_flags_ & kAccConstructor) != 0) {
+ if (!constructor_by_name) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "method is marked as constructor, but not named accordingly";
+ return false;
+ }
+ is_constructor_ = true;
+ } else if (constructor_by_name) {
+ LOG(WARNING) << "Method " << PrettyMethod(dex_method_idx_, *dex_file_)
+ << " not marked as constructor.";
+ is_constructor_ = true;
+ }
+ // If it's a constructor, check whether IsStatic() matches the name.
+ // This should have been rejected by the dex file verifier. Only do in debug build.
+ if (kIsDebugBuild) {
+ if (IsConstructor()) {
+ if (IsStatic() ^ static_constructor_by_name) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "constructor name doesn't match static flag";
+ return false;
+ }
+ }
+ }
+
+ // Methods may only have one of public/protected/private.
+ // This should have been rejected by the dex file verifier. Only do in debug build.
+ if (kIsDebugBuild) {
+ size_t access_mod_count =
+ (((method_access_flags_ & kAccPublic) == 0) ? 0 : 1) +
+ (((method_access_flags_ & kAccProtected) == 0) ? 0 : 1) +
+ (((method_access_flags_ & kAccPrivate) == 0) ? 0 : 1);
+ if (access_mod_count > 1) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "method has more than one of public/protected/private";
+ return false;
+ }
+ }
+
// If there aren't any instructions, make sure that's expected, then exit successfully.
if (code_item_ == nullptr) {
- if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
+ // This should have been rejected by the dex file verifier. Only do in debug build.
+ if (kIsDebugBuild) {
+ // Only native or abstract methods may not have code.
+ if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
+ return false;
+ }
+ if ((method_access_flags_ & kAccAbstract) != 0) {
+ // Abstract methods are not allowed to have the following flags.
+ static constexpr uint32_t kForbidden =
+ kAccPrivate |
+ kAccStatic |
+ kAccFinal |
+ kAccNative |
+ kAccStrict |
+ kAccSynchronized;
+ if ((method_access_flags_ & kForbidden) != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "method can't be abstract and private/static/final/native/strict/synchronized";
+ return false;
+ }
+ }
+ if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+ // Interface methods must be public and abstract.
+ if ((method_access_flags_ & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be public and abstract";
+ return false;
+ }
+ // In addition to the above, interface methods must not be protected.
+ static constexpr uint32_t kForbidden = kAccProtected;
+ if ((method_access_flags_ & kForbidden) != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods can't be protected";
+ return false;
+ }
+ }
+ // We also don't allow constructors to be abstract or native.
+ if (IsConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "constructors can't be abstract or native";
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // This should have been rejected by the dex file verifier. Only do in debug build.
+ if (kIsDebugBuild) {
+ // When there's code, the method must not be native or abstract.
+ if ((method_access_flags_ & (kAccNative | kAccAbstract)) != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "non-zero-length code in abstract or native method";
return false;
- } else {
- return true;
+ }
+
+ // Only the static initializer may have code in an interface.
+ if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+ // Interfaces may have static initializers for their fields.
+ if (!IsConstructor() || !IsStatic()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
+ return false;
+ }
+ }
+
+ // Instance constructors must not be synchronized.
+ if (IsInstanceConstructor()) {
+ static constexpr uint32_t kForbidden = kAccSynchronized;
+ if ((method_access_flags_ & kForbidden) != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "constructors can't be synchronized";
+ return false;
+ }
}
}
+
// Sanity-check the register counts. ins + locals = registers, so make sure that ins <= registers.
if (code_item_->ins_size_ > code_item_->registers_size_) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad register counts (ins=" << code_item_->ins_size_
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index b57abf537d..5e661a59b8 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -262,29 +262,29 @@ class MethodVerifier {
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Is the method being verified a constructor?
- bool IsConstructor() const {
- return (method_access_flags_ & kAccConstructor) != 0;
+ SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() {
+ return string_init_pc_reg_map_;
}
- // Is the method verified static?
- bool IsStatic() const {
- return (method_access_flags_ & kAccStatic) != 0;
+ uint32_t GetEncounteredFailureTypes() {
+ return encountered_failure_types_;
}
bool IsInstanceConstructor() const {
return IsConstructor() && !IsStatic();
}
- SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() {
- return string_init_pc_reg_map_;
+ private:
+ // Is the method being verified a constructor? See the comment on the field.
+ bool IsConstructor() const {
+ return is_constructor_;
}
- uint32_t GetEncounteredFailureTypes() {
- return encountered_failure_types_;
+ // Is the method verified static?
+ bool IsStatic() const {
+ return (method_access_flags_ & kAccStatic) != 0;
}
- private:
// Private constructor for dumping.
MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
@@ -780,6 +780,13 @@ class MethodVerifier {
// FindLocksAtDexPC, resulting in deadlocks.
const bool allow_thread_suspension_;
+ // Whether the method seems to be a constructor. Note that this field exists as we can't trust
+ // the flags in the dex file. Some older code does not mark methods named "<init>" and "<clinit>"
+ // correctly.
+ //
+ // Note: this flag is only valid once Verify() has started.
+ bool is_constructor_;
+
// Link, for the method verifier root linked list.
MethodVerifier* link_;
diff --git a/test/044-proxy/expected.txt b/test/044-proxy/expected.txt
index bcce019457..f86948ad6c 100644
--- a/test/044-proxy/expected.txt
+++ b/test/044-proxy/expected.txt
@@ -93,3 +93,4 @@ Invocation of public abstract java.lang.String NarrowingTest$I2.foo()
Got expected exception
Proxy narrowed invocation return type passed
5.8
+callback
diff --git a/test/044-proxy/native_proxy.cc b/test/044-proxy/native_proxy.cc
new file mode 100644
index 0000000000..f168719bf5
--- /dev/null
+++ b/test/044-proxy/native_proxy.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+#include "base/logging.h"
+
+namespace art {
+
+extern "C" JNIEXPORT void JNICALL Java_NativeProxy_nativeCall(
+ JNIEnv* env, jclass clazz ATTRIBUTE_UNUSED, jobject inf_ref) {
+ jclass native_inf_class = env->FindClass("NativeInterface");
+ CHECK(native_inf_class != nullptr);
+ jmethodID mid = env->GetMethodID(native_inf_class, "callback", "()V");
+ CHECK(mid != nullptr);
+ env->CallVoidMethod(inf_ref, mid);
+}
+
+} // namespace art
diff --git a/test/044-proxy/src/Main.java b/test/044-proxy/src/Main.java
index 9580871432..05e8e5b512 100644
--- a/test/044-proxy/src/Main.java
+++ b/test/044-proxy/src/Main.java
@@ -28,5 +28,6 @@ public class Main {
WrappedThrow.main(null);
NarrowingTest.main(null);
FloatSelect.main(null);
+ NativeProxy.main(args);
}
}
diff --git a/test/044-proxy/src/NativeProxy.java b/test/044-proxy/src/NativeProxy.java
new file mode 100644
index 0000000000..b425da87b5
--- /dev/null
+++ b/test/044-proxy/src/NativeProxy.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
+import java.util.Comparator;
+
+/**
+ * Test invoking a proxy method from native code.
+ */
+
+interface NativeInterface {
+ public void callback();
+}
+
+public class NativeProxy {
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ try {
+ NativeInterface inf = (NativeInterface)Proxy.newProxyInstance(
+ NativeProxy.class.getClassLoader(),
+ new Class[] { NativeInterface.class },
+ new NativeInvocationHandler());
+
+ nativeCall(inf);
+ } catch (Exception exc) {
+ throw new RuntimeException(exc);
+ }
+ }
+
+ public static class NativeInvocationHandler implements InvocationHandler {
+ public Object invoke(final Object proxy,
+ final Method method,
+ final Object[] args) throws Throwable {
+ System.out.println(method.getName());
+ return null;
+ }
+ }
+
+ public static native void nativeCall(NativeInterface inf);
+}
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 08ccf0ef14..5913c40b36 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -78,6 +78,14 @@ public class Main {
test_Memory_pokeShort();
test_Memory_pokeInt();
test_Memory_pokeLong();
+ test_Integer_numberOfTrailingZeros();
+ test_Long_numberOfTrailingZeros();
+ test_Integer_rotateRight();
+ test_Long_rotateRight();
+ test_Integer_rotateLeft();
+ test_Long_rotateLeft();
+ test_Integer_rotateRightLeft();
+ test_Long_rotateRightLeft();
}
/**
@@ -1360,4 +1368,136 @@ public class Main {
poke_long.invoke(null, address + 1, (long)0x2122232425262728L, false);
Assert.assertTrue(Arrays.equals(ru, b));
}
+
+ public static void test_Integer_numberOfTrailingZeros() {
+ Assert.assertEquals(Integer.numberOfTrailingZeros(0), Integer.SIZE);
+ for (int i = 0; i < Integer.SIZE; i++) {
+ Assert.assertEquals(
+ Integer.numberOfTrailingZeros(0x80000000 >> i),
+ Integer.SIZE - 1 - i);
+ Assert.assertEquals(
+ Integer.numberOfTrailingZeros((0x80000000 >> i) | 0x80000000),
+ Integer.SIZE - 1 - i);
+ Assert.assertEquals(Integer.numberOfTrailingZeros(1 << i), i);
+ }
+ }
+
+ public static void test_Long_numberOfTrailingZeros() {
+ Assert.assertEquals(Long.numberOfTrailingZeros(0), Long.SIZE);
+ for (int i = 0; i < Long.SIZE; i++) {
+ Assert.assertEquals(
+ Long.numberOfTrailingZeros(0x8000000000000000L >> i),
+ Long.SIZE - 1 - i);
+ Assert.assertEquals(
+ Long.numberOfTrailingZeros((0x8000000000000000L >> i) | 0x8000000000000000L),
+ Long.SIZE - 1 - i);
+ Assert.assertEquals(Long.numberOfTrailingZeros(1L << i), i);
+ }
+ }
+
+ public static void test_Integer_rotateRight() throws Exception {
+ Assert.assertEquals(Integer.rotateRight(0x11, 0), 0x11);
+
+ Assert.assertEquals(Integer.rotateRight(0x11, 1), 0x80000008);
+ Assert.assertEquals(Integer.rotateRight(0x11, Integer.SIZE - 1), 0x22);
+ Assert.assertEquals(Integer.rotateRight(0x11, Integer.SIZE), 0x11);
+ Assert.assertEquals(Integer.rotateRight(0x11, Integer.SIZE + 1), 0x80000008);
+
+ Assert.assertEquals(Integer.rotateRight(0x11, -1), 0x22);
+ Assert.assertEquals(Integer.rotateRight(0x11, -(Integer.SIZE - 1)), 0x80000008);
+ Assert.assertEquals(Integer.rotateRight(0x11, -Integer.SIZE), 0x11);
+ Assert.assertEquals(Integer.rotateRight(0x11, -(Integer.SIZE + 1)), 0x22);
+
+ Assert.assertEquals(Integer.rotateRight(0x80000000, 1), 0x40000000);
+
+ for (int i = 0; i < Integer.SIZE; i++) {
+ Assert.assertEquals(
+ Integer.rotateRight(0xBBAAAADD, i),
+ (0xBBAAAADD >>> i) | (0xBBAAAADD << (Integer.SIZE - i)));
+ }
+ }
+
+ public static void test_Long_rotateRight() throws Exception {
+ Assert.assertEquals(Long.rotateRight(0x11, 0), 0x11);
+
+ Assert.assertEquals(Long.rotateRight(0x11, 1), 0x8000000000000008L);
+ Assert.assertEquals(Long.rotateRight(0x11, Long.SIZE - 1), 0x22);
+ Assert.assertEquals(Long.rotateRight(0x11, Long.SIZE), 0x11);
+ Assert.assertEquals(Long.rotateRight(0x11, Long.SIZE + 1), 0x8000000000000008L);
+
+ Assert.assertEquals(Long.rotateRight(0x11, -1), 0x22);
+ Assert.assertEquals(Long.rotateRight(0x11, -(Long.SIZE - 1)), 0x8000000000000008L);
+ Assert.assertEquals(Long.rotateRight(0x11, -Long.SIZE), 0x11);
+ Assert.assertEquals(Long.rotateRight(0x11, -(Long.SIZE + 1)), 0x22);
+
+ Assert.assertEquals(Long.rotateRight(0x8000000000000000L, 1), 0x4000000000000000L);
+
+ for (int i = 0; i < Long.SIZE; i++) {
+ Assert.assertEquals(
+ Long.rotateRight(0xBBAAAADDFF0000DDL, i),
+ (0xBBAAAADDFF0000DDL >>> i) | (0xBBAAAADDFF0000DDL << (Long.SIZE - i)));
+ }
+ }
+
+ public static void test_Integer_rotateLeft() throws Exception {
+ Assert.assertEquals(Integer.rotateLeft(0x11, 0), 0x11);
+
+ Assert.assertEquals(Integer.rotateLeft(0x11, 1), 0x22);
+ Assert.assertEquals(Integer.rotateLeft(0x11, Integer.SIZE - 1), 0x80000008);
+ Assert.assertEquals(Integer.rotateLeft(0x11, Integer.SIZE), 0x11);
+ Assert.assertEquals(Integer.rotateLeft(0x11, Integer.SIZE + 1), 0x22);
+
+ Assert.assertEquals(Integer.rotateLeft(0x11, -1), 0x80000008);
+ Assert.assertEquals(Integer.rotateLeft(0x11, -(Integer.SIZE - 1)), 0x22);
+ Assert.assertEquals(Integer.rotateLeft(0x11, -Integer.SIZE), 0x11);
+ Assert.assertEquals(Integer.rotateLeft(0x11, -(Integer.SIZE + 1)), 0x80000008);
+
+ Assert.assertEquals(Integer.rotateLeft(0xC0000000, 1), 0x80000001);
+
+ for (int i = 0; i < Integer.SIZE; i++) {
+ Assert.assertEquals(
+ Integer.rotateLeft(0xBBAAAADD, i),
+ (0xBBAAAADD << i) | (0xBBAAAADD >>> (Integer.SIZE - i)));
+ }
+ }
+
+ public static void test_Long_rotateLeft() throws Exception {
+ Assert.assertEquals(Long.rotateLeft(0x11, 0), 0x11);
+
+ Assert.assertEquals(Long.rotateLeft(0x11, 1), 0x22);
+ Assert.assertEquals(Long.rotateLeft(0x11, Long.SIZE - 1), 0x8000000000000008L);
+ Assert.assertEquals(Long.rotateLeft(0x11, Long.SIZE), 0x11);
+ Assert.assertEquals(Long.rotateLeft(0x11, Long.SIZE + 1), 0x22);
+
+ Assert.assertEquals(Long.rotateLeft(0x11, -1), 0x8000000000000008L);
+ Assert.assertEquals(Long.rotateLeft(0x11, -(Long.SIZE - 1)), 0x22);
+ Assert.assertEquals(Long.rotateLeft(0x11, -Long.SIZE), 0x11);
+ Assert.assertEquals(Long.rotateLeft(0x11, -(Long.SIZE + 1)), 0x8000000000000008L);
+
+ Assert.assertEquals(Long.rotateLeft(0xC000000000000000L, 1), 0x8000000000000001L);
+
+ for (int i = 0; i < Long.SIZE; i++) {
+ Assert.assertEquals(
+ Long.rotateLeft(0xBBAAAADDFF0000DDL, i),
+ (0xBBAAAADDFF0000DDL << i) | (0xBBAAAADDFF0000DDL >>> (Long.SIZE - i)));
+ }
+ }
+
+ public static void test_Integer_rotateRightLeft() throws Exception {
+ for (int i = 0; i < Integer.SIZE * 2; i++) {
+ Assert.assertEquals(Integer.rotateLeft(0xBBAAAADD, i),
+ Integer.rotateRight(0xBBAAAADD, -i));
+ Assert.assertEquals(Integer.rotateLeft(0xBBAAAADD, -i),
+ Integer.rotateRight(0xBBAAAADD, i));
+ }
+ }
+
+ public static void test_Long_rotateRightLeft() throws Exception {
+ for (int i = 0; i < Long.SIZE * 2; i++) {
+ Assert.assertEquals(Long.rotateLeft(0xBBAAAADDFF0000DDL, i),
+ Long.rotateRight(0xBBAAAADDFF0000DDL, -i));
+ Assert.assertEquals(Long.rotateLeft(0xBBAAAADDFF0000DDL, -i),
+ Long.rotateRight(0xBBAAAADDFF0000DDL, i));
+ }
+ }
}
diff --git a/test/800-smali/smali/b_18380491AbstractBase.smali b/test/800-smali/smali/b_18380491AbstractBase.smali
index 7aa1b1a12e..cc05221cdf 100644
--- a/test/800-smali/smali/b_18380491AbstractBase.smali
+++ b/test/800-smali/smali/b_18380491AbstractBase.smali
@@ -1,4 +1,4 @@
-.class public LB18380491ActractBase;
+.class public abstract LB18380491AbstractBase;
.super Ljava/lang/Object;
diff --git a/test/800-smali/smali/b_18380491ConcreteClass.smali b/test/800-smali/smali/b_18380491ConcreteClass.smali
index db5ef3ba6b..1ba684f796 100644
--- a/test/800-smali/smali/b_18380491ConcreteClass.smali
+++ b/test/800-smali/smali/b_18380491ConcreteClass.smali
@@ -1,10 +1,10 @@
.class public LB18380491ConcreteClass;
-.super LB18380491ActractBase;
+.super LB18380491AbstractBase;
.method public constructor <init>()V
.locals 0
- invoke-direct {p0}, LB18380491ActractBase;-><init>()V
+ invoke-direct {p0}, LB18380491AbstractBase;-><init>()V
return-void
.end method
@@ -13,7 +13,7 @@
if-eqz p1, :invoke_super_abstract
return p1
:invoke_super_abstract
- invoke-super {p0, p1}, LB18380491ActractBase;->foo(I)I
+ invoke-super {p0, p1}, LB18380491AbstractBase;->foo(I)I
move-result v0
return v0
.end method
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 6d9d7c3cf6..af945fb66e 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -24,6 +24,7 @@ LIBARTTEST_COMMON_SRC_FILES := \
004-ReferenceMap/stack_walk_refmap_jni.cc \
004-StackWalk/stack_walk_jni.cc \
004-UnsafeTest/unsafe_test.cc \
+ 044-proxy/native_proxy.cc \
051-thread/thread_test.cc \
088-monitor-verification/stack_inspect.cc \
116-nodex2oat/nodex2oat.cc \
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ad3fb41586..efc0bfb1b9 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -39,7 +39,11 @@ RELOCATE="y"
SECONDARY_DEX=""
TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
# Value in seconds
-TIME_OUT_VALUE=600 # 10 minutes.
+if [ "$ART_USE_READ_BARRIER" = "true" ]; then
+ TIME_OUT_VALUE=900 # 15 minutes.
+else
+ TIME_OUT_VALUE=600 # 10 minutes.
+fi
USE_GDB="n"
USE_JVM="n"
VERIFY="y" # y=yes,n=no,s=softfail
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 116a611a80..104cba7940 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -40,6 +40,9 @@ device_dir="--device-dir=/data/local/tmp"
vm_command="--vm-command=$art"
image_compiler_option=""
debug="no"
+verbose="no"
+# By default, we run the whole JDWP test suite.
+test="org.apache.harmony.jpda.tests.share.AllTests"
while true; do
if [[ "$1" == "--mode=host" ]]; then
@@ -65,6 +68,19 @@ while true; do
# Remove the --debug from the arguments.
args=${args/$1}
shift
+ elif [[ $1 == "--verbose" ]]; then
+ verbose="yes"
+ # Remove the --verbose from the arguments.
+ args=${args/$1}
+ shift
+ elif [[ $1 == "--test" ]]; then
+ # Remove the --test from the arguments.
+ args=${args/$1}
+ shift
+ test=$1
+ # Remove the test from the arguments.
+ args=${args/$1}
+ shift
elif [[ "$1" == "" ]]; then
break
else
@@ -78,6 +94,10 @@ if [[ $debug == "yes" ]]; then
art_debugee="$art_debugee -d"
vm_args="$vm_args --vm-arg -XXlib:libartd.so"
fi
+if [[ $verbose == "yes" ]]; then
+ # Enable JDWP logs in the debuggee.
+ art_debugee="$art_debugee -verbose:jdwp"
+fi
# Run the tests using vogar.
vogar $vm_command \
@@ -93,4 +113,4 @@ vogar $vm_command \
--vm-arg -Djpda.settings.debuggeeJavaPath="\"$art_debugee $image $debuggee_args\"" \
--classpath $test_jar \
--vm-arg -Xcompiler-option --vm-arg --debuggable \
- org.apache.harmony.jpda.tests.share.AllTests
+ $test
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 8466bb314c..7faf86ed5c 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -30,6 +30,3 @@ adb shell ifconfig
echo -e "${green}List properties${nc}"
adb shell getprop
-
-echo -e "${green}Stopping framework${nc}"
-adb shell stop