summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/dex/compiler_enums.h1
-rw-r--r--compiler/dex/global_value_numbering.cc38
-rw-r--r--compiler/dex/global_value_numbering.h3
-rw-r--r--compiler/dex/global_value_numbering_test.cc92
-rw-r--r--compiler/dex/gvn_dead_code_elimination.cc7
-rw-r--r--compiler/dex/local_value_numbering.cc26
-rw-r--r--compiler/dex/mir_graph.cc6
-rw-r--r--compiler/dex/mir_graph.h4
-rw-r--r--compiler/dex/mir_optimization.cc3
-rw-r--r--compiler/dex/pass_driver_me_opts.cc2
-rw-r--r--compiler/dex/pass_driver_me_opts.h8
-rw-r--r--compiler/dex/quick/gen_common.cc7
-rw-r--r--compiler/dex/quick/mir_to_lir.cc2
-rw-r--r--compiler/dex/quick/mir_to_lir.h2
-rw-r--r--compiler/dex/quick/quick_compiler.cc2
-rw-r--r--compiler/driver/compiler_driver.cc2
-rw-r--r--compiler/optimizing/code_generator.h9
-rw-r--r--compiler/optimizing/code_generator_arm.cc4
-rw-r--r--compiler/optimizing/code_generator_arm64.cc4
-rw-r--r--compiler/optimizing/code_generator_x86.cc4
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc4
-rw-r--r--compiler/optimizing/optimizing_compiler.cc98
-rw-r--r--compiler/optimizing/stack_map_stream.h4
-rw-r--r--runtime/Android.mk18
-rw-r--r--runtime/base/arena_allocator.cc2
-rw-r--r--runtime/base/hash_set.h157
-rw-r--r--runtime/base/hash_set_test.cc4
-rw-r--r--runtime/class_linker.cc106
-rw-r--r--runtime/class_linker.h6
-rw-r--r--runtime/gc/heap.cc14
-rw-r--r--runtime/interpreter/interpreter_goto_table_impl.cc5
-rw-r--r--runtime/jit/jit.cc4
-rw-r--r--runtime/jit/jit.h10
-rw-r--r--runtime/mirror/class.cc42
-rw-r--r--runtime/mirror/class.h4
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc20
-rw-r--r--runtime/oat_file_assistant_test.cc4
-rw-r--r--runtime/runtime.cc28
-rw-r--r--runtime/runtime.h5
-rw-r--r--runtime/stack_map.h12
-rwxr-xr-xtest/run-test2
-rwxr-xr-xtools/setup-buildbot-device.sh28
42 files changed, 547 insertions, 256 deletions
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 7edb490176..39725dee38 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -345,6 +345,7 @@ enum ExtendedMIROpcode {
enum MIROptimizationFlagPositions {
kMIRIgnoreNullCheck = 0,
kMIRIgnoreRangeCheck,
+ kMIRIgnoreCheckCast,
kMIRStoreNonNullValue, // Storing non-null value, always mark GC card.
kMIRClassIsInitialized,
kMIRClassIsInDexCache,
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index ab3c946897..30e3ce0354 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -16,6 +16,7 @@
#include "global_value_numbering.h"
+#include "base/bit_vector-inl.h"
#include "base/stl_util.h"
#include "local_value_numbering.h"
@@ -206,4 +207,41 @@ bool GlobalValueNumbering::DivZeroCheckedInAllPredecessors(
return true;
}
+bool GlobalValueNumbering::IsBlockEnteredOnTrue(uint16_t cond, BasicBlockId bb_id) {
+ DCHECK_NE(cond, kNoValue);
+ BasicBlock* bb = mir_graph_->GetBasicBlock(bb_id);
+ if (bb->predecessors.size() == 1u) {
+ BasicBlockId pred_id = bb->predecessors[0];
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
+ if (pred_bb->last_mir_insn != nullptr) {
+ Instruction::Code opcode = pred_bb->last_mir_insn->dalvikInsn.opcode;
+ if ((opcode == Instruction::IF_NEZ && pred_bb->taken == bb_id) ||
+ (opcode == Instruction::IF_EQZ && pred_bb->fall_through == bb_id)) {
+ DCHECK(lvns_[pred_id] != nullptr);
+ uint16_t operand = lvns_[pred_id]->GetSregValue(pred_bb->last_mir_insn->ssa_rep->uses[0]);
+ if (operand == cond) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+bool GlobalValueNumbering::IsTrueInBlock(uint16_t cond, BasicBlockId bb_id) {
+ // We're not doing proper value propagation, so just see if the condition is used
+ // with if-nez/if-eqz to branch/fall-through to this bb or one of its dominators.
+ DCHECK_NE(cond, kNoValue);
+ if (IsBlockEnteredOnTrue(cond, bb_id)) {
+ return true;
+ }
+ BasicBlock* bb = mir_graph_->GetBasicBlock(bb_id);
+ for (uint32_t dom_id : bb->dominators->Indexes()) {
+ if (IsBlockEnteredOnTrue(cond, dom_id)) {
+ return true;
+ }
+ }
+ return false;
+}
+
} // namespace art
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index 6fa658c0cc..bd2f187d17 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -200,6 +200,9 @@ class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
bool DivZeroCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
+ bool IsBlockEnteredOnTrue(uint16_t cond, BasicBlockId bb_id);
+ bool IsTrueInBlock(uint16_t cond, BasicBlockId bb_id);
+
ScopedArenaAllocator* Allocator() const {
return allocator_;
}
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index b91c3cac8f..b4559ef375 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -136,6 +136,7 @@ class GlobalValueNumberingTest : public testing::Test {
{ bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
#define DEF_BINOP(bb, opcode, result, src1, src2) \
{ bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
cu_.mir_graph->ifield_lowering_infos_.clear();
@@ -2315,4 +2316,95 @@ TEST_F(GlobalValueNumberingTestDiamond, DivZeroCheckDiamond) {
}
}
+TEST_F(GlobalValueNumberingTestDiamond, CheckCastDiamond) {
+ static const MIRDef mirs[] = {
+ DEF_UNOP(3u, Instruction::INSTANCE_OF, 0u, 100u),
+ DEF_UNOP(3u, Instruction::INSTANCE_OF, 1u, 200u),
+ DEF_IFZ(3u, Instruction::IF_NEZ, 0u),
+ DEF_INVOKE1(4u, Instruction::CHECK_CAST, 100u),
+ DEF_INVOKE1(5u, Instruction::CHECK_CAST, 100u),
+ DEF_INVOKE1(5u, Instruction::CHECK_CAST, 200u),
+ DEF_INVOKE1(5u, Instruction::CHECK_CAST, 100u),
+ DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
+ };
+
+ static const bool expected_ignore_check_cast[] = {
+ false, // instance-of
+ false, // instance-of
+ false, // if-nez
+ false, // Not eliminated, fall-through branch.
+ true, // Eliminated.
+ false, // Not eliminated, different value.
+ false, // Not eliminated, different type.
+ false, // Not eliminated, bottom block.
+ };
+
+ PrepareMIRs(mirs);
+ mirs_[0].dalvikInsn.vC = 1234; // type for instance-of
+ mirs_[1].dalvikInsn.vC = 1234; // type for instance-of
+ mirs_[3].dalvikInsn.vB = 1234; // type for check-cast
+ mirs_[4].dalvikInsn.vB = 1234; // type for check-cast
+ mirs_[5].dalvikInsn.vB = 1234; // type for check-cast
+ mirs_[6].dalvikInsn.vB = 4321; // type for check-cast
+ mirs_[7].dalvikInsn.vB = 1234; // type for check-cast
+ PerformGVN();
+ PerformGVNCodeModifications();
+ ASSERT_EQ(arraysize(expected_ignore_check_cast), mir_count_);
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ int expected = expected_ignore_check_cast[i] ? MIR_IGNORE_CHECK_CAST : 0u;
+ EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+ }
+}
+
+TEST_F(GlobalValueNumberingTest, CheckCastDominators) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)), // Block #3, top of the diamond.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(3)), // Block #4, left side.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // Block #5, right side.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(5)), // Block #6, right side.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 6)), // Block #7, bottom.
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNOP(3u, Instruction::INSTANCE_OF, 0u, 100u),
+ DEF_UNOP(3u, Instruction::INSTANCE_OF, 1u, 200u),
+ DEF_IFZ(3u, Instruction::IF_NEZ, 0u),
+ DEF_INVOKE1(4u, Instruction::CHECK_CAST, 100u),
+ DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
+ DEF_INVOKE1(6u, Instruction::CHECK_CAST, 200u),
+ DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
+ DEF_INVOKE1(7u, Instruction::CHECK_CAST, 100u),
+ };
+
+ static const bool expected_ignore_check_cast[] = {
+ false, // instance-of
+ false, // instance-of
+ false, // if-nez
+ false, // Not eliminated, fall-through branch.
+ true, // Eliminated.
+ false, // Not eliminated, different value.
+ false, // Not eliminated, different type.
+ false, // Not eliminated, bottom block.
+ };
+
+ PrepareBasicBlocks(bbs);
+ PrepareMIRs(mirs);
+ mirs_[0].dalvikInsn.vC = 1234; // type for instance-of
+ mirs_[1].dalvikInsn.vC = 1234; // type for instance-of
+ mirs_[3].dalvikInsn.vB = 1234; // type for check-cast
+ mirs_[4].dalvikInsn.vB = 1234; // type for check-cast
+ mirs_[5].dalvikInsn.vB = 1234; // type for check-cast
+ mirs_[6].dalvikInsn.vB = 4321; // type for check-cast
+ mirs_[7].dalvikInsn.vB = 1234; // type for check-cast
+ PerformGVN();
+ PerformGVNCodeModifications();
+ ASSERT_EQ(arraysize(expected_ignore_check_cast), mir_count_);
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ int expected = expected_ignore_check_cast[i] ? MIR_IGNORE_CHECK_CAST : 0u;
+ EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 2e7f0328d2..2d4c18ff49 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -1058,7 +1058,6 @@ bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
case Instruction::INVOKE_INTERFACE_RANGE:
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE:
- case Instruction::CHECK_CAST:
case Instruction::THROW:
case Instruction::FILLED_NEW_ARRAY:
case Instruction::FILLED_NEW_ARRAY_RANGE:
@@ -1073,6 +1072,12 @@ bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
uses_all_vregs = true;
break;
+ case Instruction::CHECK_CAST:
+ DCHECK_EQ(mir->ssa_rep->num_uses, 1);
+ must_keep = true; // Keep for type information even if MIR_IGNORE_CHECK_CAST.
+ uses_all_vregs = (mir->optimization_flags & MIR_IGNORE_CHECK_CAST) == 0;
+ break;
+
case kMirOpNullCheck:
DCHECK_EQ(mir->ssa_rep->num_uses, 1);
if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 99b6683b26..dc222b5211 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1520,7 +1520,6 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32:
- case Instruction::CHECK_CAST:
case Instruction::THROW:
case Instruction::FILL_ARRAY_DATA:
case Instruction::PACKED_SWITCH:
@@ -1612,9 +1611,32 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
HandleInvokeOrClInitOrAcquireOp(mir);
break;
+ case Instruction::INSTANCE_OF: {
+ uint16_t operand = GetOperandValue(mir->ssa_rep->uses[0]);
+ uint16_t type = mir->dalvikInsn.vC;
+ res = gvn_->LookupValue(Instruction::INSTANCE_OF, operand, type, kNoValue);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+ case Instruction::CHECK_CAST:
+ if (gvn_->CanModify()) {
+ // Check if there was an instance-of operation on the same value and if we are
+ // in a block where its result is true. If so, we can eliminate the check-cast.
+ uint16_t operand = GetOperandValue(mir->ssa_rep->uses[0]);
+ uint16_t type = mir->dalvikInsn.vB;
+ uint16_t cond = gvn_->FindValue(Instruction::INSTANCE_OF, operand, type, kNoValue);
+ if (cond != kNoValue && gvn_->IsTrueInBlock(cond, Id())) {
+ if (gvn_->GetCompilationUnit()->verbose) {
+ LOG(INFO) << "Removing check-cast at 0x" << std::hex << mir->offset;
+ }
+ // Don't use kMirOpNop. Keep the check-cast as it defines the type of the register.
+ mir->optimization_flags |= MIR_IGNORE_CHECK_CAST;
+ }
+ }
+ break;
+
case Instruction::MOVE_RESULT:
case Instruction::MOVE_RESULT_OBJECT:
- case Instruction::INSTANCE_OF:
// 1 result, treat as unique each time, use result s_reg - will be unique.
res = GetOperandValue(mir->ssa_rep->defs[0]);
SetOperandValue(mir->ssa_rep->defs[0], res);
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index f354a49111..3103f96e4e 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -2459,11 +2459,9 @@ BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
return res;
}
-void MIRGraph::CalculateBasicBlockInformation() {
- auto* quick_compiler = down_cast<QuickCompiler*>(cu_->compiler_driver->GetCompiler());
- DCHECK(quick_compiler != nullptr);
+void MIRGraph::CalculateBasicBlockInformation(const PassManager* const post_opt_pass_manager) {
/* Create the pass driver and launch it */
- PassDriverMEPostOpt driver(quick_compiler->GetPostOptPassManager(), cu_);
+ PassDriverMEPostOpt driver(post_opt_pass_manager, cu_);
driver.Launch();
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 3dae5b4fa9..3298af1162 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -38,6 +38,7 @@ class DexCompilationUnit;
class DexFileMethodInliner;
class GlobalValueNumbering;
class GvnDeadCodeElimination;
+class PassManager;
// Forward declaration.
class MIRGraph;
@@ -149,6 +150,7 @@ enum OatMethodAttributes {
#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
#define MIR_IGNORE_RANGE_CHECK (1 << kMIRIgnoreRangeCheck)
+#define MIR_IGNORE_CHECK_CAST (1 << kMIRIgnoreCheckCast)
#define MIR_STORE_NON_NULL_VALUE (1 << kMIRStoreNonNullValue)
#define MIR_CLASS_IS_INITIALIZED (1 << kMIRClassIsInitialized)
#define MIR_CLASS_IS_IN_DEX_CACHE (1 << kMIRClassIsInDexCache)
@@ -1201,7 +1203,7 @@ class MIRGraph {
void AllocateSSAUseData(MIR *mir, int num_uses);
void AllocateSSADefData(MIR *mir, int num_defs);
- void CalculateBasicBlockInformation();
+ void CalculateBasicBlockInformation(const PassManager* const post_opt);
void ComputeDFSOrders();
void ComputeDefBlockMatrix();
void ComputeDominators();
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 93749e4424..266b7c3064 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1751,6 +1751,9 @@ bool MIRGraph::CanThrow(MIR* mir) const {
DCHECK_NE(opt_flags & MIR_IGNORE_NULL_CHECK, 0);
// Non-throwing only if range check has been eliminated.
return ((opt_flags & MIR_IGNORE_RANGE_CHECK) == 0);
+ } else if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST &&
+ (opt_flags & MIR_IGNORE_CHECK_CAST) != 0) {
+ return false;
} else if (mir->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
static_cast<int>(mir->dalvikInsn.opcode) == kMirOpNullCheck) {
// No more checks for these (null check was processed above).
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 320d06aa06..2e871dafef 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -66,7 +66,7 @@ void PassDriverMEOpts::ApplyPass(PassDataHolder* data, const Pass* pass) {
// Is it dirty at least?
if (pass_me_data_holder->dirty == true) {
CompilationUnit* c_unit = pass_me_data_holder->c_unit;
- c_unit->mir_graph.get()->CalculateBasicBlockInformation();
+ c_unit->mir_graph.get()->CalculateBasicBlockInformation(post_opt_pass_manager_);
}
}
}
diff --git a/compiler/dex/pass_driver_me_opts.h b/compiler/dex/pass_driver_me_opts.h
index b930d02d1e..e94c1894c9 100644
--- a/compiler/dex/pass_driver_me_opts.h
+++ b/compiler/dex/pass_driver_me_opts.h
@@ -29,8 +29,10 @@ class PassManager;
class PassDriverMEOpts : public PassDriverME {
public:
- explicit PassDriverMEOpts(const PassManager* const manager, CompilationUnit* cu)
- : PassDriverME(manager, cu) {
+ explicit PassDriverMEOpts(const PassManager* const manager,
+ const PassManager* const post_opt_pass_manager,
+ CompilationUnit* cu)
+ : PassDriverME(manager, cu), post_opt_pass_manager_(post_opt_pass_manager) {
}
~PassDriverMEOpts() {
@@ -45,6 +47,8 @@ class PassDriverMEOpts : public PassDriverME {
* @brief Apply a patch: perform start/work/end functions.
*/
virtual void ApplyPass(PassDataHolder* data, const Pass* pass) OVERRIDE;
+
+ const PassManager* const post_opt_pass_manager_;
};
} // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index e57889aeb7..32a469db8c 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1403,7 +1403,12 @@ void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation
}
}
-void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
+void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx,
+ RegLocation rl_src) {
+ if ((opt_flags & MIR_IGNORE_CHECK_CAST) != 0) {
+ // Compiler analysis proved that this check-cast would never cause an exception.
+ return;
+ }
bool type_known_final, type_known_abstract, use_declaring_class;
bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
*cu_->dex_file,
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 83486265c4..8edc5fc4f0 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -632,7 +632,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::CHECK_CAST: {
- GenCheckCast(mir->offset, vB, rl_src[0]);
+ GenCheckCast(opt_flags, mir->offset, vB, rl_src[0]);
break;
}
case Instruction::INSTANCE_OF:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6f3f057038..9a56171531 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -826,7 +826,7 @@ class Mir2Lir {
void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
void GenThrow(RegLocation rl_src);
void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
- void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
+ void GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 02d74a0691..922f2f7ad7 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -708,7 +708,7 @@ CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
}
/* Create the pass driver and launch it */
- PassDriverMEOpts pass_driver(GetPreOptPassManager(), &cu);
+ PassDriverMEOpts pass_driver(GetPreOptPassManager(), GetPostOptPassManager(), &cu);
pass_driver.Launch();
/* For non-leaf methods check if we should skip compilation when the profiler is enabled. */
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 3d3d5cbc2a..ff4e0d850a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1901,7 +1901,7 @@ static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t c
if (klass->IsResolved()) {
if (klass->GetStatus() < mirror::Class::kStatusVerified) {
ObjectLock<mirror::Class> lock(soa.Self(), klass);
- klass->SetStatus(mirror::Class::kStatusVerified, soa.Self());
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
}
// Record the final class status if necessary.
ClassReference ref(manager->GetDexFile(), class_def_index);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 667f686059..ecaa6f0123 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -246,12 +246,9 @@ class CodeGenerator {
void EmitParallelMoves(Location from1, Location to1, Location from2, Location to2);
static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) {
- if (kIsDebugBuild) {
- if (type == Primitive::kPrimNot && value->IsIntConstant()) {
- CHECK_EQ(value->AsIntConstant()->GetValue(), 0);
- }
- }
- return type == Primitive::kPrimNot && !value->IsIntConstant();
+ // Check that null value is not represented as an integer constant.
+ DCHECK(type != Primitive::kPrimNot || !value->IsIntConstant());
+ return type == Primitive::kPrimNot && !value->IsNullConstant();
}
void AddAllocatedRegister(Location location) {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 123f690763..0a069a75ef 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -903,10 +903,6 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- __ Comment("Unreachable");
- __ bkpt(0);
- }
}
void LocationsBuilderARM::VisitIf(HIf* if_instr) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c48cab4985..aeec5dd1c4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1596,10 +1596,6 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) {
void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
- __ Brk(__LINE__); // TODO: Introduce special markers for such code locations.
- }
}
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1db16002c0..754dd1088d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -812,10 +812,6 @@ void LocationsBuilderX86::VisitExit(HExit* exit) {
void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- __ Comment("Unreachable");
- __ int3();
- }
}
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 90d87d4b9f..dbd7c9e8ad 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -749,10 +749,6 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) {
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- __ Comment("Unreachable");
- __ int3();
- }
}
void LocationsBuilderX86_64::VisitIf(HIf* if_instr) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index e47b4f61b2..b70f9252ae 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -173,24 +173,40 @@ class OptimizingCompiler FINAL : public Compiler {
jobject class_loader,
const DexFile& dex_file) const OVERRIDE;
+ CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const;
+
CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file) const OVERRIDE;
+ const DexFile& dex_file) const OVERRIDE {
+ return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
+ }
uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
+ }
bool WriteElf(art::File* file,
OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
- bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
+ *GetCompilerDriver());
+ }
- void InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const OVERRIDE {}
+ void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
void Init() OVERRIDE;
- void UnInit() const OVERRIDE {}
+ void UnInit() const OVERRIDE;
private:
// Whether we should run any optimization or register allocation. If false, will
@@ -214,6 +230,9 @@ class OptimizingCompiler FINAL : public Compiler {
std::unique_ptr<std::ostream> visualizer_output_;
+ // Delegate to Quick in case the optimizing compiler cannot compile a method.
+ std::unique_ptr<Compiler> delegate_;
+
DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};
@@ -224,9 +243,11 @@ OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
run_optimizations_(
(driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime)
&& !driver->GetCompilerOptions().GetDebuggable()),
- compilation_stats_() {}
+ compilation_stats_(),
+ delegate_(Create(driver, Compiler::Kind::kQuick)) {}
void OptimizingCompiler::Init() {
+ delegate_->Init();
// Enable C1visualizer output. Must be done in Init() because the compiler
// driver is not fully initialized when passed to the compiler's constructor.
CompilerDriver* driver = GetCompilerDriver();
@@ -239,34 +260,24 @@ void OptimizingCompiler::Init() {
}
}
+void OptimizingCompiler::UnInit() const {
+ delegate_->UnInit();
+}
+
OptimizingCompiler::~OptimizingCompiler() {
compilation_stats_.Log();
}
+void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
+ delegate_->InitCompilationUnit(cu);
+}
+
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
const DexFile& dex_file ATTRIBUTE_UNUSED,
CompilationUnit* cu ATTRIBUTE_UNUSED) const {
return true;
}
-CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) const {
- return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
-}
-
-uintptr_t OptimizingCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
- InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
-}
-
-bool OptimizingCompiler::WriteElf(art::File* file, OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root, bool is_host) const {
- return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
- *GetCompilerDriver());
-}
-
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
return instruction_set == kArm64
|| (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
@@ -422,13 +433,13 @@ CompiledMethod* OptimizingCompiler::CompileBaseline(
ArrayRef<const uint8_t>());
}
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
+CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
UNUSED(invoke_type);
std::string method_name = PrettyMethod(method_idx, dex_file);
compilation_stats_.RecordStat(MethodCompilationStat::kAttemptCompilation);
@@ -502,6 +513,11 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
bool can_optimize = CanOptimize(*code_item);
bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);
+
+ // `run_optimizations_` is set explicitly (either through a compiler filter
+ // or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
+ // to Quick.
+ bool can_use_baseline = !run_optimizations_;
if (run_optimizations_ && can_optimize && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << method_name;
@@ -524,7 +540,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
} else if (shouldOptimize && can_allocate_registers) {
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
UNREACHABLE();
- } else {
+ } else if (can_use_baseline) {
VLOG(compiler) << "Compile baseline " << method_name;
if (!run_optimizations_) {
@@ -536,7 +552,25 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
return CompileBaseline(codegen.get(), compiler_driver, dex_compilation_unit);
+ } else {
+ return nullptr;
+ }
+}
+
+CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, class_loader, dex_file);
+ if (method != nullptr) {
+ return method;
}
+ return delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 63a02862b4..e1a5afe0e7 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -105,8 +105,8 @@ class StackMapStream : public ValueObject {
+ ComputeStackMapsSize()
+ ComputeDexRegisterMapsSize()
+ ComputeInlineInfoSize();
- // On ARM, CodeInfo data must be 4-byte aligned.
- return RoundUp(size, kWordAlignment);
+ // Note: use RoundUp to word-size here if you want CodeInfo objects to be word aligned.
+ return size;
}
size_t ComputeStackMaskSize() const {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 8f203810e1..6588288d7f 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -79,6 +79,7 @@ LIBART_COMMON_SRC_FILES := \
intern_table.cc \
interpreter/interpreter.cc \
interpreter/interpreter_common.cc \
+ interpreter/interpreter_goto_table_impl.cc \
interpreter/interpreter_switch_impl.cc \
interpreter/unstarted_runtime.cc \
java_vm_ext.cc \
@@ -202,10 +203,6 @@ LIBART_COMMON_SRC_FILES += \
entrypoints/quick/quick_throw_entrypoints.cc \
entrypoints/quick/quick_trampoline_entrypoints.cc
-# Source files that only compile with GCC.
-LIBART_GCC_ONLY_SRC_FILES := \
- interpreter/interpreter_goto_table_impl.cc
-
LIBART_TARGET_LDFLAGS :=
LIBART_HOST_LDFLAGS :=
@@ -436,19 +433,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
$$(eval $$(call set-target-local-cflags-vars,$(2)))
LOCAL_CFLAGS_$(DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
LOCAL_CFLAGS_$(2ND_DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
-
- # TODO: Loop with ifeq, ART_TARGET_CLANG
- ifneq ($$(ART_TARGET_CLANG_$$(TARGET_ARCH)),true)
- LOCAL_SRC_FILES_$$(TARGET_ARCH) += $$(LIBART_GCC_ONLY_SRC_FILES)
- endif
- ifneq ($$(ART_TARGET_CLANG_$$(TARGET_2ND_ARCH)),true)
- LOCAL_SRC_FILES_$$(TARGET_2ND_ARCH) += $$(LIBART_GCC_ONLY_SRC_FILES)
- endif
else # host
- ifneq ($$(ART_HOST_CLANG),true)
- # Add files only built with GCC on the host.
- LOCAL_SRC_FILES += $$(LIBART_GCC_ONLY_SRC_FILES)
- endif
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_LDLIBS := $$(ART_HOST_LDLIBS)
LOCAL_LDLIBS += -ldl -lpthread
@@ -534,7 +519,6 @@ endif
# Clear locally defined variables.
LOCAL_PATH :=
LIBART_COMMON_SRC_FILES :=
-LIBART_GCC_ONLY_SRC_FILES :=
LIBART_HOST_DEFAULT_INSTRUCTION_SET_FEATURES :=
LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES :=
2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES :=
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index e37aca1031..dd29404cd9 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -33,7 +33,7 @@ template <bool kCount>
const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
"Misc ",
"BasicBlock ",
- "BBList "
+ "BBList ",
"BBPreds ",
"DfsPreOrd ",
"DfsPostOrd ",
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 992e5b1697..ab63dddaff 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -50,91 +50,101 @@ class DefaultEmptyFn<T*> {
};
// Low memory version of a hash set, uses less memory than std::unordered_set since elements aren't
-// boxed. Uses linear probing.
-// EmptyFn needs to implement two functions MakeEmpty(T& item) and IsEmpty(const T& item)
+// boxed. Uses linear probing to resolve collisions.
+// EmptyFn needs to implement two functions MakeEmpty(T& item) and IsEmpty(const T& item).
+// TODO: We could get rid of this requirement by using a bitmap, though maybe this would be slower
+// and more complicated.
template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
class HashSet {
- public:
- static constexpr double kDefaultMinLoadFactor = 0.5;
- static constexpr double kDefaultMaxLoadFactor = 0.9;
- static constexpr size_t kMinBuckets = 1000;
-
- class Iterator {
+ template <class Elem, class HashSetType>
+ class BaseIterator {
public:
- Iterator(const Iterator&) = default;
- Iterator(HashSet* hash_set, size_t index) : hash_set_(hash_set), index_(index) {
+ BaseIterator(const BaseIterator&) = default;
+ BaseIterator(BaseIterator&&) = default;
+ BaseIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {
}
- Iterator& operator=(const Iterator&) = default;
- bool operator==(const Iterator& other) const {
- return hash_set_ == other.hash_set_ && index_ == other.index_;
+ BaseIterator& operator=(const BaseIterator&) = default;
+ BaseIterator& operator=(BaseIterator&&) = default;
+
+ bool operator==(const BaseIterator& other) const {
+ return hash_set_ == other.hash_set_ && this->index_ == other.index_;
}
- bool operator!=(const Iterator& other) const {
+
+ bool operator!=(const BaseIterator& other) const {
return !(*this == other);
}
- Iterator operator++() { // Value after modification.
- index_ = NextNonEmptySlot(index_);
+
+ BaseIterator operator++() { // Value after modification.
+ this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
return *this;
}
- Iterator operator++(int) {
+
+ BaseIterator operator++(int) {
-      Iterator temp = *this;
+      BaseIterator temp = *this;
- index_ = NextNonEmptySlot(index_);
+ this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
return temp;
}
- T& operator*() {
- DCHECK(!hash_set_->IsFreeSlot(GetIndex()));
- return hash_set_->ElementForIndex(index_);
- }
- const T& operator*() const {
- DCHECK(!hash_set_->IsFreeSlot(GetIndex()));
- return hash_set_->ElementForIndex(index_);
- }
- T* operator->() {
- return &**this;
+
+ Elem& operator*() const {
+ DCHECK(!hash_set_->IsFreeSlot(this->index_));
+ return hash_set_->ElementForIndex(this->index_);
}
- const T* operator->() const {
+
+ Elem* operator->() const {
return &**this;
}
+
// TODO: Operator -- --(int)
private:
- HashSet* hash_set_;
size_t index_;
+ HashSetType* hash_set_;
- size_t GetIndex() const {
- return index_;
- }
- size_t NextNonEmptySlot(size_t index) const {
- const size_t num_buckets = hash_set_->NumBuckets();
+ size_t NextNonEmptySlot(size_t index, const HashSet* hash_set) const {
+ const size_t num_buckets = hash_set->NumBuckets();
DCHECK_LT(index, num_buckets);
do {
++index;
- } while (index < num_buckets && hash_set_->IsFreeSlot(index));
+ } while (index < num_buckets && hash_set->IsFreeSlot(index));
return index;
}
friend class HashSet;
};
+ public:
+ static constexpr double kDefaultMinLoadFactor = 0.5;
+ static constexpr double kDefaultMaxLoadFactor = 0.9;
+ static constexpr size_t kMinBuckets = 1000;
+
+ typedef BaseIterator<T, HashSet> Iterator;
+ typedef BaseIterator<const T, const HashSet> ConstIterator;
+
void Clear() {
DeallocateStorage();
AllocateStorage(1);
num_elements_ = 0;
elements_until_expand_ = 0;
}
+
HashSet() : num_elements_(0), num_buckets_(0), data_(nullptr),
min_load_factor_(kDefaultMinLoadFactor), max_load_factor_(kDefaultMaxLoadFactor) {
Clear();
}
+
HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
*this = other;
}
+
HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
*this = std::move(other);
}
+
~HashSet() {
DeallocateStorage();
}
+
HashSet& operator=(HashSet&& other) {
std::swap(data_, other.data_);
std::swap(num_buckets_, other.num_buckets_);
@@ -144,6 +154,7 @@ class HashSet {
std::swap(max_load_factor_, other.max_load_factor_);
return *this;
}
+
HashSet& operator=(const HashSet& other) {
DeallocateStorage();
AllocateStorage(other.NumBuckets());
@@ -156,21 +167,25 @@ class HashSet {
max_load_factor_ = other.max_load_factor_;
return *this;
}
+
// Lower case for c++11 for each.
Iterator begin() {
Iterator ret(this, 0);
- if (num_buckets_ != 0 && IsFreeSlot(ret.GetIndex())) {
+ if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
++ret; // Skip all the empty slots.
}
return ret;
}
+
// Lower case for c++11 for each.
Iterator end() {
return Iterator(this, NumBuckets());
}
+
bool Empty() {
- return begin() == end();
+ return Size() == 0;
}
+
// Erase algorithm:
// Make an empty slot where the iterator is pointing.
// Scan forwards until we hit another empty slot.
@@ -181,7 +196,7 @@ class HashSet {
// element to its actual location/index.
Iterator Erase(Iterator it) {
// empty_index is the index that will become empty.
- size_t empty_index = it.GetIndex();
+ size_t empty_index = it.index_;
DCHECK(!IsFreeSlot(empty_index));
size_t next_index = empty_index;
bool filled = false; // True if we filled the empty index.
@@ -224,33 +239,36 @@ class HashSet {
}
return it;
}
+
// Find an element, returns end() if not found.
- // Allows custom K types, example of when this is useful.
+ // Allows custom key (K) types, example of when this is useful:
// Set of Class* sorted by name, want to find a class with a name but can't allocate a dummy
// object in the heap for performance reasons.
template <typename K>
Iterator Find(const K& element) {
return FindWithHash(element, hashfn_(element));
}
+
+ template <typename K>
+ ConstIterator Find(const K& element) const {
+ return FindWithHash(element, hashfn_(element));
+ }
+
template <typename K>
Iterator FindWithHash(const K& element, size_t hash) {
- DCHECK_EQ(hashfn_(element), hash);
- size_t index = IndexForHash(hash);
- while (true) {
- T& slot = ElementForIndex(index);
- if (emptyfn_.IsEmpty(slot)) {
- return end();
- }
- if (pred_(slot, element)) {
- return Iterator(this, index);
- }
- index = NextIndex(index);
- }
+ return Iterator(this, FindIndex(element, hash));
+ }
+
+ template <typename K>
+ ConstIterator FindWithHash(const K& element, size_t hash) const {
+ return ConstIterator(this, FindIndex(element, hash));
}
+
// Insert an element, allows duplicates.
void Insert(const T& element) {
InsertWithHash(element, hashfn_(element));
}
+
void InsertWithHash(const T& element, size_t hash) {
DCHECK_EQ(hash, hashfn_(element));
if (num_elements_ >= elements_until_expand_) {
@@ -261,12 +279,15 @@ class HashSet {
data_[index] = element;
++num_elements_;
}
+
size_t Size() const {
return num_elements_;
}
+
void ShrinkToMaximumLoad() {
Resize(Size() / max_load_factor_);
}
+
+ // Total distance that inserted elements were probed. Used for measuring how good hash functions
// are.
size_t TotalProbeDistance() const {
@@ -284,10 +305,12 @@ class HashSet {
}
return total;
}
+
// Calculate the current load factor and return it.
double CalculateLoadFactor() const {
return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
}
+
// Make sure that everything reinserts in the right spot. Returns the number of errors.
size_t Verify() {
size_t errors = 0;
@@ -314,14 +337,17 @@ class HashSet {
DCHECK(data_ != nullptr);
return data_[index];
}
+
const T& ElementForIndex(size_t index) const {
DCHECK_LT(index, NumBuckets());
DCHECK(data_ != nullptr);
return data_[index];
}
+
size_t IndexForHash(size_t hash) const {
return hash % num_buckets_;
}
+
size_t NextIndex(size_t index) const {
if (UNLIKELY(++index >= num_buckets_)) {
DCHECK_EQ(index, NumBuckets());
@@ -329,12 +355,33 @@ class HashSet {
}
return index;
}
+
+ // Find the hash table slot for an element, or return NumBuckets() if not found.
+ // This value for not found is important so that Iterator(this, FindIndex(...)) == end().
+ template <typename K>
+ size_t FindIndex(const K& element, size_t hash) const {
+ DCHECK_EQ(hashfn_(element), hash);
+ size_t index = IndexForHash(hash);
+ while (true) {
+ const T& slot = ElementForIndex(index);
+ if (emptyfn_.IsEmpty(slot)) {
+ return NumBuckets();
+ }
+ if (pred_(slot, element)) {
+ return index;
+ }
+ index = NextIndex(index);
+ }
+ }
+
bool IsFreeSlot(size_t index) const {
return emptyfn_.IsEmpty(ElementForIndex(index));
}
+
size_t NumBuckets() const {
return num_buckets_;
}
+
// Allocate a number of buckets.
void AllocateStorage(size_t num_buckets) {
num_buckets_ = num_buckets;
@@ -344,6 +391,7 @@ class HashSet {
emptyfn_.MakeEmpty(data_[i]);
}
}
+
void DeallocateStorage() {
if (num_buckets_ != 0) {
for (size_t i = 0; i < NumBuckets(); ++i) {
@@ -354,6 +402,7 @@ class HashSet {
num_buckets_ = 0;
}
}
+
// Expand the set based on the load factors.
void Expand() {
size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
@@ -365,6 +414,7 @@ class HashSet {
// When we hit elements_until_expand_, we are at the max load factor and must expand again.
elements_until_expand_ = NumBuckets() * max_load_factor_;
}
+
// Expand / shrink the table to the new specified size.
void Resize(size_t new_size) {
DCHECK_GE(new_size, Size());
@@ -381,6 +431,7 @@ class HashSet {
}
allocfn_.deallocate(old_data, old_num_buckets);
}
+
ALWAYS_INLINE size_t FirstAvailableSlot(size_t index) const {
while (!emptyfn_.IsEmpty(data_[index])) {
index = NextIndex(index);
@@ -398,8 +449,6 @@ class HashSet {
T* data_; // Backing storage.
double min_load_factor_;
double max_load_factor_;
-
- friend class Iterator;
};
} // namespace art
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 5f498d9c78..e88637ffa5 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -21,7 +21,7 @@
#include <string>
#include <unordered_set>
-#include "common_runtime_test.h"
+#include <gtest/gtest.h>
#include "hash_map.h"
namespace art {
@@ -35,7 +35,7 @@ struct IsEmptyFnString {
}
};
-class HashSetTest : public CommonRuntimeTest {
+class HashSetTest : public testing::Test {
public:
HashSetTest() : seed_(97421), unique_number_(0) {
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 700e1adf91..67872d76ae 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -282,7 +282,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
CHECK(java_lang_Object.Get() != nullptr);
// backfill Object as the super class of Class.
java_lang_Class->SetSuperClass(java_lang_Object.Get());
- java_lang_Object->SetStatus(mirror::Class::kStatusLoaded, self);
+ mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusLoaded, self);
// Object[] next to hold class roots.
Handle<mirror::Class> object_array_class(hs.NewHandle(
@@ -311,14 +311,14 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize())));
mirror::String::SetClass(java_lang_String.Get());
java_lang_String->SetObjectSize(mirror::String::InstanceSize());
- java_lang_String->SetStatus(mirror::Class::kStatusResolved, self);
+ mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusResolved, self);
// Setup Reference.
Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::Reference::ClassSize())));
mirror::Reference::SetClass(java_lang_ref_Reference.Get());
java_lang_ref_Reference->SetObjectSize(mirror::Reference::InstanceSize());
- java_lang_ref_Reference->SetStatus(mirror::Class::kStatusResolved, self);
+ mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusResolved, self);
// Create storage for root classes, save away our work so far (requires descriptors).
class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class> >(
@@ -360,7 +360,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
AllocClass(self, java_lang_Class.Get(), mirror::DexCache::ClassSize())));
SetClassRoot(kJavaLangDexCache, java_lang_DexCache.Get());
java_lang_DexCache->SetObjectSize(mirror::DexCache::InstanceSize());
- java_lang_DexCache->SetStatus(mirror::Class::kStatusResolved, self);
+ mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusResolved, self);
// Constructor, Field, Method, and AbstractMethod are necessary so
// that FindClass can link members.
@@ -369,7 +369,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
CHECK(java_lang_reflect_ArtField.Get() != nullptr);
java_lang_reflect_ArtField->SetObjectSize(mirror::ArtField::InstanceSize());
SetClassRoot(kJavaLangReflectArtField, java_lang_reflect_ArtField.Get());
- java_lang_reflect_ArtField->SetStatus(mirror::Class::kStatusResolved, self);
+ mirror::Class::SetStatus(java_lang_reflect_ArtField, mirror::Class::kStatusResolved, self);
mirror::ArtField::SetClass(java_lang_reflect_ArtField.Get());
Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
@@ -378,7 +378,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
size_t pointer_size = GetInstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(pointer_size));
SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
- java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
+ mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusResolved, self);
mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
// Set up array classes for string, field, method
@@ -434,18 +434,18 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
}
// Object, String and DexCache need to be rerun through FindSystemClass to finish init
- java_lang_Object->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusNotReady, self);
mirror::Class* Object_class = FindSystemClass(self, "Ljava/lang/Object;");
CHECK_EQ(java_lang_Object.Get(), Object_class);
CHECK_EQ(java_lang_Object->GetObjectSize(), mirror::Object::InstanceSize());
- java_lang_String->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusNotReady, self);
mirror::Class* String_class = FindSystemClass(self, "Ljava/lang/String;");
std::ostringstream os1, os2;
java_lang_String->DumpClass(os1, mirror::Class::kDumpClassFullDetail);
String_class->DumpClass(os2, mirror::Class::kDumpClassFullDetail);
CHECK_EQ(java_lang_String.Get(), String_class) << os1.str() << "\n\n" << os2.str();
CHECK_EQ(java_lang_String->GetObjectSize(), mirror::String::InstanceSize());
- java_lang_DexCache->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusNotReady, self);
mirror::Class* DexCache_class = FindSystemClass(self, "Ljava/lang/DexCache;");
CHECK_EQ(java_lang_String.Get(), String_class);
CHECK_EQ(java_lang_DexCache.Get(), DexCache_class);
@@ -505,11 +505,11 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
mirror::Class* Class_class = FindSystemClass(self, "Ljava/lang/Class;");
CHECK_EQ(java_lang_Class.Get(), Class_class);
- java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusNotReady, self);
mirror::Class* Art_method_class = FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;");
CHECK_EQ(java_lang_reflect_ArtMethod.Get(), Art_method_class);
- java_lang_reflect_ArtField->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class::SetStatus(java_lang_reflect_ArtField, mirror::Class::kStatusNotReady, self);
mirror::Class* Art_field_class = FindSystemClass(self, "Ljava/lang/reflect/ArtField;");
CHECK_EQ(java_lang_reflect_ArtField.Get(), Art_field_class);
@@ -533,7 +533,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// java.lang.ref classes need to be specially flagged, but otherwise are normal classes
// finish initializing Reference class
- java_lang_ref_Reference->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self);
mirror::Class* Reference_class = FindSystemClass(self, "Ljava/lang/ref/Reference;");
CHECK_EQ(java_lang_ref_Reference.Get(), Reference_class);
CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), mirror::Reference::InstanceSize());
@@ -1213,7 +1213,7 @@ mirror::Class* ClassLinker::EnsureResolved(Thread* self, const char* descriptor,
// Check for circular dependencies between classes.
if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
ThrowClassCircularityError(h_class.Get());
- h_class->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self);
return nullptr;
}
// Wait for the pending initialization to complete.
@@ -1489,7 +1489,7 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, si
// An exception occured during load, set status to erroneous while holding klass' lock in case
// notification is necessary.
if (!klass->IsErroneous()) {
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
}
return nullptr;
}
@@ -1508,7 +1508,7 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, si
if (!LoadSuperAndInterfaces(klass, dex_file)) {
// Loading failed.
if (!klass->IsErroneous()) {
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
}
return nullptr;
}
@@ -1522,7 +1522,7 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, si
if (!LinkClass(self, descriptor, klass, interfaces, &new_class)) {
// Linking failed.
if (!klass->IsErroneous()) {
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
}
return nullptr;
}
@@ -1894,7 +1894,7 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
klass->SetAccessFlags(access_flags);
klass->SetClassLoader(class_loader);
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
- klass->SetStatus(mirror::Class::kStatusIdx, nullptr);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, nullptr);
klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
klass->SetDexTypeIndex(dex_class_def.class_idx_);
@@ -2223,14 +2223,14 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(primitive_class));
ObjectLock<mirror::Class> lock(self, h_class);
- primitive_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
- primitive_class->SetPrimitiveType(type);
- primitive_class->SetStatus(mirror::Class::kStatusInitialized, self);
+ h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
+ h_class->SetPrimitiveType(type);
+ mirror::Class::SetStatus(h_class, mirror::Class::kStatusInitialized, self);
const char* descriptor = Primitive::Descriptor(type);
- mirror::Class* existing = InsertClass(descriptor, primitive_class,
+ mirror::Class* existing = InsertClass(descriptor, h_class.Get(),
ComputeModifiedUtf8Hash(descriptor));
CHECK(existing == nullptr) << "InitPrimitiveClass(" << type << ") failed";
- return primitive_class;
+ return h_class.Get();
}
// Create an array class (i.e. the class object for the array, not the
@@ -2336,13 +2336,13 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
new_class->SetVTable(java_lang_Object->GetVTable());
new_class->SetPrimitiveType(Primitive::kPrimNot);
new_class->SetClassLoader(component_type->GetClassLoader());
- new_class->SetStatus(mirror::Class::kStatusLoaded, self);
+ mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self);
{
StackHandleScope<mirror::Class::kImtSize> hs2(self,
Runtime::Current()->GetImtUnimplementedMethod());
new_class->PopulateEmbeddedImtAndVTable(&hs2);
}
- new_class->SetStatus(mirror::Class::kStatusInitialized, self);
+ mirror::Class::SetStatus(new_class, mirror::Class::kStatusInitialized, self);
// don't need to set new_class->SetObjectSize(..)
// because Object::SizeOf delegates to Array::SizeOf
@@ -2672,17 +2672,17 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
}
if (klass->GetStatus() == mirror::Class::kStatusResolved) {
- klass->SetStatus(mirror::Class::kStatusVerifying, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifying, self);
} else {
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
<< PrettyClass(klass.Get());
CHECK(!Runtime::Current()->IsAotCompiler());
- klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifyingAtRuntime, self);
}
// Skip verification if disabled.
if (!Runtime::Current()->IsVerificationEnabled()) {
- klass->SetStatus(mirror::Class::kStatusVerified, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
EnsurePreverifiedMethods(klass);
return;
}
@@ -2715,7 +2715,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
if (Runtime::Current()->IsAotCompiler()) {
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return;
}
}
@@ -2730,7 +2730,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
<< klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
ThrowVerifyError(klass.Get(), "Rejecting class %s because it failed compile-time verification",
PrettyDescriptor(klass.Get()).c_str());
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return;
}
verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
@@ -2753,10 +2753,10 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
// Even though there were no verifier failures we need to respect whether the super-class
// was verified or requiring runtime reverification.
if (super.Get() == nullptr || super->IsVerified()) {
- klass->SetStatus(mirror::Class::kStatusVerified, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
} else {
CHECK_EQ(super->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
- klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusRetryVerificationAtRuntime, self);
// Pretend a soft failure occured so that we don't consider the class verified below.
verifier_failure = verifier::MethodVerifier::kSoftFailure;
}
@@ -2766,9 +2766,9 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
// failures at runtime will be handled by slow paths in the generated
// code. Set status accordingly.
if (Runtime::Current()->IsAotCompiler()) {
- klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusRetryVerificationAtRuntime, self);
} else {
- klass->SetStatus(mirror::Class::kStatusVerified, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
// As this is a fake verified status, make sure the methods are _not_ marked preverified
// later.
klass->SetPreverified();
@@ -2780,7 +2780,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
<< " because: " << error_msg;
self->AssertNoPendingException();
ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
}
if (preverified || verifier_failure == verifier::MethodVerifier::kNoFailure) {
// Class is verified so we don't need to do any access check on its methods.
@@ -2947,7 +2947,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
klass->SetName(soa.Decode<mirror::String*>(name));
mirror::Class* proxy_class = GetClassRoot(kJavaLangReflectProxy);
klass->SetDexCache(proxy_class->GetDexCache());
- klass->SetStatus(mirror::Class::kStatusIdx, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
// Instance fields are inherited, but we add a couple of static fields...
{
@@ -3022,7 +3022,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
}
klass->SetSuperClass(proxy_class); // The super class is java.lang.reflect.Proxy
- klass->SetStatus(mirror::Class::kStatusLoaded, self); // Now effectively in the loaded state.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusLoaded, self); // Now effectively in the loaded state.
self->AssertNoPendingException();
std::string descriptor(GetDescriptorForProxy(klass.Get()));
@@ -3034,7 +3034,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
Handle<mirror::ObjectArray<mirror::Class> > h_interfaces(
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)));
if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) {
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return nullptr;
}
}
@@ -3053,7 +3053,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
{
// Lock on klass is released. Lock new class object.
ObjectLock<mirror::Class> initialization_lock(self, klass);
- klass->SetStatus(mirror::Class::kStatusInitialized, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusInitialized, self);
}
// sanity checks
@@ -3310,7 +3310,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
}
if (!ValidateSuperClassDescriptors(klass)) {
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return false;
}
self->AllowThreadSuspension();
@@ -3320,7 +3320,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
// From here out other threads may observe that we're initializing and so changes of state
// require the a notification.
klass->SetClinitThreadId(self->GetTid());
- klass->SetStatus(mirror::Class::kStatusInitializing, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusInitializing, self);
t0 = NanoTime();
}
@@ -3345,7 +3345,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
<< (self->GetException() != nullptr ? self->GetException()->Dump() : "");
ObjectLock<mirror::Class> lock(self, klass);
// Initialization failed because the super-class is erroneous.
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return false;
}
}
@@ -3410,7 +3410,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
if (self->IsExceptionPending()) {
WrapExceptionInInitializer(klass);
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
success = false;
} else if (Runtime::Current()->IsTransactionAborted()) {
// The exception thrown when the transaction aborted has been caught and cleared
@@ -3418,7 +3418,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
VLOG(compiler) << "Return from class initializer of " << PrettyDescriptor(klass.Get())
<< " without exception while transaction was aborted: re-throw it now.";
Runtime::Current()->ThrowInternalErrorForAbortedTransaction(self);
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
success = false;
} else {
RuntimeStats* global_stats = Runtime::Current()->GetStats();
@@ -3428,7 +3428,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
global_stats->class_init_time_ns += (t1 - t0);
thread_stats->class_init_time_ns += (t1 - t0);
// Set the class as initialized except if failed to initialize static fields.
- klass->SetStatus(mirror::Class::kStatusInitialized, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusInitialized, self);
if (VLOG_IS_ON(class_linker)) {
std::string temp;
LOG(INFO) << "Initialized class " << klass->GetDescriptor(&temp) << " from " <<
@@ -3454,7 +3454,7 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se
// we were not using WaitIgnoringInterrupts), bail out.
if (self->IsExceptionPending()) {
WrapExceptionInInitializer(klass);
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return false;
}
// Spurious wakeup? Go back to waiting.
@@ -3688,7 +3688,7 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
// This will notify waiters on klass that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
- klass->SetStatus(mirror::Class::kStatusResolved, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusResolved, self);
*new_class = klass.Get();
} else {
CHECK(!klass->IsResolved());
@@ -3696,7 +3696,7 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
*new_class = klass->CopyOf(self, class_size, &imt_handle_scope);
if (UNLIKELY(*new_class == nullptr)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
- klass->SetStatus(mirror::Class::kStatusError, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return false;
}
@@ -3713,12 +3713,12 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
// This will notify waiters on temp class that saw the not yet resolved class in the
// class_table_ during EnsureResolved.
- klass->SetStatus(mirror::Class::kStatusRetired, self);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusRetired, self);
CHECK_EQ(new_class_h->GetStatus(), mirror::Class::kStatusResolving);
// This will notify waiters on new_class that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
- new_class_h->SetStatus(mirror::Class::kStatusResolved, self);
+ mirror::Class::SetStatus(new_class_h, mirror::Class::kStatusResolved, self);
}
return true;
}
@@ -3933,7 +3933,7 @@ bool ClassLinker::LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexF
}
}
// Mark the class as loaded.
- klass->SetStatus(mirror::Class::kStatusLoaded, nullptr);
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusLoaded, nullptr);
return true;
}
@@ -5255,7 +5255,7 @@ std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirr
}
bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
- const GcRoot<mirror::Class>& b) {
+ const GcRoot<mirror::Class>& b) const {
if (a.Read()->GetClassLoader() != b.Read()->GetClassLoader()) {
return false;
}
@@ -5269,7 +5269,7 @@ std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(
}
bool ClassLinker::ClassDescriptorHashEquals::operator()(
- const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) {
+ const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) const {
if (a.Read()->GetClassLoader() != b.second) {
return false;
}
@@ -5277,7 +5277,7 @@ bool ClassLinker::ClassDescriptorHashEquals::operator()(
}
bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
- const char* descriptor) {
+ const char* descriptor) const {
return a.Read()->DescriptorEquals(descriptor);
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 75fbdf3f59..4ebce3e962 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -661,16 +661,16 @@ class ClassLinker {
public:
// Same class loader and descriptor.
std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
- bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b)
+ bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const
NO_THREAD_SAFETY_ANALYSIS;
// Same class loader and descriptor.
std::size_t operator()(const std::pair<const char*, mirror::ClassLoader*>& element) const
NO_THREAD_SAFETY_ANALYSIS;
bool operator()(const GcRoot<mirror::Class>& a,
- const std::pair<const char*, mirror::ClassLoader*>& b)
+ const std::pair<const char*, mirror::ClassLoader*>& b) const
NO_THREAD_SAFETY_ANALYSIS;
// Same descriptor.
- bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor)
+ bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const
NO_THREAD_SAFETY_ANALYSIS;
std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS;
};
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0cad11fb80..dff8f4de7a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -291,10 +291,18 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
request_begin = reinterpret_cast<uint8_t*>(300 * MB);
}
+ // Attempt to create 2 mem maps at or after the requested begin.
if (foreground_collector_type_ != kCollectorTypeCC) {
- // Attempt to create 2 mem maps at or after the requested begin.
- main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
- &error_str));
+ if (separate_non_moving_space) {
+ main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin,
+ capacity_, &error_str));
+ } else {
+ // If no separate non-moving space, the main space must come
+ // right after the image space to avoid a gap.
+ main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
+ PROT_READ | PROT_WRITE, true, false,
+ &error_str));
+ }
CHECK(main_mem_map_1.get() != nullptr) << error_str;
}
if (support_homogeneous_space_compaction ||
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 5f97f941fb..af0a530688 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#if !defined(__clang__)
+// Clang 3.4 fails to build the goto interpreter implementation.
+
#include "interpreter_common.h"
#include "safe_math.h"
@@ -2477,3 +2480,5 @@ JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_
} // namespace interpreter
} // namespace art
+
+#endif
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 729791f7b4..13c1f813bd 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -32,10 +32,8 @@ namespace art {
namespace jit {
JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
- if (!options.GetOrDefault(RuntimeArgumentMap::UseJIT)) {
- return nullptr;
- }
auto* jit_options = new JitOptions;
+ jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT);
jit_options->code_cache_capacity_ =
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
jit_options->compile_threshold_ =
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 6b206d15b8..3e80aef4c8 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -100,13 +100,21 @@ class JitOptions {
bool DumpJitInfoOnShutdown() const {
return dump_info_on_shutdown_;
}
+ bool UseJIT() const {
+ return use_jit_;
+ }
+ void SetUseJIT(bool b) {
+ use_jit_ = b;
+ }
private:
+ bool use_jit_;
size_t code_cache_capacity_;
size_t compile_threshold_;
bool dump_info_on_shutdown_;
- JitOptions() : code_cache_capacity_(0), compile_threshold_(0), dump_info_on_shutdown_(false) {
+ JitOptions() : use_jit_(false), code_cache_capacity_(0), compile_threshold_(0),
+ dump_info_on_shutdown_(false) {
}
};
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 6f4ef60e85..9fa6073698 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -55,26 +55,27 @@ void Class::VisitRoots(RootCallback* callback, void* arg) {
java_lang_Class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
}
-void Class::SetStatus(Status new_status, Thread* self) {
- Status old_status = GetStatus();
+void Class::SetStatus(Handle<Class> h_this, Status new_status, Thread* self) {
+ Status old_status = h_this->GetStatus();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
bool class_linker_initialized = class_linker != nullptr && class_linker->IsInitialized();
if (LIKELY(class_linker_initialized)) {
if (UNLIKELY(new_status <= old_status && new_status != kStatusError &&
new_status != kStatusRetired)) {
- LOG(FATAL) << "Unexpected change back of class status for " << PrettyClass(this) << " "
- << old_status << " -> " << new_status;
+ LOG(FATAL) << "Unexpected change back of class status for " << PrettyClass(h_this.Get())
+ << " " << old_status << " -> " << new_status;
}
if (new_status >= kStatusResolved || old_status >= kStatusResolved) {
// When classes are being resolved the resolution code should hold the lock.
- CHECK_EQ(GetLockOwnerThreadId(), self->GetThreadId())
+ CHECK_EQ(h_this->GetLockOwnerThreadId(), self->GetThreadId())
<< "Attempt to change status of class while not holding its lock: "
- << PrettyClass(this) << " " << old_status << " -> " << new_status;
+ << PrettyClass(h_this.Get()) << " " << old_status << " -> " << new_status;
}
}
if (UNLIKELY(new_status == kStatusError)) {
- CHECK_NE(GetStatus(), kStatusError)
- << "Attempt to set as erroneous an already erroneous class " << PrettyClass(this);
+ CHECK_NE(h_this->GetStatus(), kStatusError)
+ << "Attempt to set as erroneous an already erroneous class "
+ << PrettyClass(h_this.Get());
// Stash current exception.
StackHandleScope<1> hs(self);
@@ -100,7 +101,7 @@ void Class::SetStatus(Status new_status, Thread* self) {
// case.
Class* exception_class = old_exception->GetClass();
if (!eiie_class->IsAssignableFrom(exception_class)) {
- SetVerifyErrorClass(exception_class);
+ h_this->SetVerifyErrorClass(exception_class);
}
}
@@ -109,9 +110,9 @@ void Class::SetStatus(Status new_status, Thread* self) {
}
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
- SetField32Volatile<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
+ h_this->SetField32Volatile<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
} else {
- SetField32Volatile<false>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
+ h_this->SetField32Volatile<false>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
}
if (!class_linker_initialized) {
@@ -121,17 +122,17 @@ void Class::SetStatus(Status new_status, Thread* self) {
} else {
// Classes that are being resolved or initialized need to notify waiters that the class status
// changed. See ClassLinker::EnsureResolved and ClassLinker::WaitForInitializeClass.
- if (IsTemp()) {
+ if (h_this->IsTemp()) {
// Class is a temporary one, ensure that waiters for resolution get notified of retirement
// so that they can grab the new version of the class from the class linker's table.
- CHECK_LT(new_status, kStatusResolved) << PrettyDescriptor(this);
+ CHECK_LT(new_status, kStatusResolved) << PrettyDescriptor(h_this.Get());
if (new_status == kStatusRetired || new_status == kStatusError) {
- NotifyAll(self);
+ h_this->NotifyAll(self);
}
} else {
CHECK_NE(new_status, kStatusRetired);
if (old_status >= kStatusResolved || new_status >= kStatusResolved) {
- NotifyAll(self);
+ h_this->NotifyAll(self);
}
}
}
@@ -828,11 +829,12 @@ class CopyClassVisitor {
void operator()(Object* obj, size_t usable_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(usable_size);
- mirror::Class* new_class_obj = obj->AsClass();
- mirror::Object::CopyObject(self_, new_class_obj, orig_->Get(), copy_bytes_);
- new_class_obj->SetStatus(Class::kStatusResolving, self_);
- new_class_obj->PopulateEmbeddedImtAndVTable(imt_handle_scope_);
- new_class_obj->SetClassSize(new_length_);
+ StackHandleScope<1> hs(self_);
+ Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
+ mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
+ mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
+ h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_handle_scope_);
+ h_new_class_obj->SetClassSize(new_length_);
}
private:
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index bd49754731..e7f7c6e96c 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -140,7 +140,9 @@ class MANAGED Class FINAL : public Object {
GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
}
- void SetStatus(Status new_status, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // This is static because 'this' may be moved by GC.
+ static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static MemberOffset StatusOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, status_);
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index c056adc82e..022c56fff1 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -21,6 +21,7 @@
#include "arch/instruction_set.h"
#include "debugger.h"
#include "java_vm_ext.h"
+#include "jit/jit.h"
#include "jni_internal.h"
#include "JNIHelp.h"
#include "ScopedUtfChars.h"
@@ -59,10 +60,11 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
DEBUG_ENABLE_ASSERT = 1 << 2,
DEBUG_ENABLE_SAFEMODE = 1 << 3,
DEBUG_ENABLE_JNI_LOGGING = 1 << 4,
+ DEBUG_ENABLE_JIT = 1 << 5,
};
+ Runtime* const runtime = Runtime::Current();
if ((debug_flags & DEBUG_ENABLE_CHECKJNI) != 0) {
- Runtime* runtime = Runtime::Current();
JavaVMExt* vm = runtime->GetJavaVM();
if (!vm->IsCheckJniEnabled()) {
LOG(INFO) << "Late-enabling -Xcheck:jni";
@@ -86,13 +88,25 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
}
debug_flags &= ~DEBUG_ENABLE_DEBUGGER;
- if ((debug_flags & DEBUG_ENABLE_SAFEMODE) != 0) {
+ const bool safe_mode = (debug_flags & DEBUG_ENABLE_SAFEMODE) != 0;
+ if (safe_mode) {
// Ensure that any (secondary) oat files will be interpreted.
- Runtime* runtime = Runtime::Current();
runtime->AddCompilerOption("--compiler-filter=interpret-only");
debug_flags &= ~DEBUG_ENABLE_SAFEMODE;
}
+ bool use_jit = false;
+ if ((debug_flags & DEBUG_ENABLE_JIT) != 0) {
+ if (safe_mode) {
+ LOG(INFO) << "Not enabling JIT due to safe mode";
+ } else {
+ use_jit = true;
+ LOG(INFO) << "Late-enabling JIT";
+ }
+ debug_flags &= ~DEBUG_ENABLE_JIT;
+ }
+ runtime->GetJITOptions()->SetUseJIT(use_jit);
+
// This is for backwards compatibility with Dalvik.
debug_flags &= ~DEBUG_ENABLE_ASSERT;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 71679ae480..be8652cec5 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -203,7 +203,9 @@ class OatFileAssistantTest : public CommonRuntimeTest {
// Ensure a chunk of memory is reserved for the image space.
uintptr_t reservation_start = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MIN_DELTA;
uintptr_t reservation_end = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MAX_DELTA
- + 100 * 1024 * 1024;
+ // Include the main space that has to come right after the
+ // image in case of the GSS collector.
+ + 384 * MB;
std::string error_msg;
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 189559e695..0f0c32720f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -483,11 +483,10 @@ bool Runtime::Start() {
}
}
- // If we are the zygote then we need to wait until after forking to create the code cache due to
- // SELinux restrictions on r/w/x memory regions.
- if (!IsZygote() && jit_.get() != nullptr) {
- jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
- jit_->CreateThreadPool();
+ // If we are the zygote then we need to wait until after forking to create the code cache
+ // due to SELinux restrictions on r/w/x memory regions.
+ if (!IsZygote() && jit_options_->UseJIT()) {
+ CreateJit();
}
if (!IsImageDex2OatEnabled() || !GetHeap()->HasImageSpace()) {
@@ -611,11 +610,9 @@ void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const ch
// Create the thread pools.
heap_->CreateThreadPool();
- if (jit_options_.get() != nullptr && jit_.get() == nullptr) {
+ if (jit_.get() == nullptr && jit_options_->UseJIT()) {
// Create the JIT if the flag is set and we haven't already create it (happens for run-tests).
CreateJit();
- jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
- jit_->CreateThreadPool();
}
StartSignalCatcher();
@@ -843,20 +840,19 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
}
- if (!IsAotCompiler()) {
+ jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
+ bool use_jit = jit_options_->UseJIT();
+ if (IsAotCompiler()) {
// If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
// this case.
// If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
// nullptr and we don't create the jit.
- jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
- }
- if (!IsZygote() && jit_options_.get() != nullptr) {
- CreateJit();
+ use_jit = false;
}
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
- const bool use_malloc = jit_options_.get() == nullptr;
+ const bool use_malloc = !use_jit;
arena_pool_.reset(new ArenaPool(use_malloc));
BlockSignals();
@@ -1661,11 +1657,13 @@ void Runtime::UpdateProfilerState(int state) {
}
void Runtime::CreateJit() {
- CHECK(jit_options_.get() != nullptr);
+ CHECK(!IsAotCompiler());
std::string error_msg;
jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
if (jit_.get() != nullptr) {
compiler_callbacks_ = jit_->GetCompilerCallbacks();
+ jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
+ jit_->CreateThreadPool();
} else {
LOG(WARNING) << "Failed to create JIT " << error_msg;
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 3cf22bf269..7f335474bb 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -540,6 +540,7 @@ class Runtime {
return zygote_max_failed_boots_;
}
+ // Create the JIT and instrumentation and code cache.
void CreateJit();
ArenaPool* GetArenaPool() {
@@ -549,6 +550,10 @@ class Runtime {
return arena_pool_.get();
}
+ jit::JitOptions* GetJITOptions() {
+ return jit_options_.get();
+ }
+
private:
static void InitPlatformSignalHandlers();
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 0db589f082..961772c97e 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -632,27 +632,27 @@ class CodeInfo {
}
uint32_t GetOverallSize() const {
- return region_.Load<uint32_t>(kOverallSizeOffset);
+ return region_.LoadUnaligned<uint32_t>(kOverallSizeOffset);
}
void SetOverallSize(uint32_t size) {
- region_.Store<uint32_t>(kOverallSizeOffset, size);
+ region_.StoreUnaligned<uint32_t>(kOverallSizeOffset, size);
}
uint32_t GetStackMaskSize() const {
- return region_.Load<uint32_t>(kStackMaskSizeOffset);
+ return region_.LoadUnaligned<uint32_t>(kStackMaskSizeOffset);
}
void SetStackMaskSize(uint32_t size) {
- region_.Store<uint32_t>(kStackMaskSizeOffset, size);
+ region_.StoreUnaligned<uint32_t>(kStackMaskSizeOffset, size);
}
size_t GetNumberOfStackMaps() const {
- return region_.Load<uint32_t>(kNumberOfStackMapsOffset);
+ return region_.LoadUnaligned<uint32_t>(kNumberOfStackMapsOffset);
}
void SetNumberOfStackMaps(uint32_t number_of_stack_maps) {
- region_.Store<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
+ region_.StoreUnaligned<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
}
// Get the size of one stack map of this CodeInfo object, in bytes.
diff --git a/test/run-test b/test/run-test
index 2f7a5acd83..df0fce4418 100755
--- a/test/run-test
+++ b/test/run-test
@@ -525,7 +525,7 @@ if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
# if Checker is not invoked and the test only runs the program.
build_args="${build_args} --dx-option --no-optimize"
- if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no"]; then
+ if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no" ]; then
run_checker="yes"
run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \
-Xcompiler-option -j1"
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
new file mode 100755
index 0000000000..fc396b6a94
--- /dev/null
+++ b/tools/setup-buildbot-device.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+green='\033[0;32m'
+nc='\033[0m'
+
+echo -e "${green}Date on device${nc}"
+adb shell date
+
+echo -e "${green}Turn off selinux${nc}"
+adb shell setenforce 0
+adb shell getenforce
+
+echo -e "${green}List properties${nc}"
+adb shell getprop