diff options
| -rw-r--r-- | build/Android.gtest.mk | 1 | ||||
| -rw-r--r-- | compiler/Android.mk | 2 | ||||
| -rw-r--r-- | compiler/dex/arena_bit_vector.cc | 124 | ||||
| -rw-r--r-- | compiler/dex/arena_bit_vector.h | 99 | ||||
| -rw-r--r-- | compiler/dex/compiler_ir.h | 11 | ||||
| -rw-r--r-- | compiler/dex/frontend.cc | 32 | ||||
| -rw-r--r-- | compiler/dex/frontend.h | 1 | ||||
| -rw-r--r-- | compiler/dex/mir_dataflow.cc | 3 | ||||
| -rw-r--r-- | compiler/dex/mir_optimization.cc | 86 | ||||
| -rw-r--r-- | compiler/dex/quick/arm/assemble_arm.cc | 4 | ||||
| -rw-r--r-- | compiler/dex/quick/codegen_util.cc | 5 | ||||
| -rw-r--r-- | compiler/dex/quick/mips/assemble_mips.cc | 4 | ||||
| -rw-r--r-- | compiler/dex/quick/mir_to_lir.cc | 4 | ||||
| -rw-r--r-- | compiler/dex/quick/x86/assemble_x86.cc | 4 | ||||
| -rw-r--r-- | compiler/dex/ssa_transformation.cc | 5 | ||||
| -rw-r--r-- | compiler/utils/allocator.cc | 74 | ||||
| -rw-r--r-- | compiler/utils/allocator.h | 41 | ||||
| -rw-r--r-- | compiler/utils/bit_vector.cc | 155 | ||||
| -rw-r--r-- | compiler/utils/bit_vector.h | 134 | ||||
| -rw-r--r-- | compiler/utils/bit_vector_test.cc | 96 | ||||
| -rw-r--r-- | compiler/utils/dedupe_set_test.cc | 6 | ||||
| -rw-r--r-- | runtime/native/dalvik_system_VMRuntime.cc | 4 |
22 files changed, 652 insertions, 243 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 4c658a2bb7..22abba170b 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -23,6 +23,7 @@ TEST_COMMON_SRC_FILES := \ compiler/jni/jni_compiler_test.cc \ compiler/oat_test.cc \ compiler/output_stream_test.cc \ + compiler/utils/bit_vector_test.cc \ compiler/utils/dedupe_set_test.cc \ compiler/utils/arm/managed_register_arm_test.cc \ compiler/utils/x86/managed_register_x86_test.cc \ diff --git a/compiler/Android.mk b/compiler/Android.mk index fc2f02b59e..0d3acbdb71 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -79,6 +79,8 @@ LIBART_COMPILER_SRC_FILES := \ utils/arm/assembler_arm.cc \ utils/arm/managed_register_arm.cc \ utils/assembler.cc \ + utils/allocator.cc \ + utils/bit_vector.cc \ utils/mips/assembler_mips.cc \ utils/mips/managed_register_mips.cc \ utils/x86/assembler_x86.cc \ diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc index b921f615b6..b567ae8d8a 100644 --- a/compiler/dex/arena_bit_vector.cc +++ b/compiler/dex/arena_bit_vector.cc @@ -19,119 +19,29 @@ namespace art { -// TODO: profile to make sure this is still a win relative to just using shifted masks. 
-static uint32_t check_masks[32] = { - 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010, - 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200, - 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000, - 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000, - 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000, - 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, - 0x40000000, 0x80000000 }; +class ArenaBitVectorAllocator : public Allocator { + public: + explicit ArenaBitVectorAllocator(ArenaAllocator* arena) : arena_(arena) {} + ~ArenaBitVectorAllocator() {} -ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits, - bool expandable, OatBitMapKind kind) - : arena_(arena), - expandable_(expandable), - kind_(kind), - storage_size_((start_bits + 31) >> 5), - storage_(static_cast<uint32_t*>(arena_->Alloc(storage_size_ * sizeof(uint32_t), - ArenaAllocator::kAllocGrowableBitMap))) { - DCHECK_EQ(sizeof(storage_[0]), 4U); // Assuming 32-bit units. -} - -/* - * Determine whether or not the specified bit is set. - */ -bool ArenaBitVector::IsBitSet(unsigned int num) { - DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8); - - unsigned int val = storage_[num >> 5] & check_masks[num & 0x1f]; - return (val != 0); -} - -// Mark all bits bit as "clear". -void ArenaBitVector::ClearAllBits() { - memset(storage_, 0, storage_size_ * sizeof(uint32_t)); -} - -// Mark the specified bit as "set". -/* - * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're - * not using it badly or change resize mechanism. 
- */ -void ArenaBitVector::SetBit(unsigned int num) { - if (num >= storage_size_ * sizeof(uint32_t) * 8) { - DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num; - - /* Round up to word boundaries for "num+1" bits */ - unsigned int new_size = (num + 1 + 31) >> 5; - DCHECK_GT(new_size, storage_size_); - uint32_t *new_storage = - static_cast<uint32_t*>(arena_->Alloc(new_size * sizeof(uint32_t), - ArenaAllocator::kAllocGrowableBitMap)); - memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t)); - // Zero out the new storage words. - memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t)); - // TOTO: collect stats on space wasted because of resize. - storage_ = new_storage; - storage_size_ = new_size; + virtual void* Alloc(size_t size) { + return arena_->Alloc(size, ArenaAllocator::kAllocGrowableBitMap); } - storage_[num >> 5] |= check_masks[num & 0x1f]; -} - -// Mark the specified bit as "unset". -void ArenaBitVector::ClearBit(unsigned int num) { - DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8); - storage_[num >> 5] &= ~check_masks[num & 0x1f]; -} + virtual void Free(void*) {} // Nop. -// Intersect with another bit vector. Sizes and expandability must be the same. -void ArenaBitVector::Intersect(const ArenaBitVector* src) { - DCHECK_EQ(storage_size_, src->GetStorageSize()); - DCHECK_EQ(expandable_, src->IsExpandable()); - for (unsigned int idx = 0; idx < storage_size_; idx++) { - storage_[idx] &= src->GetRawStorageWord(idx); + static void* operator new(size_t size, ArenaAllocator* arena) { + return arena->Alloc(sizeof(ArenaBitVectorAllocator), ArenaAllocator::kAllocGrowableBitMap); } -} + static void operator delete(void* p) {} // Nop. -/* - * Union with another bit vector. Sizes and expandability must be the same. 
- */ -void ArenaBitVector::Union(const ArenaBitVector* src) { - DCHECK_EQ(storage_size_, src->GetStorageSize()); - DCHECK_EQ(expandable_, src->IsExpandable()); - for (unsigned int idx = 0; idx < storage_size_; idx++) { - storage_[idx] |= src->GetRawStorageWord(idx); - } -} - -// Count the number of bits that are set. -int ArenaBitVector::NumSetBits() { - unsigned int count = 0; - - for (unsigned int word = 0; word < storage_size_; word++) { - count += __builtin_popcount(storage_[word]); - } - return count; -} + private: + ArenaAllocator* arena_; + DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator); +}; -/* - * Mark specified number of bits as "set". Cannot set all bits like ClearAll - * since there might be unused bits - setting those to one will confuse the - * iterator. - */ -void ArenaBitVector::SetInitialBits(unsigned int num_bits) { - DCHECK_LE(((num_bits + 31) >> 5), storage_size_); - unsigned int idx; - for (idx = 0; idx < (num_bits >> 5); idx++) { - storage_[idx] = -1; - } - unsigned int rem_num_bits = num_bits & 0x1f; - if (rem_num_bits) { - storage_[idx] = (1 << rem_num_bits) - 1; - } -} +ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits, + bool expandable, OatBitMapKind kind) + : BitVector(start_bits, expandable, new (arena) ArenaBitVectorAllocator(arena)), kind_(kind) {} } // namespace art diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h index 53e6eb6512..7d2f3ffa5e 100644 --- a/compiler/dex/arena_bit_vector.h +++ b/compiler/dex/arena_bit_vector.h @@ -17,109 +17,28 @@ #ifndef ART_COMPILER_DEX_ARENA_BIT_VECTOR_H_ #define ART_COMPILER_DEX_ARENA_BIT_VECTOR_H_ -#include <stdint.h> -#include <stddef.h> -#include "compiler_enums.h" #include "arena_allocator.h" +#include "compiler_enums.h" +#include "utils/bit_vector.h" namespace art { /* - * Expanding bitmap, used for tracking resources. Bits are numbered starting - * from zero. All operations on a BitVector are unsynchronized. 
+ * A BitVector implementation that uses Arena allocation. */ -class ArenaBitVector { +class ArenaBitVector : public BitVector { public: - class Iterator { - public: - explicit Iterator(ArenaBitVector* bit_vector) - : p_bits_(bit_vector), - bit_storage_(bit_vector->GetRawStorage()), - bit_index_(0), - bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {} - - // Return the position of the next set bit. -1 means end-of-element reached. - int32_t Next() { - // Did anything obviously change since we started? - DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8); - DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage()); - - if (UNLIKELY(bit_index_ >= bit_size_)) return -1; - - uint32_t word_index = bit_index_ / 32; - uint32_t word = bit_storage_[word_index]; - // Mask out any bits in the first word we've already considered. - word >>= bit_index_ & 0x1f; - if (word == 0) { - bit_index_ &= ~0x1f; - do { - word_index++; - if (UNLIKELY((word_index * 32) >= bit_size_)) { - bit_index_ = bit_size_; - return -1; - } - word = bit_storage_[word_index]; - bit_index_ += 32; - } while (word == 0); - } - bit_index_ += CTZ(word) + 1; - return bit_index_ - 1; - } - - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(ArenaBitVector::Iterator), - ArenaAllocator::kAllocGrowableBitMap); - }; - static void operator delete(void* p) {} // Nop. - - private: - ArenaBitVector* const p_bits_; - uint32_t* const bit_storage_; - uint32_t bit_index_; // Current index (size in bits). - const uint32_t bit_size_; // Size of vector in bits. - }; - ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable, OatBitMapKind kind = kBitMapMisc); ~ArenaBitVector() {} - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap); - } - static void operator delete(void* p) {} // Nop. 
- - void SetBit(uint32_t num); - void ClearBit(uint32_t num); - void MarkAllBits(bool set); - void DebugBitVector(char* msg, int length); - bool IsBitSet(uint32_t num); - void ClearAllBits(); - void SetInitialBits(uint32_t num_bits); - void Copy(ArenaBitVector* src) { - memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_); - } - void Intersect(const ArenaBitVector* src2); - void Union(const ArenaBitVector* src); - // Are we equal to another bit vector? Note: expandability attributes must also match. - bool Equal(const ArenaBitVector* src) { - return (storage_size_ == src->GetStorageSize()) && - (expandable_ == src->IsExpandable()) && - (memcmp(storage_, src->GetRawStorage(), storage_size_ * 4) == 0); - } - int32_t NumSetBits(); - - uint32_t GetStorageSize() const { return storage_size_; } - bool IsExpandable() const { return expandable_; } - uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; } - uint32_t* GetRawStorage() { return storage_; } - const uint32_t* GetRawStorage() const { return storage_; } + static void* operator new(size_t size, ArenaAllocator* arena) { + return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap); + } + static void operator delete(void* p) {} // Nop. private: - ArenaAllocator* const arena_; - const bool expandable_; // expand bitmap if we run out? - const OatBitMapKind kind_; // for memory use tuning. - uint32_t storage_size_; // current size, in 32-bit words. - uint32_t* storage_; + const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused. 
}; diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h index bdc31547cb..0d7209e438 100644 --- a/compiler/dex/compiler_ir.h +++ b/compiler/dex/compiler_ir.h @@ -29,6 +29,7 @@ #include "llvm/intrinsic_helper.h" #include "llvm/ir_builder.h" #include "safe_map.h" +#include "base/timing_logger.h" namespace art { @@ -68,7 +69,14 @@ struct CompilationUnit { compiler_flip_match(false), arena(pool), mir_graph(NULL), - cg(NULL) {} + cg(NULL), + timings("QuickCompiler", true, false) { + } + + void StartTimingSplit(const char* label); + void NewTimingSplit(const char* label); + void EndTiming(); + /* * Fields needed/generated by common frontend and generally used throughout * the compiler. @@ -109,6 +117,7 @@ struct CompilationUnit { UniquePtr<MIRGraph> mir_graph; // MIR container. UniquePtr<Backend> cg; // Target-specific codegen. + base::TimingLogger timings; }; } // namespace art diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 2952570436..2f8521f788 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -24,6 +24,7 @@ #include "runtime.h" #include "backend.h" #include "base/logging.h" +#include "base/timing_logger.h" #if defined(ART_USE_PORTABLE_COMPILER) #include "dex/portable/mir_to_gbc.h" @@ -104,8 +105,30 @@ static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes // (1 << kDebugVerifyBitcode) | // (1 << kDebugShowSummaryMemoryUsage) | // (1 << kDebugShowFilterStats) | + // (1 << kDebugTimings) | 0; +// TODO: Add a cumulative version of logging, and combine with dex2oat --dump-timing +void CompilationUnit::StartTimingSplit(const char* label) { + if (enable_debug & (1 << kDebugTimings)) { + timings.StartSplit(label); + } +} + +void CompilationUnit::NewTimingSplit(const char* label) { + if (enable_debug & (1 << kDebugTimings)) { + timings.NewSplit(label); + } +} + +void CompilationUnit::EndTiming() { + if (enable_debug & (1 << kDebugTimings)) { + timings.EndSplit(); + LOG(INFO) << "TIMINGS " << 
PrettyMethod(method_idx, *dex_file); + LOG(INFO) << Dumpable<base::TimingLogger>(timings); + } +} + static CompiledMethod* CompileMethod(CompilerDriver& compiler, const CompilerBackend compiler_backend, const DexFile::CodeItem* code_item, @@ -175,6 +198,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, (1 << kPromoteCompilerTemps)); } + cu.StartTimingSplit("BuildMIRGraph"); cu.mir_graph.reset(new MIRGraph(&cu, &cu.arena)); /* Gathering opcode stats? */ @@ -192,22 +216,28 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, } #endif + cu.NewTimingSplit("MIROpt:CodeLayout"); + /* Do a code layout pass */ cu.mir_graph->CodeLayout(); /* Perform SSA transformation for the whole method */ + cu.NewTimingSplit("MIROpt:SSATransform"); cu.mir_graph->SSATransformation(); /* Do constant propagation */ + cu.NewTimingSplit("MIROpt:ConstantProp"); cu.mir_graph->PropagateConstants(); /* Count uses */ cu.mir_graph->MethodUseCount(); /* Perform null check elimination */ + cu.NewTimingSplit("MIROpt:NullCheckElimination"); cu.mir_graph->NullCheckElimination(); /* Combine basic blocks where possible */ + cu.NewTimingSplit("MIROpt:BBOpt"); cu.mir_graph->BasicBlockCombine(); /* Do some basic block optimizations */ @@ -250,6 +280,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, cu.cg->Materialize(); + cu.NewTimingSplit("Cleanup"); result = cu.cg->GetCompiledMethod(); if (result) { @@ -270,6 +301,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, << " " << PrettyMethod(method_idx, dex_file); } + cu.EndTiming(); return result; } diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h index 6c33d109e3..43f68554b5 100644 --- a/compiler/dex/frontend.h +++ b/compiler/dex/frontend.h @@ -78,6 +78,7 @@ enum debugControlVector { kDebugVerifyBitcode, kDebugShowSummaryMemoryUsage, kDebugShowFilterStats, + kDebugTimings }; class LLVMInfo { diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc index 
9c8ce23ca5..11e19dc43f 100644 --- a/compiler/dex/mir_dataflow.cc +++ b/compiler/dex/mir_dataflow.cc @@ -1243,7 +1243,8 @@ bool MIRGraph::CountUses(struct BasicBlock* bb) { if (mir->ssa_rep == NULL) { continue; } - uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nesting_depth)); + // Each level of nesting adds *16 to count, up to 3 levels deep. + uint32_t weight = std::min(3U, static_cast<uint32_t>(bb->nesting_depth) * 4); for (int i = 0; i < mir->ssa_rep->num_uses; i++) { int s_reg = mir->ssa_rep->uses[i]; raw_use_counts_.Increment(s_reg); diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 3cd158ffc0..0b453b142f 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -629,10 +629,15 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { */ if ((bb->block_type == kEntryBlock) | bb->catch_entry) { temp_ssa_register_v_->ClearAllBits(); + // Assume all ins are objects. + for (uint16_t in_reg = cu_->num_dalvik_registers - cu_->num_ins; + in_reg < cu_->num_dalvik_registers; in_reg++) { + temp_ssa_register_v_->SetBit(in_reg); + } if ((cu_->access_flags & kAccStatic) == 0) { // If non-static method, mark "this" as non-null int this_reg = cu_->num_dalvik_registers - cu_->num_ins; - temp_ssa_register_v_->SetBit(this_reg); + temp_ssa_register_v_->ClearBit(this_reg); } } else if (bb->predecessors->Size() == 1) { BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0)); @@ -645,18 +650,18 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { if (pred_bb->fall_through == bb->id) { // The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that // it can't be null. 
- temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]); + temp_ssa_register_v_->ClearBit(last_insn->ssa_rep->uses[0]); } } else if (last_opcode == Instruction::IF_NEZ) { if (pred_bb->taken == bb->id) { // The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be // null. - temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]); + temp_ssa_register_v_->ClearBit(last_insn->ssa_rep->uses[0]); } } } } else { - // Starting state is intersection of all incoming arcs + // Starting state is union of all incoming arcs GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors); BasicBlock* pred_bb = GetBasicBlock(iter.Next()); DCHECK(pred_bb != NULL); @@ -668,10 +673,13 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { (pred_bb->data_flow_info->ending_null_check_v == NULL)) { continue; } - temp_ssa_register_v_->Intersect(pred_bb->data_flow_info->ending_null_check_v); + temp_ssa_register_v_->Union(pred_bb->data_flow_info->ending_null_check_v); } } + // At this point, temp_ssa_register_v_ shows which sregs have an object definition with + // no intervening uses. + // Walk through the instruction in the block, updating as necessary for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { if (mir->ssa_rep == NULL) { @@ -679,11 +687,41 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { } int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode]; - // Mark target of NEW* as non-null - if (df_attributes & DF_NON_NULL_DST) { + // Already nullchecked? 
+ if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) { + int src_idx; + if (df_attributes & DF_NULL_CHK_1) { + src_idx = 1; + } else if (df_attributes & DF_NULL_CHK_2) { + src_idx = 2; + } else { + src_idx = 0; + } + int src_sreg = mir->ssa_rep->uses[src_idx]; + if (!temp_ssa_register_v_->IsBitSet(src_sreg)) { + // Eliminate the null check + mir->optimization_flags |= MIR_IGNORE_NULL_CHECK; + } else { + // Mark s_reg as null-checked + temp_ssa_register_v_->ClearBit(src_sreg); + } + } + + if ((df_attributes & (DF_REF_A | DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) == 0) { + continue; + } + + // First, mark all object definitions as requiring null check. + if ((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A)) { temp_ssa_register_v_->SetBit(mir->ssa_rep->defs[0]); } + // Now, remove mark from all object definitions we know are non-null. + if (df_attributes & DF_NON_NULL_DST) { + // Mark target of NEW* as non-null + temp_ssa_register_v_->ClearBit(mir->ssa_rep->defs[0]); + } + // Mark non-null returns from invoke-style NEW* if (df_attributes & DF_NON_NULL_RET) { MIR* next_mir = mir->next; @@ -691,7 +729,7 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { if (next_mir && next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) { // Mark as null checked - temp_ssa_register_v_->SetBit(next_mir->ssa_rep->defs[0]); + temp_ssa_register_v_->ClearBit(next_mir->ssa_rep->defs[0]); } else { if (next_mir) { LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode; @@ -706,7 +744,7 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { // First non-pseudo should be MOVE_RESULT_OBJECT if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) { // Mark as null checked - temp_ssa_register_v_->SetBit(tmir->ssa_rep->defs[0]); + temp_ssa_register_v_->ClearBit(tmir->ssa_rep->defs[0]); } else { LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode; } @@ -719,40 
+757,22 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) { /* * Propagate nullcheck state on register copies (including * Phi pseudo copies. For the latter, nullcheck state is - * the "and" of all the Phi's operands. + * the "or" of all the Phi's operands. */ if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) { int tgt_sreg = mir->ssa_rep->defs[0]; int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 : mir->ssa_rep->num_uses; - bool null_checked = true; + bool needs_null_check = false; for (int i = 0; i < operands; i++) { - null_checked &= temp_ssa_register_v_->IsBitSet(mir->ssa_rep->uses[i]); + needs_null_check |= temp_ssa_register_v_->IsBitSet(mir->ssa_rep->uses[i]); } - if (null_checked) { + if (needs_null_check) { temp_ssa_register_v_->SetBit(tgt_sreg); - } - } - - // Already nullchecked? - if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) { - int src_idx; - if (df_attributes & DF_NULL_CHK_1) { - src_idx = 1; - } else if (df_attributes & DF_NULL_CHK_2) { - src_idx = 2; } else { - src_idx = 0; + temp_ssa_register_v_->ClearBit(tgt_sreg); } - int src_sreg = mir->ssa_rep->uses[src_idx]; - if (temp_ssa_register_v_->IsBitSet(src_sreg)) { - // Eliminate the null check - mir->optimization_flags |= MIR_IGNORE_NULL_CHECK; - } else { - // Mark s_reg as null-checked - temp_ssa_register_v_->SetBit(src_sreg); - } - } + } } // Did anything change? 
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index cc40e99c52..2a6e656901 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1153,6 +1153,7 @@ void ArmMir2Lir::EncodeLIR(LIR* lir) { void ArmMir2Lir::AssembleLIR() { LIR* lir; LIR* prev_lir; + cu_->NewTimingSplit("Assemble"); int assembler_retries = 0; CodeOffset starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0); data_offset_ = (starting_offset + 0x3) & ~0x3; @@ -1574,6 +1575,7 @@ void ArmMir2Lir::AssembleLIR() { data_offset_ = (code_buffer_.size() + 0x3) & ~0x3; + cu_->NewTimingSplit("LiteralData"); // Install literals InstallLiteralPools(); @@ -1584,8 +1586,10 @@ void ArmMir2Lir::AssembleLIR() { InstallFillArrayData(); // Create the mapping table and native offset to reference map. + cu_->NewTimingSplit("PcMappingTable"); CreateMappingTables(); + cu_->NewTimingSplit("GcMap"); CreateNativeGcMap(); } diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 2ce8f581b4..dfbc887299 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -164,7 +164,8 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string")); } LOG(INFO) << "-------- dalvik offset: 0x" << std::hex - << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]); + << lir->dalvik_offset << " @ " + << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0])); break; case kPseudoExitBlock: LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest; @@ -929,6 +930,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena } void Mir2Lir::Materialize() { + cu_->NewTimingSplit("RegisterAllocation"); CompilerInitializeRegAlloc(); // Needs to happen after SSA naming /* Allocate Registers using simple local allocation scheme */ @@ -940,6 +942,7 @@ void 
Mir2Lir::Materialize() { * special codegen doesn't succeed, first_lir_insn_ will * set to NULL; */ + cu_->NewTimingSplit("SpecialMIR2LIR"); SpecialMIR2LIR(mir_graph_->GetSpecialCase()); } diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc index ea8b7a64c1..5f5e5e44ac 100644 --- a/compiler/dex/quick/mips/assemble_mips.cc +++ b/compiler/dex/quick/mips/assemble_mips.cc @@ -768,6 +768,7 @@ void MipsMir2Lir::AssignOffsets() { * TODO: consolidate w/ Arm assembly mechanism. */ void MipsMir2Lir::AssembleLIR() { + cu_->NewTimingSplit("Assemble"); AssignOffsets(); int assembler_retries = 0; /* @@ -792,6 +793,7 @@ void MipsMir2Lir::AssembleLIR() { } // Install literals + cu_->NewTimingSplit("LiteralData"); InstallLiteralPools(); // Install switch tables @@ -801,8 +803,10 @@ void MipsMir2Lir::AssembleLIR() { InstallFillArrayData(); // Create the mapping table and native offset to reference map. + cu_->NewTimingSplit("PcMappingTable"); CreateMappingTables(); + cu_->NewTimingSplit("GcMap"); CreateNativeGcMap(); } diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 197e200fc3..fa9a3ad566 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -819,6 +819,8 @@ void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) { } void Mir2Lir::MethodMIR2LIR() { + cu_->NewTimingSplit("MIR2LIR"); + // Hold the labels of each block. 
block_label_list_ = static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(), @@ -839,7 +841,7 @@ void Mir2Lir::MethodMIR2LIR() { next_bb = iter.Next(); } while ((next_bb != NULL) && (next_bb->block_type == kDead)); } - + cu_->NewTimingSplit("Launchpads"); HandleSuspendLaunchPads(); HandleThrowLaunchPads(); diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc index 9167b1c8f2..d66862bb9a 100644 --- a/compiler/dex/quick/x86/assemble_x86.cc +++ b/compiler/dex/quick/x86/assemble_x86.cc @@ -1443,6 +1443,7 @@ void X86Mir2Lir::AssignOffsets() { * TODO: consolidate w/ Arm assembly mechanism. */ void X86Mir2Lir::AssembleLIR() { + cu_->NewTimingSplit("Assemble"); AssignOffsets(); int assembler_retries = 0; /* @@ -1466,6 +1467,7 @@ void X86Mir2Lir::AssembleLIR() { } } + cu_->NewTimingSplit("LiteralData"); // Install literals InstallLiteralPools(); @@ -1476,8 +1478,10 @@ void X86Mir2Lir::AssembleLIR() { InstallFillArrayData(); // Create the mapping table and native offset to reference map. 
+ cu_->NewTimingSplit("PcMappingTable"); CreateMappingTables(); + cu_->NewTimingSplit("GcMap"); CreateNativeGcMap(); } diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc index eb0d412bae..0d8bd07f40 100644 --- a/compiler/dex/ssa_transformation.cc +++ b/compiler/dex/ssa_transformation.cc @@ -183,7 +183,7 @@ void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) { ClearAllVisitedFlags(); std::vector<std::pair<BasicBlock*, ArenaBitVector::Iterator*> > work_stack; bb->visited = true; - work_stack.push_back(std::make_pair(bb, new (arena_) ArenaBitVector::Iterator(bb->i_dominated))); + work_stack.push_back(std::make_pair(bb, bb->i_dominated->GetIterator())); while (!work_stack.empty()) { const std::pair<BasicBlock*, ArenaBitVector::Iterator*>& curr = work_stack.back(); BasicBlock* curr_bb = curr.first; @@ -196,7 +196,7 @@ void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) { BasicBlock* new_bb = GetBasicBlock(bb_idx); new_bb->visited = true; work_stack.push_back( - std::make_pair(new_bb, new (arena_) ArenaBitVector::Iterator(new_bb->i_dominated))); + std::make_pair(new_bb, new_bb->i_dominated->GetIterator())); } else { // no successor/next if (curr_bb->id != NullBasicBlockId) { @@ -206,6 +206,7 @@ void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) { /* hacky loop detection */ if ((curr_bb->taken != NullBasicBlockId) && curr_bb->dominators->IsBitSet(curr_bb->taken)) { + curr_bb->nesting_depth++; attributes_ |= METHOD_HAS_LOOP; } } diff --git a/compiler/utils/allocator.cc b/compiler/utils/allocator.cc new file mode 100644 index 0000000000..4f7753d476 --- /dev/null +++ b/compiler/utils/allocator.cc @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "allocator.h" + +#include <inttypes.h> +#include <stdlib.h> + +#include "base/logging.h" + +namespace art { + +class MallocAllocator : public Allocator { + public: + explicit MallocAllocator() {} + ~MallocAllocator() {} + + virtual void* Alloc(size_t size) { + return calloc(sizeof(uint8_t), size); + } + + virtual void Free(void* p) { + free(p); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MallocAllocator); +}; + +MallocAllocator g_malloc_allocator; + +class NoopAllocator : public Allocator { + public: + explicit NoopAllocator() {} + ~NoopAllocator() {} + + virtual void* Alloc(size_t size) { + LOG(FATAL) << "NoopAllocator::Alloc should not be called"; + return NULL; + } + + virtual void Free(void* p) { + // Noop. + } + + private: + DISALLOW_COPY_AND_ASSIGN(NoopAllocator); +}; + +NoopAllocator g_noop_allocator; + +Allocator* Allocator::GetMallocAllocator() { + return &g_malloc_allocator; +} + +Allocator* Allocator::GetNoopAllocator() { + return &g_noop_allocator; +} + + +} // namespace art diff --git a/compiler/utils/allocator.h b/compiler/utils/allocator.h new file mode 100644 index 0000000000..3482a7928e --- /dev/null +++ b/compiler/utils/allocator.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_ALLOCATOR_H_ +#define ART_COMPILER_UTILS_ALLOCATOR_H_ + +#include "base/macros.h" + +namespace art { + +class Allocator { + public: + static Allocator* GetMallocAllocator(); + static Allocator* GetNoopAllocator(); + + Allocator() {} + virtual ~Allocator() {} + + virtual void* Alloc(size_t) = 0; + virtual void Free(void*) = 0; + + private: + DISALLOW_COPY_AND_ASSIGN(Allocator); +}; + +} // namespace art + +#endif // ART_COMPILER_UTILS_ALLOCATOR_H_ diff --git a/compiler/utils/bit_vector.cc b/compiler/utils/bit_vector.cc new file mode 100644 index 0000000000..81a639a050 --- /dev/null +++ b/compiler/utils/bit_vector.cc @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "bit_vector.h" + +namespace art { + +// TODO: profile to make sure this is still a win relative to just using shifted masks. 
+static uint32_t check_masks[32] = { + 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010, + 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200, + 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000, + 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000, + 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000, + 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, + 0x40000000, 0x80000000 }; + +static inline uint32_t BitsToWords(unsigned int bits) { + return (bits + 31) >> 5; +} + +// TODO: replace excessive argument defaulting when we are at gcc 4.7 +// or later on host with delegating constructor support. Specifically, +// start_bits and storage_size/storage are mutually exclusive. +BitVector::BitVector(unsigned int start_bits, + bool expandable, + Allocator* allocator, + uint32_t storage_size, + uint32_t* storage) + : allocator_(allocator), + expandable_(expandable), + storage_size_(storage_size), + storage_(storage) { + DCHECK_EQ(sizeof(storage_[0]), 4U); // Assuming 32-bit units. + if (storage_ == NULL) { + storage_size_ = BitsToWords(start_bits); + storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * sizeof(uint32_t))); + } +} + +BitVector::~BitVector() { + allocator_->Free(storage_); +} + +/* + * Determine whether or not the specified bit is set. + */ +bool BitVector::IsBitSet(unsigned int num) { + DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8); + + unsigned int val = storage_[num >> 5] & check_masks[num & 0x1f]; + return (val != 0); +} + +// Mark all bits as "clear". +void BitVector::ClearAllBits() { + memset(storage_, 0, storage_size_ * sizeof(uint32_t)); +} + +// Mark the specified bit as "set". +/* + * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're + * not using it badly or change resize mechanism. 
+ */ +void BitVector::SetBit(unsigned int num) { + if (num >= storage_size_ * sizeof(uint32_t) * 8) { + DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num; + + /* Round up to word boundaries for "num+1" bits */ + unsigned int new_size = BitsToWords(num + 1); + DCHECK_GT(new_size, storage_size_); + uint32_t *new_storage = + static_cast<uint32_t*>(allocator_->Alloc(new_size * sizeof(uint32_t))); + memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t)); + // Zero out the new storage words. + memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t)); + // TODO: collect stats on space wasted because of resize. + storage_ = new_storage; + storage_size_ = new_size; + } + + storage_[num >> 5] |= check_masks[num & 0x1f]; +} + +// Mark the specified bit as "unset". +void BitVector::ClearBit(unsigned int num) { + DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8); + storage_[num >> 5] &= ~check_masks[num & 0x1f]; +} + +// Intersect with another bit vector. Sizes and expandability must be the same. +void BitVector::Intersect(const BitVector* src) { + DCHECK_EQ(storage_size_, src->GetStorageSize()); + DCHECK_EQ(expandable_, src->IsExpandable()); + for (unsigned int idx = 0; idx < storage_size_; idx++) { + storage_[idx] &= src->GetRawStorageWord(idx); + } +} + +/* + * Union with another bit vector. Sizes and expandability must be the same. + */ +void BitVector::Union(const BitVector* src) { + DCHECK_EQ(storage_size_, src->GetStorageSize()); + DCHECK_EQ(expandable_, src->IsExpandable()); + for (unsigned int idx = 0; idx < storage_size_; idx++) { + storage_[idx] |= src->GetRawStorageWord(idx); + } +} + +// Count the number of bits that are set. 
+int BitVector::NumSetBits() { + unsigned int count = 0; + + for (unsigned int word = 0; word < storage_size_; word++) { + count += __builtin_popcount(storage_[word]); + } + return count; +} + +BitVector::Iterator* BitVector::GetIterator() { + return new (allocator_) Iterator(this); +} + +/* + * Mark specified number of bits as "set". Cannot set all bits like ClearAll + * since there might be unused bits - setting those to one will confuse the + * iterator. + */ +void BitVector::SetInitialBits(unsigned int num_bits) { + DCHECK_LE(BitsToWords(num_bits), storage_size_); + unsigned int idx; + for (idx = 0; idx < (num_bits >> 5); idx++) { + storage_[idx] = -1; + } + unsigned int rem_num_bits = num_bits & 0x1f; + if (rem_num_bits) { + storage_[idx] = (1 << rem_num_bits) - 1; + } +} + +} // namespace art diff --git a/compiler/utils/bit_vector.h b/compiler/utils/bit_vector.h new file mode 100644 index 0000000000..bf0f7c32e1 --- /dev/null +++ b/compiler/utils/bit_vector.h @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_BIT_VECTOR_H_ +#define ART_COMPILER_UTILS_BIT_VECTOR_H_ + +#include <stdint.h> +#include <stddef.h> + +#include "allocator.h" +#include "base/logging.h" +#include "utils.h" + +namespace art { + +/* + * Expanding bitmap, used for tracking resources. Bits are numbered starting + * from zero. All operations on a BitVector are unsynchronized. 
+ */ +class BitVector { + public: + class Iterator { + public: + explicit Iterator(BitVector* bit_vector) + : p_bits_(bit_vector), + bit_storage_(bit_vector->GetRawStorage()), + bit_index_(0), + bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {} + + // Return the position of the next set bit. -1 means end-of-element reached. + int32_t Next() { + // Did anything obviously change since we started? + DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8); + DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage()); + + if (UNLIKELY(bit_index_ >= bit_size_)) return -1; + + uint32_t word_index = bit_index_ / 32; + uint32_t word = bit_storage_[word_index]; + // Mask out any bits in the first word we've already considered. + word >>= bit_index_ & 0x1f; + if (word == 0) { + bit_index_ &= ~0x1f; + do { + word_index++; + if (UNLIKELY((word_index * 32) >= bit_size_)) { + bit_index_ = bit_size_; + return -1; + } + word = bit_storage_[word_index]; + bit_index_ += 32; + } while (word == 0); + } + bit_index_ += CTZ(word) + 1; + return bit_index_ - 1; + } + + static void* operator new(size_t size, Allocator* allocator) { + return allocator->Alloc(sizeof(BitVector::Iterator)); + }; + static void operator delete(void* p) { + Iterator* it = reinterpret_cast<Iterator*>(p); + it->p_bits_->allocator_->Free(p); + } + + private: + BitVector* const p_bits_; + uint32_t* const bit_storage_; + uint32_t bit_index_; // Current index (size in bits). + const uint32_t bit_size_; // Size of vector in bits. 
+ + friend class BitVector; + }; + + BitVector(uint32_t start_bits, + bool expandable, + Allocator* allocator, + uint32_t storage_size = 0, + uint32_t* storage = NULL); + + virtual ~BitVector(); + + void SetBit(uint32_t num); + void ClearBit(uint32_t num); + void MarkAllBits(bool set); + void DebugBitVector(char* msg, int length); + bool IsBitSet(uint32_t num); + void ClearAllBits(); + void SetInitialBits(uint32_t num_bits); + void Copy(BitVector* src) { + memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_); + } + void Intersect(const BitVector* src2); + void Union(const BitVector* src); + // Are we equal to another bit vector? Note: expandability attributes must also match. + bool Equal(const BitVector* src) { + return (storage_size_ == src->GetStorageSize()) && + (expandable_ == src->IsExpandable()) && + (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0); + } + int32_t NumSetBits(); + + Iterator* GetIterator(); + + uint32_t GetStorageSize() const { return storage_size_; } + bool IsExpandable() const { return expandable_; } + uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; } + uint32_t* GetRawStorage() { return storage_; } + const uint32_t* GetRawStorage() const { return storage_; } + + private: + Allocator* const allocator_; + const bool expandable_; // expand bitmap if we run out? + uint32_t storage_size_; // current size, in 32-bit words. + uint32_t* storage_; +}; + + +} // namespace art + +#endif // ART_COMPILER_UTILS_BIT_VECTOR_H_ diff --git a/compiler/utils/bit_vector_test.cc b/compiler/utils/bit_vector_test.cc new file mode 100644 index 0000000000..5c18ec53d3 --- /dev/null +++ b/compiler/utils/bit_vector_test.cc @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_test.h" +#include "bit_vector.h" +#include "UniquePtr.h" + +namespace art { + +TEST(BitVector, Test) { + const size_t kBits = 32; + + BitVector bv(kBits, false, Allocator::GetMallocAllocator()); + EXPECT_EQ(1U, bv.GetStorageSize()); + EXPECT_FALSE(bv.IsExpandable()); + + EXPECT_EQ(0, bv.NumSetBits()); + for (size_t i = 0; i < kBits; i++) { + EXPECT_FALSE(bv.IsBitSet(i)); + } + EXPECT_EQ(0U, bv.GetRawStorageWord(0)); + EXPECT_EQ(0U, *bv.GetRawStorage()); + + BitVector::Iterator empty_iterator(&bv); + EXPECT_EQ(-1, empty_iterator.Next()); + + UniquePtr<BitVector::Iterator> empty_iterator_on_heap(bv.GetIterator()); + EXPECT_EQ(-1, empty_iterator_on_heap->Next()); + + bv.SetBit(0); + bv.SetBit(kBits - 1); + EXPECT_EQ(2, bv.NumSetBits()); + EXPECT_TRUE(bv.IsBitSet(0)); + for (size_t i = 1; i < kBits - 1; i++) { + EXPECT_FALSE(bv.IsBitSet(i)); + } + EXPECT_TRUE(bv.IsBitSet(kBits - 1)); + EXPECT_EQ(0x80000001U, bv.GetRawStorageWord(0)); + EXPECT_EQ(0x80000001U, *bv.GetRawStorage()); + + BitVector::Iterator iterator(&bv); + EXPECT_EQ(0, iterator.Next()); + EXPECT_EQ(static_cast<int>(kBits - 1), iterator.Next()); + EXPECT_EQ(-1, iterator.Next()); +} + +TEST(BitVector, NoopAllocator) { + const uint32_t kWords = 2; + + uint32_t bits[kWords]; + memset(bits, 0, sizeof(bits)); + + BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits); + EXPECT_EQ(kWords, bv.GetStorageSize()); + EXPECT_EQ(bits, bv.GetRawStorage()); + EXPECT_EQ(0, bv.NumSetBits()); + + bv.SetBit(8); + EXPECT_EQ(1, bv.NumSetBits()); + EXPECT_EQ(0x00000100U, 
bv.GetRawStorageWord(0)); + EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1)); + EXPECT_EQ(1, bv.NumSetBits()); + + bv.SetBit(16); + EXPECT_EQ(2, bv.NumSetBits()); + EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0)); + EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1)); + EXPECT_EQ(2, bv.NumSetBits()); + + bv.SetBit(32); + EXPECT_EQ(3, bv.NumSetBits()); + EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0)); + EXPECT_EQ(0x00000001U, bv.GetRawStorageWord(1)); + EXPECT_EQ(3, bv.NumSetBits()); + + bv.SetBit(48); + EXPECT_EQ(4, bv.NumSetBits()); + EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0)); + EXPECT_EQ(0x00010001U, bv.GetRawStorageWord(1)); + EXPECT_EQ(4, bv.NumSetBits()); +} + +} // namespace art diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc index 03d8b961fa..2c6787b8a6 100644 --- a/compiler/utils/dedupe_set_test.cc +++ b/compiler/utils/dedupe_set_test.cc @@ -19,10 +19,6 @@ namespace art { -class DedupeSetTest : public testing::Test { - public: -}; - class DedupeHashFunc { public: size_t operator()(const std::vector<uint8_t>& array) const { @@ -35,7 +31,7 @@ class DedupeHashFunc { return hash; } }; -TEST_F(DedupeSetTest, Test) { +TEST(DedupeSetTest, Test) { Thread* self = Thread::Current(); typedef std::vector<uint8_t> ByteArray; DedupeSet<ByteArray, size_t, DedupeHashFunc> deduplicator("test"); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 486328cbba..4629dbd0aa 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -135,7 +135,7 @@ static jstring VMRuntime_vmLibrary(JNIEnv* env, jobject) { return env->NewStringUTF(kIsDebugBuild ? "libartd.so" : "libart.so"); } -static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVersion) { +static void VMRuntime_setTargetSdkVersionNative(JNIEnv* env, jobject, jint targetSdkVersion) { // This is the target SDK version of the app we're about to run. 
// Note that targetSdkVersion may be CUR_DEVELOPMENT (10000). // Note that targetSdkVersion may be 0, meaning "current". @@ -519,7 +519,7 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"), NATIVE_METHOD(VMRuntime, newNonMovableArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"), NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"), - NATIVE_METHOD(VMRuntime, setTargetSdkVersion, "(I)V"), + NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"), NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"), NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"), NATIVE_METHOD(VMRuntime, startJitCompilation, "()V"), |