-rw-r--r--  Android.mk                                                                  |  46
-rw-r--r--  compiler/optimizing/code_sinking.cc                                         |  23
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc                                |  23
-rw-r--r--  compiler/optimizing/nodes.cc                                                |   4
-rw-r--r--  libartbase/base/arena_bit_vector.h                                          |  20
-rw-r--r--  libartbase/base/bit_vector.h                                                |  69
-rw-r--r--  libartbase/base/bit_vector_test.cc                                          |  88
-rw-r--r--  libartservice/service/java/com/android/server/art/Dex2OatStatsReporter.java |  24
-rw-r--r--  libartservice/service/java/com/android/server/art/Dexopter.java             |  10
-rw-r--r--  libdexfile/Android.bp                                                       |   9
-rw-r--r--  runtime/gc/collector/mark_compact.cc                                        |  27
-rw-r--r--  runtime/trace_profile.cc                                                    | 133
-rw-r--r--  runtime/trace_profile.h                                                     |  43
-rw-r--r--  test/Android.bp                                                             |  24
-rw-r--r--  test/generate-boot-image/generate-boot-image.cc                             |  17
-rwxr-xr-x  tools/buildbot-build.sh                                                     |   8
16 files changed, 439 insertions, 129 deletions
diff --git a/Android.mk b/Android.mk
index 1405aa79ff..d784cf6842 100644
--- a/Android.mk
+++ b/Android.mk
@@ -274,6 +274,52 @@ build-art: build-art-target
 .PHONY: build-art-target
 build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUTS)
 
+TARGET_BOOT_IMAGE_SYSTEM_DIR := $(PRODUCT_OUT)/system/apex/art_boot_images
+TARGET_ART_APEX_SYSTEM := $(PRODUCT_OUT)/system/apex/com.android.art
+TARGET_BOOT_IMAGE_PROFILE := $(TARGET_ART_APEX_SYSTEM).testing/etc/boot-image.prof
+
+.PHONY: build-art-simulator-profile
+build-art-simulator-profile: $(HOST_OUT_EXECUTABLES)/profmand $(TARGET_CORE_IMG_DEX_FILES) \
+    $(PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION)
+	mkdir -p $(dir $(TARGET_BOOT_IMAGE_PROFILE))
+	# Generate a profile from the core boot jars. This allows the simulator and boot image to use a
+	# stable profile that is generated on the host.
+	$(HOST_OUT_EXECUTABLES)/profmand \
+	  --output-profile-type=boot \
+	  --create-profile-from=$(PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION) \
+	  $(foreach jar,$(TARGET_CORE_IMG_DEX_FILES),--apk=$(jar)) \
+	  $(foreach jar,$(TARGET_CORE_IMG_DEX_LOCATIONS),--dex-location=$(jar)) \
+	  --reference-profile-file=$(TARGET_BOOT_IMAGE_PROFILE)
+
+.PHONY: build-art-simulator-boot-image
+build-art-simulator-boot-image: $(HOST_OUT_EXECUTABLES)/generate-boot-image64 \
+    $(HOST_OUT_EXECUTABLES)/dex2oatd $(TARGET_CORE_IMG_DEX_FILES) build-art-simulator-profile
+	# Note: The target boot image needs to be in a trusted system directory to be used by the
+	# zygote or if -Xonly-use-system-oat-files is passed to the runtime.
+	rm -rf $(TARGET_BOOT_IMAGE_SYSTEM_DIR)
+	mkdir -p $(TARGET_BOOT_IMAGE_SYSTEM_DIR)/javalib
+	mkdir -p $(TARGET_ART_APEX_SYSTEM)/javalib
+	# Copy the core boot jars to the expected directory for generate-boot-image.
+	$(foreach i,$(call int_range_list, 1, $(words $(TARGET_CORE_IMG_JARS))), \
+	  cp $(word $(i),$(TARGET_CORE_IMG_DEX_FILES)) \
+	    $(TARGET_ART_APEX_SYSTEM)/javalib/$(word $(i),$(TARGET_CORE_IMG_JARS)).jar;)
+	# Generate a target boot image using the host dex2oat. Note: a boot image using a profile is
+	# required for certain run tests to pass.
+	$(HOST_OUT_EXECUTABLES)/generate-boot-image64 \
+	  --output-dir=$(TARGET_BOOT_IMAGE_SYSTEM_DIR)/javalib \
+	  --compiler-filter=speed-profile \
+	  --use-profile=true \
+	  --profile-file=$(TARGET_BOOT_IMAGE_PROFILE) \
+	  --dex2oat-bin=$(HOST_OUT_EXECUTABLES)/dex2oatd \
+	  --android-root=$(TARGET_OUT) \
+	  --android-root-for-location=true \
+	  --core-only=true \
+	  --instruction-set=$(TARGET_ARCH)
+
+# For simulator, build a target profile and boot image on the host.
+.PHONY: build-art-simulator
+build-art-simulator: build-art-simulator-profile build-art-simulator-boot-image
+
 PRIVATE_ART_APEX_DEPENDENCY_FILES := \
 	bin/dalvikvm32 \
 	bin/dalvikvm64 \
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 0abcaea719..b1d14132c4 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -150,8 +150,8 @@ static bool IsInterestingInstruction(HInstruction* instruction) {
 }
 
 static void AddInstruction(HInstruction* instruction,
-                           const ArenaBitVector& processed_instructions,
-                           const ArenaBitVector& discard_blocks,
+                           BitVectorView<size_t> processed_instructions,
+                           BitVectorView<size_t> discard_blocks,
                            ScopedArenaVector<HInstruction*>* worklist) {
   // Add to the work list if the instruction is not in the list of blocks
   // to discard, hasn't been already processed and is of interest.
@@ -163,8 +163,8 @@ static void AddInstruction(HInstruction* instruction,
 }
 
 static void AddInputs(HInstruction* instruction,
-                      const ArenaBitVector& processed_instructions,
-                      const ArenaBitVector& discard_blocks,
+                      BitVectorView<size_t> processed_instructions,
+                      BitVectorView<size_t> discard_blocks,
                       ScopedArenaVector<HInstruction*>* worklist) {
   for (HInstruction* input : instruction->GetInputs()) {
     AddInstruction(input, processed_instructions, discard_blocks, worklist);
@@ -172,8 +172,8 @@ static void AddInputs(HInstruction* instruction,
 }
 
 static void AddInputs(HBasicBlock* block,
-                      const ArenaBitVector& processed_instructions,
-                      const ArenaBitVector& discard_blocks,
+                      BitVectorView<size_t> processed_instructions,
+                      BitVectorView<size_t> discard_blocks,
                       ScopedArenaVector<HInstruction*>* worklist) {
   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
     AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
@@ -185,7 +185,7 @@ static void AddInputs(HBasicBlock* block,
 
 static bool ShouldFilterUse(HInstruction* instruction,
                             HInstruction* user,
-                            const ArenaBitVector& post_dominated) {
+                            BitVectorView<size_t> post_dominated) {
   if (instruction->IsNewInstance()) {
     return (user->IsInstanceFieldSet() || user->IsConstructorFence()) &&
            (user->InputAt(0) == instruction) &&
@@ -204,7 +204,7 @@ static bool ShouldFilterUse(HInstruction* instruction,
 // This method is tailored to the sinking algorithm, unlike
 // the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
 static HInstruction* FindIdealPosition(HInstruction* instruction,
-                                       const ArenaBitVector& post_dominated,
+                                       BitVectorView<size_t> post_dominated,
                                        bool filter = false) {
   DCHECK(!instruction->IsPhi());  // Makes no sense for Phi.
 
@@ -333,9 +333,10 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
 
   size_t number_of_instructions = graph_->GetCurrentInstructionId();
   ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
-  ArenaBitVector processed_instructions(
-      &allocator, number_of_instructions, /* expandable= */ false);
-  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
+  BitVectorView<size_t> processed_instructions =
+      ArenaBitVector::CreateFixedSize(&allocator, number_of_instructions);
+  BitVectorView<size_t> post_dominated =
+      ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size());
 
   // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
   // TODO(ngeoffray): Getting the full set of post-dominated should be done by
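The sinking pass above now takes its bit vectors as BitVectorView<size_t> by value instead of const ArenaBitVector&. A minimal standalone sketch of why a non-owning view is cheap to copy yet still mutates the caller's bits; SimpleBitView and the other names below are illustrative stand-ins, not the ART classes:

#include <cassert>
#include <cstddef>

// Minimal non-owning bit-vector view, analogous in spirit to the
// BitVectorView<> introduced in this change (simplified; no DCHECK macros).
class SimpleBitView {
 public:
  static constexpr size_t kWordBits = sizeof(size_t) * 8;

  SimpleBitView(size_t* storage, size_t size_in_bits)
      : storage_(storage), size_in_bits_(size_in_bits) {}

  void SetBit(size_t index) {
    assert(index < size_in_bits_);
    storage_[index / kWordBits] |= Mask(index);
  }

  bool IsBitSet(size_t index) const {
    assert(index < size_in_bits_);
    return (storage_[index / kWordBits] & Mask(index)) != 0u;
  }

 private:
  static size_t Mask(size_t index) {
    return static_cast<size_t>(1) << (index % kWordBits);
  }

  size_t* storage_;  // Not owned: copying the view aliases the same bits.
  size_t size_in_bits_;
};

// Passing the view by value copies only a pointer and a size, yet the callee
// still mutates the caller's storage, much like std::string_view aliasing.
void MarkProcessed(SimpleBitView processed, size_t id) { processed.SetBit(id); }

int main() {
  size_t storage[2] = {0u, 0u};
  SimpleBitView view(storage, 2 * SimpleBitView::kWordBits);
  MarkProcessed(view, 5);  // Mutation through a by-value copy.
  assert(view.IsBitSet(5));
  return 0;
}

Because the view is two machine words, passing it by value avoids one pointer indirection per bit access compared with passing a reference to an owning vector.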
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index b8cd39e77f..9955982309 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -29,21 +29,21 @@ namespace art HIDDEN {
 
-static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
+static void MarkReachableBlocks(HGraph* graph, BitVectorView<size_t> visited) {
   // Use local allocator for allocating memory.
   ScopedArenaAllocator allocator(graph->GetArenaStack());
   ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocDCE));
   constexpr size_t kDefaultWorlistSize = 8;
   worklist.reserve(kDefaultWorlistSize);
-  visited->SetBit(graph->GetEntryBlock()->GetBlockId());
+  visited.SetBit(graph->GetEntryBlock()->GetBlockId());
   worklist.push_back(graph->GetEntryBlock());
 
   while (!worklist.empty()) {
     HBasicBlock* block = worklist.back();
     worklist.pop_back();
     int block_id = block->GetBlockId();
-    DCHECK(visited->IsBitSet(block_id));
+    DCHECK(visited.IsBitSet(block_id));
 
     ArrayRef<HBasicBlock* const> live_successors(block->GetSuccessors());
     HInstruction* last_instruction = block->GetLastInstruction();
@@ -83,8 +83,8 @@ static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
 
     for (HBasicBlock* successor : live_successors) {
       // Add only those successors that have not been visited yet.
-      if (!visited->IsBitSet(successor->GetBlockId())) {
-        visited->SetBit(successor->GetBlockId());
+      if (!visited.IsBitSet(successor->GetBlockId())) {
+        visited.SetBit(successor->GetBlockId());
         worklist.push_back(successor);
       }
     }
@@ -799,8 +799,8 @@ bool HDeadCodeElimination::RemoveEmptyIfs() {
   //      5
   // where 2, 3, and 4 are single HGoto blocks, and block 5 has Phis.
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
-  ArenaBitVector visited_blocks(
-      &allocator, graph_->GetBlocks().size(), /*expandable=*/ false, kArenaAllocDCE);
+  BitVectorView<size_t> visited_blocks =
+      ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size(), kArenaAllocDCE);
   HBasicBlock* merge_true = true_block;
   visited_blocks.SetBit(merge_true->GetBlockId());
   while (merge_true->IsSingleGoto()) {
@@ -822,8 +822,8 @@ bool HDeadCodeElimination::RemoveEmptyIfs() {
 
   // Data structures to help remove now-dead instructions.
   ScopedArenaQueue<HInstruction*> maybe_remove(allocator.Adapter(kArenaAllocDCE));
-  ArenaBitVector visited(
-      &allocator, graph_->GetCurrentInstructionId(), /*expandable=*/ false, kArenaAllocDCE);
+  BitVectorView<size_t> visited = ArenaBitVector::CreateFixedSize(
+      &allocator, graph_->GetCurrentInstructionId(), kArenaAllocDCE);
   maybe_remove.push(if_instr->InputAt(0));
   visited.SetBit(if_instr->GetId());
@@ -874,9 +874,10 @@ bool HDeadCodeElimination::RemoveDeadBlocks(bool force_recomputation,
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
 
   // Classify blocks as reachable/unreachable.
-  ArenaBitVector live_blocks(&allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
+  BitVectorView<size_t> live_blocks =
+      ArenaBitVector::CreateFixedSize(&allocator, graph_->GetBlocks().size(), kArenaAllocDCE);
 
-  MarkReachableBlocks(graph_, &live_blocks);
+  MarkReachableBlocks(graph_, live_blocks);
   bool removed_one_or_more_blocks = false;
   bool rerun_dominance_and_loop_analysis = false;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fcac6cdf5e..9b5cc50e93 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -68,8 +68,8 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
   // Allocate memory from local ScopedArenaAllocator.
   ScopedArenaAllocator allocator(GetArenaStack());
   // Nodes that we're currently visiting, indexed by block id.
-  ArenaBitVector visiting(
-      &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
+  BitVectorView visiting =
+      ArenaBitVector::CreateFixedSize(&allocator, blocks_.size(), kArenaAllocGraphBuilder);
   // Number of successors visited from a given node, indexed by block id.
   ScopedArenaVector<size_t> successors_visited(blocks_.size(), 0u,
diff --git a/libartbase/base/arena_bit_vector.h b/libartbase/base/arena_bit_vector.h
index 52ba24cecf..41d4ff7458 100644
--- a/libartbase/base/arena_bit_vector.h
+++ b/libartbase/base/arena_bit_vector.h
@@ -17,13 +17,15 @@
 #ifndef ART_LIBARTBASE_BASE_ARENA_BIT_VECTOR_H_
 #define ART_LIBARTBASE_BASE_ARENA_BIT_VECTOR_H_
 
+#include <algorithm>
+#include <cstring>
+
 #include "arena_object.h"
 #include "base/arena_allocator.h"
 #include "bit_vector.h"
 
 namespace art {
 
-class ArenaAllocator;
 class ScopedArenaAllocator;
 
 /*
@@ -53,6 +55,22 @@ class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableB
 
   ArenaBitVector(ArenaBitVector&&) = default;
   ArenaBitVector(const ArenaBitVector&) = delete;
+
+  template <typename StorageType = size_t, typename Allocator>
+  static BitVectorView<StorageType> CreateFixedSize(
+      Allocator* allocator, size_t bits, ArenaAllocKind kind = kArenaAllocGrowableBitMap) {
+    static_assert(std::is_same_v<Allocator, ArenaAllocator> ||
+                  std::is_same_v<Allocator, ScopedArenaAllocator>);
+    size_t num_elements = BitVectorView<StorageType>::BitsToWords(bits);
+    StorageType* storage = allocator->template AllocArray<StorageType>(num_elements, kind);
+    if (std::is_same_v<Allocator, ScopedArenaAllocator>) {
+      memset(storage, 0, num_elements * sizeof(StorageType));
+    } else {
+      DCHECK_EQ(std::count(storage, storage + num_elements, static_cast<StorageType>(0)),
+                num_elements);
+    }
+    return {storage, bits};
+  }
 };
 
 }  // namespace art
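ArenaBitVector::CreateFixedSize() above rounds the requested bit count up to whole storage words and guarantees zeroed storage: it memsets when the allocator (ScopedArenaAllocator) may hand out dirty memory, and merely DCHECKs when the allocator pre-zeroes. A standalone sketch of that contract; the two allocator types here are hypothetical stand-ins, not the ART classes:

#include <cassert>
#include <cstdlib>
#include <cstring>
#include <type_traits>

// Stand-in allocators: one returns zeroed memory (like ArenaAllocator in this
// sketch's assumption), the other may return dirty memory.
struct ZeroingAllocator {
  void* Alloc(size_t n) { return calloc(1, n); }
};
struct NonZeroingAllocator {
  void* Alloc(size_t n) { return malloc(n); }
};

template <typename Allocator>
size_t* CreateFixedSizeWords(Allocator* allocator, size_t bits) {
  constexpr size_t kWordBits = sizeof(size_t) * 8;
  size_t num_words = (bits + kWordBits - 1) / kWordBits;  // Round up to whole words.
  auto* storage = static_cast<size_t*>(allocator->Alloc(num_words * sizeof(size_t)));
  if constexpr (std::is_same_v<Allocator, NonZeroingAllocator>) {
    memset(storage, 0, num_words * sizeof(size_t));  // Clear potentially dirty memory.
  }
  // For a zeroing allocator no memset is needed; the real code DCHECKs instead.
  return storage;
}

int main() {
  NonZeroingAllocator alloc;
  size_t* words = CreateFixedSizeWords(&alloc, 100);  // 100 bits -> 2 words on 64-bit.
  assert(words[0] == 0u && words[1] == 0u);
  free(words);
  return 0;
}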
diff --git a/libartbase/base/bit_vector.h b/libartbase/base/bit_vector.h
index ec94efb09f..35a5e7c95b 100644
--- a/libartbase/base/bit_vector.h
+++ b/libartbase/base/bit_vector.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include <iterator>
+#include <limits>
 
 #include "bit_utils.h"
 #include "globals.h"
@@ -27,7 +28,59 @@
 namespace art {
 
 class Allocator;
-class ArenaBitVector;
+
+// A bit vector view encapsulating externally-provided fixed-size storage for bits.
+template <typename StorageType = size_t>
+class BitVectorView {
+ public:
+  using WordType = StorageType;
+  static_assert(std::numeric_limits<WordType>::is_integer);
+  static_assert(!std::numeric_limits<WordType>::is_signed);
+  static constexpr size_t kWordBits = BitSizeOf<WordType>();
+  static_assert(IsPowerOfTwo(kWordBits));
+
+  static constexpr size_t BitsToWords(size_t bits) {
+    return (bits + /* round up */ (kWordBits - 1)) / kWordBits;
+  }
+
+  constexpr BitVectorView(WordType* storage, size_t size_in_bits)
+      : storage_(storage), size_in_bits_(size_in_bits) {}
+
+  // The `BitVectorView<>` can be copied and passed to functions by value.
+  // The new copy shall reference the same underlying data, similarly to `std::string_view`.
+  BitVectorView(const BitVectorView& src) = default;
+
+  constexpr size_t SizeInBits() const {
+    return size_in_bits_;
+  }
+
+  void SetBit(size_t index) {
+    DCHECK_LT(index, size_in_bits_);
+    storage_[WordIndex(index)] |= BitMask(index);
+  }
+
+  void ClearBit(size_t index) {
+    DCHECK_LT(index, size_in_bits_);
+    storage_[WordIndex(index)] &= ~BitMask(index);
+  }
+
+  constexpr bool IsBitSet(size_t index) const {
+    DCHECK_LT(index, size_in_bits_);
+    return (storage_[WordIndex(index)] & BitMask(index)) != 0u;
+  }
+
+ private:
+  static constexpr size_t WordIndex(size_t index) {
+    return index >> WhichPowerOf2(kWordBits);
+  }
+
+  static constexpr WordType BitMask(size_t index) {
+    return static_cast<WordType>(1) << (index % kWordBits);
+  }
+
+  WordType* storage_;
+  size_t size_in_bits_;
+};
 
 /*
  * Expanding bitmap. Bits are numbered starting from zero. All operations on a BitVector are
@@ -154,7 +207,7 @@ class BitVector {
     if (idx >= storage_size_ * kWordBits) {
       EnsureSize(idx);
     }
-    storage_[WordIndex(idx)] |= BitMask(idx);
+    AsView().SetBit(idx);
   }
 
   // Mark the specified bit as "unset".
@@ -162,7 +215,7 @@ class BitVector {
     // If the index is over the size, we don't have to do anything, it is cleared.
     if (idx < storage_size_ * kWordBits) {
       // Otherwise, go ahead and clear it.
-      storage_[WordIndex(idx)] &= ~BitMask(idx);
+      AsView().ClearBit(idx);
     }
   }
 
@@ -170,7 +223,7 @@ class BitVector {
   bool IsBitSet(uint32_t idx) const {
     // If the index is over the size, whether it is expandable or not, this bit does not exist:
     // thus it is not set.
-    return (idx < (storage_size_ * kWordBits)) && IsBitSet(storage_, idx);
+    return (idx < (storage_size_ * kWordBits)) && AsView().IsBitSet(idx);
   }
 
   // Mark all bits bit as "clear".
@@ -291,6 +344,14 @@ class BitVector {
    */
   void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
 
+  BitVectorView<uint32_t> AsView() {
+    return {storage_, storage_size_ * kWordBits};
+  }
+
+  BitVectorView<const uint32_t> AsView() const {
+    return {storage_, storage_size_ * kWordBits};
+  }
+
   // Ensure there is space for a bit at idx.
   void EnsureSize(uint32_t idx);
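BitVectorView::WordIndex() above divides by the word size with a shift, index >> WhichPowerOf2(kWordBits), which is valid because the word size is statically checked to be a power of two. A self-contained sketch of that equivalence, with a local ConstLog2 standing in for ART's WhichPowerOf2():

#include <cstddef>
#include <cstdint>

// Recursive constexpr log2 for powers of two (stand-in for WhichPowerOf2()).
constexpr size_t ConstLog2(size_t n) { return n <= 1 ? 0 : 1 + ConstLog2(n / 2); }

template <typename Word>
constexpr size_t WordIndex(size_t bit_index) {
  constexpr size_t kWordBits = sizeof(Word) * 8;
  static_assert((kWordBits & (kWordBits - 1)) == 0, "word size must be a power of two");
  return bit_index >> ConstLog2(kWordBits);  // Same result as bit_index / kWordBits.
}

// Compile-time checks that shift and division agree for a few indexes.
static_assert(WordIndex<uint32_t>(0) == 0 / 32);
static_assert(WordIndex<uint32_t>(31) == 31 / 32);
static_assert(WordIndex<uint32_t>(32) == 32 / 32);
static_assert(WordIndex<uint64_t>(127) == 127 / 64);

int main() { return 0; }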
diff --git a/libartbase/base/bit_vector_test.cc b/libartbase/base/bit_vector_test.cc
index 244cff1cb4..15803edd17 100644
--- a/libartbase/base/bit_vector_test.cc
+++ b/libartbase/base/bit_vector_test.cc
@@ -25,6 +25,94 @@
 
 namespace art {
 
+template <typename StorageType, StorageType kWord0, StorageType kWord1>
+void TestBitVectorViewSetBitAndClearBit() {
+  static constexpr StorageType kStorage[2] = { kWord0, kWord1 };
+  static constexpr size_t kSizeInBits = 2 * BitSizeOf<StorageType>();
+  static constexpr BitVectorView<const StorageType> kRbv(kStorage, kSizeInBits);
+  auto get_bit_from_params = [](size_t index) constexpr {
+    StorageType word = (index < BitSizeOf<StorageType>()) ? kWord0 : kWord1;
+    size_t shift = index % BitSizeOf<StorageType>();
+    return (word & (static_cast<StorageType>(1u) << shift)) != 0u;
+  };
+  auto verify_is_bit_set = [get_bit_from_params]() constexpr {
+    for (size_t index = 0; index != kSizeInBits; ++index) {
+      // If the `CHECK_EQ()` fails, the `static_assert` evaluation fails at compile time.
+      CHECK_EQ(get_bit_from_params(index), kRbv.IsBitSet(index)) << index;
+    }
+    return true;
+  };
+  static_assert(verify_is_bit_set());
+
+  auto verify_size_in_bits = []() constexpr {
+    for (size_t size = 0; size != kSizeInBits; ++size) {
+      // If the `CHECK_EQ()` fails, the `static_assert` evaluation fails at compile time.
+      CHECK_EQ(size, BitVectorView(kStorage, size).SizeInBits());
+    }
+    return true;
+  };
+  static_assert(verify_size_in_bits());
+
+  StorageType storage[2] = {0u, 0u};
+  size_t size_in_bits = 2 * BitSizeOf<StorageType>();
+  BitVectorView<StorageType> rbv(storage, size_in_bits);
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    ASSERT_FALSE(rbv.IsBitSet(index));
+  }
+  // Set one bit at a time, then clear it.
+  for (size_t bit_to_set = 0; bit_to_set != size_in_bits; ++bit_to_set) {
+    rbv.SetBit(bit_to_set);
+    for (size_t index = 0; index != size_in_bits; ++index) {
+      ASSERT_EQ(index == bit_to_set, rbv.IsBitSet(index));
+    }
+    rbv.ClearBit(bit_to_set);
+    for (size_t index = 0; index != size_in_bits; ++index) {
+      ASSERT_FALSE(rbv.IsBitSet(index));
+    }
+  }
+  // Set bits for `kWord0` and `kWord1`.
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    if (get_bit_from_params(index)) {
+      rbv.SetBit(index);
+    }
+  }
+  ASSERT_EQ(kWord0, storage[0]);
+  ASSERT_EQ(kWord1, storage[1]);
+  // Clear all bits that are already clear.
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    if (!get_bit_from_params(index)) {
+      rbv.ClearBit(index);
+    }
+  }
+  ASSERT_EQ(kWord0, storage[0]);
+  ASSERT_EQ(kWord1, storage[1]);
+  // Clear all bits that are set.
+  for (size_t index = 0; index != size_in_bits; ++index) {
+    if (get_bit_from_params(index)) {
+      rbv.ClearBit(index);
+    }
+  }
+  ASSERT_EQ(0u, storage[0]);
+  ASSERT_EQ(0u, storage[1]);
+}
+
+TEST(BitVectorView, Uint32T) {
+  TestBitVectorViewSetBitAndClearBit<uint32_t, 0x12345678u, 0x87654321u>();
+}
+
+TEST(BitVectorView, Uint64T) {
+  TestBitVectorViewSetBitAndClearBit<uint64_t,
+                                     UINT64_C(0x1234567890abcdef),
+                                     UINT64_C(0xfedcba0987654321)>();
+}
+
+TEST(BitVectorView, SizeT) {
+  // Note: The constants below are truncated on 32-bit architectures.
+  TestBitVectorViewSetBitAndClearBit<size_t,
+                                     static_cast<size_t>(UINT64_C(0xfedcba0987654321)),
+                                     static_cast<size_t>(UINT64_C(0x1234567890abcdef))>();
+}
+
 TEST(BitVector, Test) {
   const size_t kBits = 32;
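The new tests force whole verification loops to run at compile time: a constexpr lambda performs the checks and a static_assert evaluates it, so a failing CHECK_EQ() becomes a compilation error rather than a runtime failure. A standalone sketch of that pattern; the ConstexprCheck helper below is illustrative, not ART's CHECK macro:

#include <cstdio>
#include <cstdlib>

// Non-constexpr: reaching this during constant evaluation is a compile error,
// and reaching it at runtime aborts, mimicking a CHECK failure.
void CheckFailed() {
  std::fputs("check failed\n", stderr);
  std::abort();
}

constexpr void ConstexprCheck(bool condition) {
  if (!condition) {
    CheckFailed();  // Taking this branch in a constant expression fails compilation.
  }
}

constexpr bool VerifyBitMasks() {
  for (unsigned i = 0; i < 32; ++i) {
    // Each set bit must be recoverable from its own mask.
    ConstexprCheck(((1u << i) & (1u << i)) != 0u);
  }
  return true;
}

// If any ConstexprCheck() inside the loop fails, this line fails to compile.
static_assert(VerifyBitMasks());

int main() { return 0; }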
diff --git a/libartservice/service/java/com/android/server/art/Dex2OatStatsReporter.java b/libartservice/service/java/com/android/server/art/Dex2OatStatsReporter.java
index d553113301..2e45c01590 100644
--- a/libartservice/service/java/com/android/server/art/Dex2OatStatsReporter.java
+++ b/libartservice/service/java/com/android/server/art/Dex2OatStatsReporter.java
@@ -16,6 +16,8 @@
 
 package com.android.server.art;
 
+import static com.android.server.art.Utils.Abi;
+
 import android.os.Build;
 
 import androidx.annotation.NonNull;
@@ -23,6 +25,9 @@ import androidx.annotation.RequiresApi;
 
 import com.android.server.art.model.DetailedDexInfo;
 import com.android.server.art.model.DexMetadata;
+import com.android.server.art.model.DexoptParams;
+
+import java.util.List;
 
 /**
  * A class to report dex2oat metrics to StatsD.
@@ -43,6 +48,23 @@ public class Dex2OatStatsReporter {
                 (int) compilationTime);
     }
 
+    public static void reportSkipped(int appId, @NonNull String compilationReason,
+            @DexMetadata.Type int dexMetadataType, @NonNull DetailedDexInfo dexInfo,
+            @NonNull List<Abi> abis) {
+        Dex2OatResult skipped = Dex2OatResult.notRun();
+
+        for (Abi abi : abis) {
+            ArtStatsLog.write(ArtStatsLog.ART_DEX2OAT_REPORTED, appId,
+                    translateCompilerFilter(DexoptParams.COMPILER_FILTER_NOOP),
+                    translateCompilationReason(compilationReason), dexMetadataType,
+                    getApkType(dexInfo), translateIsa(abi.isa()), skipped.status, skipped.exitCode,
+                    skipped.signal,
+                    0, // artifacts size
+                    0 // compilation time
+            );
+        }
+    }
+
     private static int translateCompilerFilter(String compilerFilter) {
         return switch (compilerFilter) {
             case "assume-verified" ->
@@ -66,6 +88,8 @@ public class Dex2OatStatsReporter {
             case "everything" ->
                 ArtStatsLog
                         .ART_DEX2_OAT_REPORTED__COMPILER_FILTER__ART_COMPILATION_FILTER_EVERYTHING;
+            case "skip" ->
+                ArtStatsLog.ART_DEX2_OAT_REPORTED__COMPILER_FILTER__ART_COMPILATION_FILTER_SKIP;
             default ->
                 ArtStatsLog.ART_DEX2_OAT_REPORTED__COMPILER_FILTER__ART_COMPILATION_FILTER_UNKNOWN;
         };
diff --git a/libartservice/service/java/com/android/server/art/Dexopter.java b/libartservice/service/java/com/android/server/art/Dexopter.java
index b13ec66831..cc1b66b9ba 100644
--- a/libartservice/service/java/com/android/server/art/Dexopter.java
+++ b/libartservice/service/java/com/android/server/art/Dexopter.java
@@ -114,7 +114,14 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
             }
 
             String compilerFilter = adjustCompilerFilter(mParams.getCompilerFilter(), dexInfo);
+            DexMetadataInfo dmInfo =
+                    mInjector.getDexMetadataHelper().getDexMetadataInfo(buildDmPath(dexInfo));
             if (compilerFilter.equals(DexoptParams.COMPILER_FILTER_NOOP)) {
+                mInjector.getReporterExecutor().execute(
+                        ()
+                                -> Dex2OatStatsReporter.reportSkipped(mPkgState.getAppId(),
+                                        mParams.getReason(), dmInfo.type(), dexInfo,
+                                        getAllAbis(dexInfo)));
                 continue;
             }
 
@@ -126,9 +133,6 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
                 continue;
             }
 
-            DexMetadataInfo dmInfo =
-                    mInjector.getDexMetadataHelper().getDexMetadataInfo(buildDmPath(dexInfo));
-
             boolean needsToBeShared = needsToBeShared(dexInfo);
             boolean isOtherReadable = true;
             // If true, implies that the profile has changed since the last compilation.
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 5b859c2f05..69d51b6c01 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -398,9 +398,6 @@ art_cc_test {
         "art_test_defaults",
         "art_libdexfile_external_tests_defaults",
     ],
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 // Standalone version of ART gtest `art_libdexfile_external_tests`, not bundled with the ART APEX on
@@ -543,9 +540,6 @@ art_cc_test {
     srcs: [
         "external/dex_file_supp_test.cc",
     ],
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 // For use by external packages allowed to link in static libdexfile_support.
@@ -611,9 +605,6 @@ art_cc_test {
     sanitize: {
         misc_undefined: ["shift"],
     },
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 filegroup {
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index fafd8b55b4..03bc448ddf 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -4425,14 +4425,17 @@ void MarkCompact::MarkingPhase() {
 
 class MarkCompact::RefFieldsVisitor {
  public:
-  ALWAYS_INLINE RefFieldsVisitor(MarkCompact* const mark_compact, mirror::Object* obj)
+  ALWAYS_INLINE RefFieldsVisitor(MarkCompact* const mark_compact)
       : mark_compact_(mark_compact),
        young_gen_begin_(mark_compact->mid_gen_end_),
        young_gen_end_(mark_compact->moving_space_end_),
        dirty_card_(false),
-        check_refs_to_young_gen_(mark_compact->use_generational_ &&
-                                 (reinterpret_cast<uint8_t*>(obj) < young_gen_begin_ ||
-                                  reinterpret_cast<uint8_t*>(obj) >= young_gen_end_)) {}
+        // Ideally we should only check for objects outside young-gen. However,
+        // the boundary of young-gen can change later in PrepareForCompaction()
+        // as we need the mid-gen-end to be page-aligned. Since most of the
+        // objects don't have native-roots, it's not too costly to check all
+        // objects being visited during marking.
+        check_native_roots_to_young_gen_(mark_compact->use_generational_) {}
 
   bool ShouldDirtyCard() const { return dirty_card_; }
 
@@ -4469,7 +4472,7 @@ class MarkCompact::RefFieldsVisitor {
     }
     mirror::Object* ref = root->AsMirrorPtr();
     mark_compact_->MarkObject(ref);
-    if (check_refs_to_young_gen_) {
+    if (check_native_roots_to_young_gen_) {
       dirty_card_ |= reinterpret_cast<uint8_t*>(ref) >= young_gen_begin_ &&
                      reinterpret_cast<uint8_t*>(ref) < young_gen_end_;
     }
@@ -4480,7 +4483,7 @@ class MarkCompact::RefFieldsVisitor {
   uint8_t* const young_gen_begin_;
   uint8_t* const young_gen_end_;
   mutable bool dirty_card_;
-  const bool check_refs_to_young_gen_;
+  const bool check_native_roots_to_young_gen_;
 };
 
 template <size_t kAlignment>
@@ -4556,7 +4559,7 @@ void MarkCompact::ScanObject(mirror::Object* obj) {
   size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
   bytes_scanned_ += obj_size;
 
-  RefFieldsVisitor visitor(this, obj);
+  RefFieldsVisitor visitor(this);
   DCHECK(IsMarked(obj)) << "Scanning marked object " << obj << "\n" << heap_->DumpSpaces();
   if (kUpdateLiveWords && HasAddress(obj)) {
     UpdateLivenessInfo(obj, obj_size);
@@ -4887,8 +4890,14 @@ void MarkCompact::FinishPhase(bool performed_compaction) {
     for (auto obj : dirty_cards_later_vec_) {
       // Only moving and non-moving spaces are relevant as the remaining
       // spaces are all immune-spaces which anyways use card-table.
-      if (HasAddress(obj) || non_moving_space_->HasAddress(obj)) {
-        card_table->MarkCard(PostCompactAddress(obj, black_dense_end_, moving_space_end_));
+      if (HasAddress(obj)) {
+        // Objects in young-gen referring to other young-gen objects don't
+        // need to be tracked.
+        if (reinterpret_cast<uint8_t*>(obj) < mid_gen_end_) {
+          card_table->MarkCard(PostCompactAddress(obj, black_dense_end_, moving_space_end_));
+        }
+      } else if (non_moving_space_->HasAddress(obj)) {
+        card_table->MarkCard(obj);
       }
     }
   }
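The card-dirtying logic above hinges on a half-open address range test: a card needs dirtying only when a native root points into [young_gen_begin_, young_gen_end_). A standalone sketch of that test, with plain integers standing in for heap addresses; the ART card table itself is not modeled here:

#include <cassert>
#include <cstdint>

struct YoungGenRange {
  uintptr_t begin;
  uintptr_t end;  // Exclusive.

  // Returns true when `ref` needs its holder's card dirtied so the next
  // young-gen collection re-scans the holding object.
  bool NeedsDirtyCard(uintptr_t ref) const { return ref >= begin && ref < end; }
};

int main() {
  YoungGenRange young{0x2000, 0x3000};
  assert(young.NeedsDirtyCard(0x2000));   // First young-gen address.
  assert(young.NeedsDirtyCard(0x2fff));   // Last young-gen address.
  assert(!young.NeedsDirtyCard(0x1fff));  // Below the range: older generation.
  assert(!young.NeedsDirtyCard(0x3000));  // `end` is exclusive.
  return 0;
}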
diff --git a/runtime/trace_profile.cc b/runtime/trace_profile.cc
index 4c0abbf1da..58927d8948 100644
--- a/runtime/trace_profile.cc
+++ b/runtime/trace_profile.cc
@@ -88,6 +88,26 @@ void TraceData::SignalTraceDumpComplete() {
   trace_dump_condition_.Broadcast(Thread::Current());
 }
 
+void TraceData::AppendToLongRunningMethods(const uint8_t* buffer, size_t size) {
+  MutexLock mu(Thread::Current(), trace_data_lock_);
+  if (curr_buffer_ == nullptr) {
+    curr_buffer_.reset(new uint8_t[kBufSizeForEncodedData]);
+    curr_index_ = 0;
+  }
+  if (curr_index_ + size <= kBufSizeForEncodedData) {
+    memcpy(curr_buffer_.get() + curr_index_, buffer, size);
+    curr_index_ += size;
+  } else {
+    size_t remaining_bytes = kBufSizeForEncodedData - curr_index_;
+    if (remaining_bytes != 0) {
+      memcpy(curr_buffer_.get() + curr_index_, buffer, remaining_bytes);
+    }
+    overflow_buffers_.push_back(std::move(curr_buffer_));
+    curr_buffer_.reset(new uint8_t[kBufSizeForEncodedData]);
+    memcpy(curr_buffer_.get(), buffer + remaining_bytes, size - remaining_bytes);
+  }
+}
+
 void TraceProfiler::AllocateBuffer(Thread* thread) {
   if (!art_flags::always_enable_profile_code()) {
     return;
@@ -162,8 +182,6 @@ void RecordMethodsOnThreadStack(Thread* thread, uintptr_t* method_trace_buffer)
   uint64_t init_time = TimestampCounter::GetNanoTime(TimestampCounter::GetTimestamp());
   // Set the lsb to 0 to indicate method entry.
   init_time = init_time & ~1;
-  std::ostringstream os;
-  os << "Thread:" << thread->GetTid() << "\n";
   size_t index = kAlwaysOnTraceBufSize - 1;
   for (auto smi = visitor.stack_methods_.rbegin(); smi != visitor.stack_methods_.rend(); smi++) {
     method_trace_buffer[index--] = reinterpret_cast<uintptr_t>(*smi);
@@ -477,38 +495,60 @@ void TraceProfiler::TraceTimeElapsed() {
   TraceProfiler::StopLocked();
 }
 
-void TraceProfiler::DumpLongRunningMethodBuffer(uint32_t thread_id,
-                                                uintptr_t* method_trace_entries,
-                                                uintptr_t* end_trace_entries,
-                                                std::unordered_set<ArtMethod*>& methods,
-                                                std::ostringstream& os) {
-  os << "Thread:" << thread_id << "\n";
-  for (uintptr_t* ptr = method_trace_entries + kAlwaysOnTraceBufSize - 1;
-       ptr >= end_trace_entries;) {
-    uintptr_t entry = *ptr;
-    if (entry == 0x1) {
-      // This is the special placeholder exit we added to record all methods on the stack at the
-      // start of the trace. Just ignore this entry.
-    } else if (entry & 0x1) {
-      // Method exit
-      os << "<-" << TimestampCounter::GetNanoTime(entry & ~1) << "\n";
+size_t TraceProfiler::DumpLongRunningMethodBuffer(uint32_t thread_id,
+                                                  uintptr_t* method_trace_entries,
+                                                  uintptr_t* end_trace_entries,
+                                                  uint8_t* buffer,
+                                                  std::unordered_set<ArtMethod*>& methods) {
+  // Encode header at the end once we compute the number of records.
+  uint8_t* curr_buffer_ptr = buffer + kAlwaysOnTraceHeaderSize;
+
+  int num_records = 0;
+  uintptr_t prev_timestamp_action_encoding = 0;
+  uintptr_t prev_method_ptr = 0;
+  size_t end_index = end_trace_entries - method_trace_entries;
+  for (size_t i = kAlwaysOnTraceBufSize - 1; i >= end_index;) {
+    uintptr_t event = method_trace_entries[i--];
+    bool is_method_exit = event & 0x1;
+    uint64_t timestamp_action_encoding;
+    uintptr_t method_ptr;
+    if (is_method_exit) {
+      // Method exit. We only have timestamp here.
+      timestamp_action_encoding = event & ~1;
     } else {
-      // Method entry
-      ArtMethod* method = reinterpret_cast<ArtMethod*>(entry);
-      ptr--;
-      CHECK(ptr >= end_trace_entries);
-      os << "->" << method << " " << TimestampCounter::GetNanoTime(*ptr) << "\n";
+      // method entry
+      method_ptr = event;
+      timestamp_action_encoding = method_trace_entries[i--];
+    }
+
+    int64_t timestamp_action_diff = timestamp_action_encoding - prev_timestamp_action_encoding;
+    int64_t method_diff;
+    if (!is_method_exit) {
+      method_diff = method_ptr - prev_method_ptr;
+      ArtMethod* method = reinterpret_cast<ArtMethod*>(method_ptr);
       methods.insert(method);
+      prev_method_ptr = method_ptr;
+      curr_buffer_ptr = EncodeSignedLeb128(curr_buffer_ptr, method_diff);
     }
-    ptr--;
+    curr_buffer_ptr = EncodeSignedLeb128(curr_buffer_ptr, timestamp_action_diff);
+    num_records++;
+    prev_timestamp_action_encoding = timestamp_action_encoding;
   }
+
+  // Fill in header information:
+  // 1 byte of header identifier
+  // 4 bytes of thread_id
+  // 3 bytes of number of records
+  buffer[0] = kEntryHeaderV2;
+  Append4LE(buffer + 1, thread_id);
+  Append3LE(buffer + 5, num_records);
+  return curr_buffer_ptr - buffer;
 }
 
 void TraceProfiler::FlushBufferAndRecordTraceEvent(ArtMethod* method,
                                                    Thread* thread,
                                                    bool is_entry) {
   uint64_t timestamp = TimestampCounter::GetTimestamp();
-  std::ostringstream os;
   std::unordered_set<ArtMethod*> traced_methods;
   uintptr_t* method_trace_entries = thread->GetMethodTraceBuffer();
   DCHECK(method_trace_entries != nullptr);
@@ -529,19 +569,28 @@ void TraceProfiler::FlushBufferAndRecordTraceEvent(ArtMethod* method,
 
   size_t num_occupied_entries = (processed_events_ptr - *method_trace_curr_ptr);
   size_t index = kAlwaysOnTraceBufSize;
+
+  std::unique_ptr<uint8_t> buffer_ptr(new uint8_t[kBufSizeForEncodedData]);
+  size_t num_bytes;
   if (num_occupied_entries > kMaxEntriesAfterFlush) {
     // If we don't have sufficient space just record a placeholder exit and flush all the existing
     // events. We have accurate timestamps to filter out these events in a post-processing step.
     // This would happen only when we have very deeply (~1024) nested code.
-    DumpLongRunningMethodBuffer(
-        thread->GetTid(), method_trace_entries, *method_trace_curr_ptr, traced_methods, os);
+    num_bytes = DumpLongRunningMethodBuffer(thread->GetTid(),
+                                            method_trace_entries,
+                                            *method_trace_curr_ptr,
+                                            buffer_ptr.get(),
+                                            traced_methods);
     // Encode a placeholder exit event. This will be ignored when dumping the methods.
     method_trace_entries[--index] = 0x1;
   } else {
     // Flush all the entries till the method exit event.
-    DumpLongRunningMethodBuffer(
-        thread->GetTid(), method_trace_entries, processed_events_ptr, traced_methods, os);
+    num_bytes = DumpLongRunningMethodBuffer(thread->GetTid(),
+                                            method_trace_entries,
+                                            processed_events_ptr,
+                                            buffer_ptr.get(),
+                                            traced_methods);
 
     // Move the remaining events to the start of the buffer.
     for (uintptr_t* ptr = processed_events_ptr - 1; ptr >= *method_trace_curr_ptr; ptr--) {
@@ -569,7 +618,7 @@ void TraceProfiler::FlushBufferAndRecordTraceEvent(ArtMethod* method,
   *method_trace_curr_ptr = method_trace_entries + index;
 
   MutexLock mu(Thread::Current(), *Locks::trace_lock_);
-  trace_data_->AppendToLongRunningMethods(os.str());
+  trace_data_->AppendToLongRunningMethods(buffer_ptr.get(), num_bytes);
   trace_data_->AddTracedMethods(traced_methods);
   trace_data_->AddTracedThread(thread);
 }
@@ -590,10 +639,20 @@ void TraceDumpCheckpoint::Run(Thread* thread) {
   std::unordered_set<ArtMethod*> traced_methods;
   if (trace_data_->GetTraceType() == LowOverheadTraceType::kLongRunningMethods) {
     uintptr_t* method_trace_curr_ptr = *(thread->GetTraceBufferCurrEntryPtr());
-    std::ostringstream os;
-    TraceProfiler::DumpLongRunningMethodBuffer(
-        thread->GetTid(), method_trace_entries, method_trace_curr_ptr, traced_methods, os);
-    trace_data_->AppendToLongRunningMethods(os.str());
+    std::unique_ptr<uint8_t> buffer_ptr(new uint8_t[kBufSizeForEncodedData]);
+    size_t num_bytes = TraceProfiler::DumpLongRunningMethodBuffer(thread->GetTid(),
+                                                                  method_trace_entries,
+                                                                  method_trace_curr_ptr,
+                                                                  buffer_ptr.get(),
+                                                                  traced_methods);
+    MutexLock mu(Thread::Current(), trace_file_lock_);
+    if (trace_file_ != nullptr) {
+      if (!trace_file_->WriteFully(buffer_ptr.get(), num_bytes)) {
+        PLOG(WARNING) << "Failed streaming a tracing event.";
+      }
+    } else {
+      trace_data_->AppendToLongRunningMethods(buffer_ptr.get(), num_bytes);
+    }
   } else {
     std::unique_ptr<uint8_t> buffer_ptr(new uint8_t[kBufSizeForEncodedData]);
     size_t num_bytes = TraceProfiler::DumpBuffer(
@@ -632,8 +691,12 @@ void TraceData::DumpData(std::ostringstream& os) {
     // We cannot dump method information while holding trace_lock_, since we have to also
     // acquire a mutator lock. Take a snapshot of thread and method information.
     MutexLock mu(Thread::Current(), trace_data_lock_);
-    if (long_running_methods_.length() > 0) {
-      os << long_running_methods_;
+    if (curr_buffer_ != nullptr) {
+      for (size_t i = 0; i < overflow_buffers_.size(); i++) {
+        os.write(reinterpret_cast<char*>(overflow_buffers_[i].get()), kBufSizeForEncodedData);
+      }
+
+      os.write(reinterpret_cast<char*>(curr_buffer_.get()), curr_index_);
     }
 
     methods = traced_methods_;
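The rewritten DumpLongRunningMethodBuffer() above shrinks the trace by emitting signed LEB128 deltas between consecutive timestamps and method pointers instead of raw 64-bit values, so small differences encode in one or two bytes. A self-contained sketch of signed LEB128 delta encoding with a round-trip check; it is simplified relative to ART's EncodeSignedLeb128() in leb128.h, which writes through a raw pointer rather than a vector:

#include <cassert>
#include <cstdint>
#include <vector>

// Append one signed LEB128-encoded value to `out`.
void EncodeSignedLeb128(std::vector<uint8_t>* out, int64_t value) {
  while (true) {
    uint8_t byte = value & 0x7f;
    value >>= 7;  // Arithmetic shift keeps the sign.
    bool done = (value == 0 && (byte & 0x40) == 0) ||
                (value == -1 && (byte & 0x40) != 0);
    if (!done) byte |= 0x80;  // Continuation bit.
    out->push_back(byte);
    if (done) break;
  }
}

// Read one signed LEB128 value, advancing *data past it.
int64_t DecodeSignedLeb128(const uint8_t** data) {
  int64_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*data)++;
    result |= static_cast<int64_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  if (shift < 64 && (byte & 0x40) != 0) {
    result |= -(static_cast<int64_t>(1) << shift);  // Sign-extend.
  }
  return result;
}

int main() {
  // Small deltas between consecutive timestamps encode in one or two bytes.
  std::vector<uint8_t> buf;
  for (int64_t delta : {0, 100, -100, 4096, -4096}) {
    EncodeSignedLeb128(&buf, delta);
  }
  const uint8_t* p = buf.data();
  for (int64_t expected : {0, 100, -100, 4096, -4096}) {
    assert(DecodeSignedLeb128(&p) == expected);
  }
  return 0;
}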
diff --git a/runtime/trace_profile.h b/runtime/trace_profile.h
index df8ddec387..6cfc91b1fb 100644
--- a/runtime/trace_profile.h
+++ b/runtime/trace_profile.h
@@ -46,7 +46,9 @@ enum class LowOverheadTraceType {
 class TraceData {
  public:
   explicit TraceData(LowOverheadTraceType trace_type)
-      : trace_type_(trace_type),
+      : curr_buffer_(nullptr),
+        curr_index_(0),
+        trace_type_(trace_type),
         trace_end_time_(0),
         trace_dump_in_progress_(false),
         trace_dump_condition_("trace dump condition", *Locks::trace_lock_),
@@ -64,14 +66,11 @@ class TraceData {
     trace_end_time_ = end_time;
   }
 
-  // Dumps events collected in long_running_methods_ and the information about
-  // threads and methods into the output stream.
+  // Dumps events collected in the buffers and the information about threads and methods into the
+  // output stream.
   void DumpData(std::ostringstream& os);
 
-  void AppendToLongRunningMethods(const std::string& str) {
-    MutexLock mu(Thread::Current(), trace_data_lock_);
-    long_running_methods_.append(str);
-  }
+  void AppendToLongRunningMethods(const uint8_t* buffer, size_t size);
 
   void AddTracedMethods(std::unordered_set<ArtMethod*>& methods) {
     MutexLock mu(Thread::Current(), trace_data_lock_);
@@ -102,9 +101,14 @@ class TraceData {
   }
 
  private:
-  // This is used to hold the initial methods on stack and also long running methods when there is a
-  // buffer overflow.
-  std::string long_running_methods_ GUARDED_BY(trace_data_lock_);
+  // This is used to hold the long running methods when the per-thread buffer overflows.
+  std::unique_ptr<uint8_t> curr_buffer_ GUARDED_BY(trace_data_lock_);
+
+  // The index of the next free space in the curr_buffer_.
+  size_t curr_index_ GUARDED_BY(trace_data_lock_);
+
+  // When the curr_buffer_ becomes full, we store it in this list and allocate a new buffer.
+  std::vector<std::unique_ptr<uint8_t>> overflow_buffers_ GUARDED_BY(trace_data_lock_);
 
   LowOverheadTraceType trace_type_;
 
@@ -126,7 +130,7 @@ class TraceData {
   bool trace_dump_in_progress_ GUARDED_BY(Locks::trace_lock_);
   ConditionVariable trace_dump_condition_ GUARDED_BY(Locks::trace_lock_);
 
-  // Lock to synchronize access to traced_methods_, traced_threads_ and long_running_methods_ which
+  // Lock to synchronize access to traced_methods_, traced_threads_ and curr_buffer_ which
   // can be accessed simultaneously by multiple threads when running TraceDumpCheckpoint.
   Mutex trace_data_lock_;
 };
@@ -150,7 +154,8 @@ class TraceDumpCheckpoint final : public Closure {
   // Trace data to record the data from each thread.
   TraceData* trace_data_;
 
-  // Trace file to flush the data.
+  // Trace file to flush the data. If trace_file_ is null, the data is recorded in trace_data_
+  // instead.
   const std::unique_ptr<File>& trace_file_ GUARDED_BY(trace_file_lock_);
 
   // Lock to synchronize access to trace_file_. We need to write the data of
@@ -225,13 +230,13 @@ class TraceProfiler {
                            uint8_t* buffer /* out */,
                            std::unordered_set<ArtMethod*>& methods /* out */);
 
-  // Dumps all the events in the buffer into the file. Also records the ArtMethods from the events
-  // which is then used to record information about these methods.
-  static void DumpLongRunningMethodBuffer(uint32_t thread_id,
-                                          uintptr_t* thread_buffer,
-                                          uintptr_t* end_buffer,
-                                          std::unordered_set<ArtMethod*>& methods /* out */,
-                                          std::ostringstream& os);
+  // Dumps all the trace events from the thread into the buffer. Also records the ArtMethods from
+  // the events, which are then used to record information about these methods.
+  static size_t DumpLongRunningMethodBuffer(uint32_t thread_id,
+                                            uintptr_t* method_trace_entries,
+                                            uintptr_t* end_trace_entries,
+                                            uint8_t* buffer,
+                                            std::unordered_set<ArtMethod*>& methods);
 
   static bool profile_in_progress_ GUARDED_BY(Locks::trace_lock_);
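TraceData above replaces the growing std::string with fixed-size chunks: a current buffer plus a list of retired overflow buffers, so an append never reallocates or copies previously recorded data. A standalone sketch of the same append-and-split logic with a deliberately tiny chunk size; ChunkedBuffer is an illustrative stand-in, not the ART class:

#include <cassert>
#include <cstring>
#include <memory>
#include <vector>

class ChunkedBuffer {
 public:
  static constexpr size_t kChunkSize = 8;  // ART uses kBufSizeForEncodedData.

  void Append(const uint8_t* data, size_t size) {
    assert(size <= kChunkSize);  // Matches the single-split logic below.
    if (curr_ == nullptr) {
      curr_.reset(new uint8_t[kChunkSize]);
      index_ = 0;
    }
    if (index_ + size <= kChunkSize) {
      memcpy(curr_.get() + index_, data, size);
      index_ += size;
    } else {
      // Fill the current chunk, retire it, and start a new one with the rest.
      size_t remaining = kChunkSize - index_;
      memcpy(curr_.get() + index_, data, remaining);
      overflow_.push_back(std::move(curr_));
      curr_.reset(new uint8_t[kChunkSize]);
      memcpy(curr_.get(), data + remaining, size - remaining);
      index_ = size - remaining;
    }
  }

  size_t TotalSize() const { return overflow_.size() * kChunkSize + index_; }

 private:
  std::unique_ptr<uint8_t[]> curr_;
  size_t index_ = 0;
  std::vector<std::unique_ptr<uint8_t[]>> overflow_;
};

int main() {
  ChunkedBuffer buf;
  const uint8_t bytes[6] = {1, 2, 3, 4, 5, 6};
  buf.Append(bytes, 6);  // Fits entirely in the first chunk.
  buf.Append(bytes, 6);  // Splits: 2 bytes finish chunk 0, 4 start chunk 1.
  assert(buf.TotalSize() == 12);
  return 0;
}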
diff --git a/test/Android.bp b/test/Android.bp
index 226e182498..63840648ca 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -86,12 +86,6 @@ cc_defaults {
             relative_install_path: "art/x86_64",
         },
     },
-    // Tests aren't generally included in any APEX, but this is necessary to
-    // avoid duplicate install rules for them by making them unavailable to platform.
-    // TODO(b/395617594): Figure out why this is necessary and remove it.
-    apex_available: [
-        "com.android.art.debug",
-    ],
 }
 
 // Variant of art_test_defaults for test libraries that installs them in a
@@ -544,9 +538,6 @@ art_cc_defaults {
     defaults: ["art_test_defaults"],
     header_libs: ["libnativebridge-headers"],
     srcs: ["115-native-bridge/nativebridge.cc"],
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 art_cc_test_library {
@@ -576,9 +567,6 @@ cc_defaults {
         "liblog",
         "libnativehelper",
     ],
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 art_cc_test_library {
@@ -822,9 +810,6 @@ art_cc_defaults {
         "slicer_no_rtti",
         "libz",  // for slicer (using adler32).
     ],
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 art_cc_test_library {
@@ -854,9 +839,6 @@ art_cc_defaults {
         "libtistress-srcs",
     ],
     static_libs: ["slicer_no_rtti"],
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 art_cc_test_library {
@@ -968,9 +950,6 @@ cc_defaults {
             shared_libs: ["libdl_android"],
         },
     },
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 art_cc_test_library {
@@ -1015,9 +994,6 @@ cc_defaults {
         "liblog",
         "libnativehelper",
    ],
-    // TODO(b/395617594): This can be removed when apex_available is removed
-    // from art_test_defaults.
-    apex_available: ["//apex_available:platform"],
 }
 
 art_cc_test_library {
diff --git a/test/generate-boot-image/generate-boot-image.cc b/test/generate-boot-image/generate-boot-image.cc
index 3cd992f0c6..454b63185a 100644
--- a/test/generate-boot-image/generate-boot-image.cc
+++ b/test/generate-boot-image/generate-boot-image.cc
@@ -80,6 +80,9 @@ Supported options:
       host. The default on target is based on the ISA of this binary.
   --core-only=true|false: If true, only compile ART jars. Otherwise, also compile core-icu4j and
       conscrypt. Default: false
+  --android-root-for-location=true|false: If true, use --android-root as a prefix to the dex
+      locations. This allows non-device paths to the bootclasspath jars to be used, for example: to
+      generate a boot image on host that can be used on host. Default: false
   --: Arguments following '--' are directly passed to dex2oat.
 )";
 
@@ -94,6 +97,7 @@ struct Options {
   std::string profile_file = "";
   std::string instruction_set = "";
   bool core_only = false;
+  bool android_root_for_location = false;
   std::vector<std::string> dex2oat_options;
 };
 
@@ -133,7 +137,12 @@ int GenerateBootImage(const Options& options) {
 
   std::vector<std::string> dex_files =
      GetLibCoreDexFileNames(options.android_root, options.core_only);
-  std::vector<std::string> dex_locations = GetLibCoreDexLocations(options.core_only);
+  std::vector<std::string> dex_locations;
+  if (options.android_root_for_location) {
+    dex_locations = dex_files;
+  } else {
+    dex_locations = GetLibCoreDexLocations(options.core_only);
+  }
   args.push_back("--runtime-arg");
   args.push_back("-Xbootclasspath:" + Join(dex_files, ":"));
   args.push_back("--runtime-arg");
@@ -209,6 +218,12 @@ int Main(int argc, char** argv) {
       Usage(ART_FORMAT("Unrecognized --core-only value: '{}'", arg));
     }
     options.core_only = result == ParseBoolResult::kTrue;
+  } else if (ConsumePrefix(&arg, "--android-root-for-location=")) {
+    ParseBoolResult result = ParseBool(arg);
+    if (result == ParseBoolResult::kError) {
+      Usage(ART_FORMAT("Unrecognized --android-root-for-location value: '{}'", arg));
+    }
+    options.android_root_for_location = result == ParseBoolResult::kTrue;
   } else if (arg == "--") {
     for (i++; i < argc; i++) {
      options.dex2oat_options.push_back(argv[i]);
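The new --android-root-for-location flag follows the file's existing parsing pattern: strip the "--name=" prefix, then parse the boolean that remains. A standalone sketch of that pattern; the ConsumePrefix and ParseBool stand-ins below are simplified, whereas the real helpers come from android-base and libartbase:

#include <cassert>
#include <string_view>

enum class ParseBoolResult { kError, kFalse, kTrue };

// Strips `prefix` from *arg in place; returns false if it does not match.
bool ConsumePrefix(std::string_view* arg, std::string_view prefix) {
  if (arg->substr(0, prefix.size()) != prefix) return false;
  arg->remove_prefix(prefix.size());
  return true;
}

ParseBoolResult ParseBool(std::string_view value) {
  if (value == "true") return ParseBoolResult::kTrue;
  if (value == "false") return ParseBoolResult::kFalse;
  return ParseBoolResult::kError;
}

int main() {
  std::string_view arg = "--android-root-for-location=true";
  bool flag = false;
  if (ConsumePrefix(&arg, "--android-root-for-location=")) {
    ParseBoolResult result = ParseBool(arg);  // `arg` is now just the value.
    assert(result != ParseBoolResult::kError);  // The real code calls Usage() here.
    flag = (result == ParseBoolResult::kTrue);
  }
  assert(flag);
  return 0;
}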
)"; @@ -94,6 +97,7 @@ struct Options { std::string profile_file = ""; std::string instruction_set = ""; bool core_only = false; + bool android_root_for_location = false; std::vector<std::string> dex2oat_options; }; @@ -133,7 +137,12 @@ int GenerateBootImage(const Options& options) { std::vector<std::string> dex_files = GetLibCoreDexFileNames(options.android_root, options.core_only); - std::vector<std::string> dex_locations = GetLibCoreDexLocations(options.core_only); + std::vector<std::string> dex_locations; + if (options.android_root_for_location) { + dex_locations = dex_files; + } else { + dex_locations = GetLibCoreDexLocations(options.core_only); + } args.push_back("--runtime-arg"); args.push_back("-Xbootclasspath:" + Join(dex_files, ":")); args.push_back("--runtime-arg"); @@ -209,6 +218,12 @@ int Main(int argc, char** argv) { Usage(ART_FORMAT("Unrecognized --core-only value: '{}'", arg)); } options.core_only = result == ParseBoolResult::kTrue; + } else if (ConsumePrefix(&arg, "--android-root-for-location=")) { + ParseBoolResult result = ParseBool(arg); + if (result == ParseBoolResult::kError) { + Usage(ART_FORMAT("Unrecognized --android-root-for-location value: '{}'", arg)); + } + options.android_root_for_location = result == ParseBoolResult::kTrue; } else if (arg == "--") { for (i++; i < argc; i++) { options.dex2oat_options.push_back(argv[i]); diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh index 861664839f..acc0ad78d8 100755 --- a/tools/buildbot-build.sh +++ b/tools/buildbot-build.sh @@ -179,6 +179,14 @@ if [[ $build_target == "yes" ]]; then # Build/install the required APEXes. make_command+=" ${apexes[*]}" make_command+=" ${specific_targets}" + + # Although the simulator is run on the host, we reuse the target build to + # build the target run tests on the host. + if [[ -n "${ART_USE_SIMULATOR}" ]]; then + # Build any simulator specific components, such as a target boot image, on + # the host. + make_command+=" build-art-simulator" + fi fi if [[ $installclean == "yes" ]]; then |