| field | value |
|---|---|
| author | 2025-01-23 16:01:44 -0800 |
| committer | 2025-01-23 16:01:44 -0800 |
| commit | 35f3a8a1c34cb233fee43885cf4626dfb45f6bbe (patch) |
| tree | 8ebf0b5e4c8471f832bb4b1061574607baa81364 |
| parent | aa717c9dce95d4353e20b21aa0225d4fd8238a6f (diff) |
| parent | 98645abb2142fdb6dda39384b6764b89d09a7625 (diff) |
Snap for 12962709 from 98645abb2142fdb6dda39384b6764b89d09a7625 to 25Q2-release
Change-Id: Ic3f7c0489bb4d5c52f48349e87b01521bd56c396
29 files changed, 684 insertions, 304 deletions
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h index 39fcaebd63..20f5dbed7f 100644 --- a/cmdline/cmdline.h +++ b/cmdline/cmdline.h @@ -33,9 +33,11 @@ #include "base/logging.h" #include "base/mutex.h" #include "base/utils.h" +#include "jni/jni_env_ext.h" #include "noop_compiler_callbacks.h" #include "oat/oat_file_assistant_context.h" #include "runtime.h" +#include "well_known_classes.h" #if !defined(NDEBUG) #define DBG_LOG LOG(INFO) @@ -85,6 +87,9 @@ static Runtime* StartRuntime(const std::vector<std::string>& boot_image_location return nullptr; } + // Need well-known-classes. + WellKnownClasses::Init(Thread::Current()->GetJniEnv()); + // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, // give it away now and then switch to a more manageable ScopedObjectAccess. Thread::Current()->TransitionFromRunnableToSuspended(ThreadState::kNative); diff --git a/compiler/optimizing/code_flow_simplifier.cc b/compiler/optimizing/code_flow_simplifier.cc index b32a4a5c4c..855da3e959 100644 --- a/compiler/optimizing/code_flow_simplifier.cc +++ b/compiler/optimizing/code_flow_simplifier.cc @@ -68,23 +68,31 @@ static bool BlocksMergeTogether(HBasicBlock* block1, HBasicBlock* block2) { return block1->GetSingleSuccessor() == block2->GetSingleSuccessor(); } -// Returns nullptr if `block` has either no phis or there is more than one phi. Otherwise returns -// that phi. -static HPhi* GetSinglePhi(HBasicBlock* block, size_t index1, size_t index2) { +// Search `block` for phis that have different inputs at `index1` and `index2`. +// If none is found, returns `{true, nullptr}`. +// If exactly one such `phi` is found, returns `{true, phi}`. +// Otherwise (if more than one such phi is found), returns `{false, nullptr}`. +static std::pair<bool, HPhi*> HasAtMostOnePhiWithDifferentInputs(HBasicBlock* block, + size_t index1, + size_t index2) { DCHECK_NE(index1, index2); HPhi* select_phi = nullptr; for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { HPhi* phi = it.Current()->AsPhi(); + auto&& inputs = phi->GetInputs(); + if (inputs[index1] == inputs[index2]) { + continue; + } if (select_phi == nullptr) { // First phi found. select_phi = phi; } else { // More than one phi found, return null. - return nullptr; + return {false, nullptr}; } } - return select_phi; + return {true, select_phi}; } bool HCodeFlowSimplifier::TryGenerateSelectSimpleDiamondPattern( @@ -132,54 +140,60 @@ bool HCodeFlowSimplifier::TryGenerateSelectSimpleDiamondPattern( // a = 1; b = 2; // } // // use a and b - HPhi* phi = GetSinglePhi(merge_block, predecessor_index_true, predecessor_index_false); - + bool at_most_one_phi_with_different_inputs = false; + HPhi* phi = nullptr; HInstruction* true_value = nullptr; HInstruction* false_value = nullptr; if (both_successors_return) { + // Note: This can create a select with the same then-value and else-value. 
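For reference, a minimal standalone sketch of the "at most one match" search pattern that `HasAtMostOnePhiWithDifferentInputs()` follows; the `Node` type and its `interesting` flag are hypothetical stand-ins for `HPhi` and the different-inputs test, not ART types:

```cpp
#include <utility>
#include <vector>

// Scan a range: succeed with nullptr for zero matches, succeed with the match
// for exactly one, and fail outright for more than one.
struct Node {
  bool interesting;
};

std::pair<bool, Node*> FindAtMostOne(std::vector<Node>& nodes) {
  Node* found = nullptr;
  for (Node& node : nodes) {
    if (!node.interesting) {
      continue;  // Like phis whose two inputs are identical.
    }
    if (found == nullptr) {
      found = &node;  // First match.
    } else {
      return {false, nullptr};  // Second match: the caller must bail out.
    }
  }
  return {true, found};  // Zero or one match; `found` may still be nullptr.
}
```

The caller unpacks the pair with `std::tie`, as the hunk does, so the "too many phis" failure stays distinct from the "no phi, no `HSelect` needed" success.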
true_value = true_block->GetFirstInstruction()->InputAt(0); false_value = false_block->GetFirstInstruction()->InputAt(0); - } else if (phi != nullptr) { - true_value = phi->InputAt(predecessor_index_true); - false_value = phi->InputAt(predecessor_index_false); } else { - return false; + std::tie(at_most_one_phi_with_different_inputs, phi) = HasAtMostOnePhiWithDifferentInputs( + merge_block, predecessor_index_true, predecessor_index_false); + if (!at_most_one_phi_with_different_inputs) { + return false; + } + if (phi != nullptr) { + true_value = phi->InputAt(predecessor_index_true); + false_value = phi->InputAt(predecessor_index_false); + } // else we don't need to create a `HSelect` at all. } - DCHECK(both_successors_return || phi != nullptr); + DCHECK(both_successors_return || at_most_one_phi_with_different_inputs); // Create the Select instruction and insert it in front of the If. HInstruction* condition = if_instruction->InputAt(0); - HSelect* select = new (graph_->GetAllocator()) HSelect(condition, - true_value, - false_value, - if_instruction->GetDexPc()); - if (both_successors_return) { - if (true_value->GetType() == DataType::Type::kReference) { - DCHECK(false_value->GetType() == DataType::Type::kReference); - ReferenceTypePropagation::FixUpSelectType(select, graph_->GetHandleCache()); + HSelect* select = nullptr; + if (both_successors_return || phi != nullptr) { + select = new (graph_->GetAllocator()) HSelect(condition, + true_value, + false_value, + if_instruction->GetDexPc()); + block->InsertInstructionBefore(select, if_instruction); + if (both_successors_return) { + if (true_value->GetType() == DataType::Type::kReference) { + DCHECK(false_value->GetType() == DataType::Type::kReference); + ReferenceTypePropagation::FixUpSelectType(select, graph_->GetHandleCache()); + } + false_block->GetFirstInstruction()->ReplaceInput(select, 0); + } else { + if (phi->GetType() == DataType::Type::kReference) { + select->SetReferenceTypeInfoIfValid(phi->GetReferenceTypeInfo()); + } + phi->ReplaceInput(select, predecessor_index_false); // We'll remove the true branch below. } - } else if (phi->GetType() == DataType::Type::kReference) { - select->SetReferenceTypeInfoIfValid(phi->GetReferenceTypeInfo()); - } - block->InsertInstructionBefore(select, if_instruction); - - // Remove the true branch which removes the corresponding Phi - // input if needed. If left only with the false branch, the Phi is - // automatically removed. - if (both_successors_return) { - false_block->GetFirstInstruction()->ReplaceInput(select, 0); - } else { - phi->ReplaceInput(select, predecessor_index_false); } - bool only_two_predecessors = (merge_block->GetPredecessors().size() == 2u); + // Remove the true branch which removes the corresponding Phi input if needed. + // If left only with the false branch, the Phi is automatically removed. true_block->DisconnectAndDelete(); // Merge remaining blocks which are now connected with Goto. DCHECK_EQ(block->GetSingleSuccessor(), false_block); block->MergeWith(false_block); - if (!both_successors_return && only_two_predecessors) { - DCHECK_EQ(only_two_predecessors, phi->GetBlock() == nullptr); + if (!both_successors_return && merge_block->GetPredecessors().size() == 1u) { + DCHECK_IMPLIES(phi != nullptr, phi->GetBlock() == nullptr); + DCHECK(merge_block->GetPhis().IsEmpty()); DCHECK_EQ(block->GetSingleSuccessor(), merge_block); block->MergeWith(merge_block); } @@ -190,20 +204,22 @@ bool HCodeFlowSimplifier::TryGenerateSelectSimpleDiamondPattern( // (since this runs after GVN). 
Lookup by condition, and reuse latest one if possible // (due to post order, latest select is most likely replacement). If needed, we could // improve this by e.g. using the operands in the map as well. - auto it = cache->find(condition); - if (it == cache->end()) { - cache->Put(condition, select); - } else { - // Found cached value. See if latest can replace cached in the HIR. - HSelect* cached_select = it->second; - DCHECK_EQ(cached_select->GetCondition(), select->GetCondition()); - if (cached_select->GetTrueValue() == select->GetTrueValue() && - cached_select->GetFalseValue() == select->GetFalseValue() && - select->StrictlyDominates(cached_select)) { - cached_select->ReplaceWith(select); - cached_select->GetBlock()->RemoveInstruction(cached_select); + if (select != nullptr) { + auto it = cache->find(condition); + if (it == cache->end()) { + cache->Put(condition, select); + } else { + // Found cached value. See if latest can replace cached in the HIR. + HSelect* cached_select = it->second; + DCHECK_EQ(cached_select->GetCondition(), select->GetCondition()); + if (cached_select->GetTrueValue() == select->GetTrueValue() && + cached_select->GetFalseValue() == select->GetFalseValue() && + select->StrictlyDominates(cached_select)) { + cached_select->ReplaceWith(select); + cached_select->GetBlock()->RemoveInstruction(cached_select); + } + it->second = select; // always cache latest } - it->second = select; // always cache latest } // No need to update dominance information, as we are simplifying diff --git a/compiler/optimizing/code_flow_simplifier_test.cc b/compiler/optimizing/code_flow_simplifier_test.cc index a382f0f6f6..8945e03619 100644 --- a/compiler/optimizing/code_flow_simplifier_test.cc +++ b/compiler/optimizing/code_flow_simplifier_test.cc @@ -58,7 +58,7 @@ TEST_F(CodeFlowSimplifierTest, testZeroCheckPreventsSelect) { ManuallyBuildEnvFor(instr, {param, graph_->GetIntConstant(1)}); EXPECT_FALSE(CheckGraphAndTryCodeFlowSimplifier()); - EXPECT_FALSE(phi->GetBlock() == nullptr); + EXPECT_INS_RETAINED(phi); } // Test that CodeFlowSimplifier succeeds with HAdd. @@ -68,7 +68,45 @@ TEST_F(CodeFlowSimplifierTest, testSelectWithAdd) { HAdd* instr = new (GetAllocator()) HAdd(DataType::Type::kInt32, param, param, /*dex_pc=*/ 0); HPhi* phi = ConstructBasicGraphForSelect(return_block, instr); EXPECT_TRUE(CheckGraphAndTryCodeFlowSimplifier()); - EXPECT_TRUE(phi->GetBlock() == nullptr); + EXPECT_INS_REMOVED(phi); +} + +// Test that CodeFlowSimplifier succeeds if there is an additional `HPhi` with identical inputs. +TEST_F(CodeFlowSimplifierTest, testSelectWithAddAndExtraPhi) { + // Create a graph with three blocks merging to the `return_block`. 
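A hedged sketch of the condition-keyed caching scheme in this hunk, with plain structs standing in for `HCondition`/`HSelect` and the dominance query reduced to a stub:

```cpp
#include <map>

// Illustrative stand-ins for HCondition and HSelect.
struct Cond {};
struct Sel {
  Cond* cond;
  int true_value;
  int false_value;
};

// Stub for HInstruction::StrictlyDominates(); always false in this sketch.
bool StrictlyDominates(const Sel*, const Sel*) { return false; }

// Look up by condition; if a previously cached select has identical operands
// and is strictly dominated by the new one, the cached select can be replaced
// in the HIR. Either way the latest select is cached, since post order makes
// it the most likely future replacement.
void CacheSelect(std::map<Cond*, Sel*>& cache, Sel* select) {
  auto it = cache.find(select->cond);
  if (it == cache.end()) {
    cache.emplace(select->cond, select);
    return;
  }
  Sel* cached = it->second;
  if (cached->true_value == select->true_value &&
      cached->false_value == select->false_value &&
      StrictlyDominates(select, cached)) {
    // Here the pass replaces `cached` with `select` and removes it.
  }
  it->second = select;  // Always cache the latest.
}
```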
+ HBasicBlock* return_block = InitEntryMainExitGraphWithReturnVoid(); + HParameterValue* bool_param1 = MakeParam(DataType::Type::kBool); + HParameterValue* bool_param2 = MakeParam(DataType::Type::kBool); + HParameterValue* param = MakeParam(DataType::Type::kInt32); + HInstruction* const0 = graph_->GetIntConstant(0); + auto [if_block1, left, mid] = CreateDiamondPattern(return_block, bool_param1); + HBasicBlock* if_block2 = AddNewBlock(); + if_block1->ReplaceSuccessor(mid, if_block2); + HBasicBlock* right = AddNewBlock(); + if_block2->AddSuccessor(mid); + if_block2->AddSuccessor(right); + HIf* if2 = MakeIf(if_block2, bool_param2); + right->AddSuccessor(return_block); + MakeGoto(right); + ASSERT_TRUE(PredecessorsEqual(return_block, {left, mid, right})); + HAdd* add = MakeBinOp<HAdd>(right, DataType::Type::kInt32, param, param); + HPhi* phi1 = MakePhi(return_block, {param, param, add}); + HPhi* phi2 = MakePhi(return_block, {param, const0, const0}); + + // Prevent second `HSelect` match. Do not rely on the "instructions per branch" limit. + MakeInvokeStatic(left, DataType::Type::kVoid, {}, {}); + + EXPECT_TRUE(CheckGraphAndTryCodeFlowSimplifier()); + + ASSERT_BLOCK_RETAINED(left); + ASSERT_BLOCK_REMOVED(mid); + ASSERT_BLOCK_REMOVED(right); + HInstruction* select = if2->GetPrevious(); // `HSelect` is inserted before `HIf`. + ASSERT_TRUE(select->IsSelect()); + ASSERT_INS_RETAINED(phi1); + ASSERT_TRUE(InputsEqual(phi1, {param, select})); + ASSERT_INS_RETAINED(phi2); + ASSERT_TRUE(InputsEqual(phi2, {param, const0})); } // Test `HSelect` optimization in an irreducible loop. @@ -84,10 +122,8 @@ TEST_F(CodeFlowSimplifierTest, testSelectInIrreducibleLoop) { HInstruction* const0 = graph_->GetIntConstant(0); HInstruction* const1 = graph_->GetIntConstant(1); - HPhi* right_phi = MakePhi(right_header, {const0, /* placeholder */ const0}); - HPhi* left_phi = MakePhi(left_header, {const1, right_phi}); - HAdd* add = MakeBinOp<HAdd>(body, DataType::Type::kInt32, left_phi, const1); - right_phi->ReplaceInput(add, 1u); // Update back-edge input. + auto [left_phi, right_phi, add] = + MakeLinearIrreducibleLoopVar(left_header, right_header, body, const1, const0, const1); HCondition* condition = MakeCondition(left_header, kCondGE, left_phi, n_param); MakeIf(left_header, condition); @@ -99,14 +135,14 @@ TEST_F(CodeFlowSimplifierTest, testSelectInIrreducibleLoop) { ASSERT_TRUE(loop_info != nullptr); ASSERT_TRUE(loop_info->IsIrreducible()); - EXPECT_TRUE(phi->GetBlock() == nullptr); + EXPECT_INS_REMOVED(phi); ASSERT_TRUE(if_block->GetFirstInstruction()->IsSelect()); ASSERT_EQ(if_block, add->GetBlock()); // Moved when merging blocks. 
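The `MakeLinearIrreducibleLoopVar()` helper this test now calls wraps the "placeholder, then back-patch" idiom for mutually referencing loop phis. A minimal sketch, with a hypothetical `Node` in place of `HInstruction`:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical stand-in for HInstruction: just an input list.
struct Node {
  std::vector<Node*> inputs;
};

// Two phis that feed each other cannot both be created with final inputs, so
// the first one gets a placeholder that is patched once the back-edge value
// (the add) exists -- the same shape MakeLinearIrreducibleLoopVar() builds.
void BuildLoopVar(Node& left_phi, Node& add, Node& right_phi,
                  Node* left_initial, Node* right_initial, Node* increment) {
  left_phi.inputs = {left_initial, left_initial};  // Second input: placeholder.
  add.inputs = {&left_phi, increment};
  right_phi.inputs = {right_initial, &add};
  left_phi.inputs[1] = &right_phi;  // Patch the back-edge input.
}
```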
for (HBasicBlock* removed_block : {then_block, else_block, body}) { + ASSERT_BLOCK_REMOVED(removed_block); uint32_t removed_block_id = removed_block->GetBlockId(); - ASSERT_TRUE(removed_block->GetGraph() == nullptr) << removed_block_id; ASSERT_FALSE(loop_info->GetBlocks().IsBitSet(removed_block_id)) << removed_block_id; } } diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h index 4afb78a0e2..2ca286ea6a 100644 --- a/compiler/optimizing/inliner.h +++ b/compiler/optimizing/inliner.h @@ -246,8 +246,7 @@ class HInliner : public HOptimization { HInstruction* cursor, HBasicBlock* bb_cursor); - HInstanceFieldGet* BuildGetReceiverClass(HInstruction* receiver, - uint32_t dex_pc) const + HInstanceFieldGet* BuildGetReceiverClass(HInstruction* receiver, uint32_t dex_pc) const REQUIRES_SHARED(Locks::mutator_lock_); void MaybeRunReferenceTypePropagation(HInstruction* replacement, diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 713806e217..5323ae2445 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -172,15 +172,11 @@ IntrinsicVisitor::ValueOfInfo IntrinsicVisitor::ComputeValueOfInfo( } MemberOffset IntrinsicVisitor::GetReferenceDisableIntrinsicOffset() { - ScopedObjectAccess soa(Thread::Current()); - ArtField* field = WellKnownClasses::java_lang_ref_Reference_disableIntrinsic; - return field->GetOffset(); + return WellKnownClasses::java_lang_ref_Reference_disableIntrinsic->GetOffset(); } MemberOffset IntrinsicVisitor::GetReferenceSlowPathEnabledOffset() { - ScopedObjectAccess soa(Thread::Current()); - ArtField* field = WellKnownClasses::java_lang_ref_Reference_slowPathEnabled; - return field->GetOffset(); + return WellKnownClasses::java_lang_ref_Reference_slowPathEnabled->GetOffset(); } void IntrinsicVisitor::CreateReferenceGetReferentLocations(HInvoke* invoke, diff --git a/compiler/optimizing/load_store_elimination_test.cc b/compiler/optimizing/load_store_elimination_test.cc index 1e5c7082a4..347a050a3b 100644 --- a/compiler/optimizing/load_store_elimination_test.cc +++ b/compiler/optimizing/load_store_elimination_test.cc @@ -123,7 +123,7 @@ class LoadStoreEliminationTestBase : public SuperTest, public OptimizingUnitTest // Return the pre-header and loop block. 
std::tuple<HBasicBlock*, HBasicBlock*> CreateDoWhileLoopWithInstructions( HBasicBlock* loop_exit, std::initializer_list<HInstruction*> suspend_check_env = {}) { - auto [pre_header, loop] = CreateDoWhileLoop(loop_exit); + auto [pre_header, loop, back_edge] = CreateWhileLoop(loop_exit); MakeSimpleLoopInstructions(loop, loop, suspend_check_env); return {pre_header, loop}; } diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h index 8115ea035d..d81f3804dc 100644 --- a/compiler/optimizing/optimizing_unit_test.h +++ b/compiler/optimizing/optimizing_unit_test.h @@ -90,6 +90,11 @@ inline std::ostream& operator<<(std::ostream& os, const InstructionDumper& id) { #define ASSERT_INS_REMOVED(a) ASSERT_TRUE(IsRemoved(a)) << "Not removed: " << (InstructionDumper{a}) #define ASSERT_INS_RETAINED(a) ASSERT_FALSE(IsRemoved(a)) << "Removed: " << (InstructionDumper{a}) +#define EXPECT_BLOCK_REMOVED(b) EXPECT_TRUE(IsRemoved(b)) << "Not removed: B" << b->GetBlockId() +#define EXPECT_BLOCK_RETAINED(b) EXPECT_FALSE(IsRemoved(b)) << "Removed: B" << b->GetBlockId() +#define ASSERT_BLOCK_REMOVED(b) ASSERT_TRUE(IsRemoved(b)) << "Not removed: B" << b->GetBlockId() +#define ASSERT_BLOCK_RETAINED(b) ASSERT_FALSE(IsRemoved(b)) << "Removed: B" << b->GetBlockId() + inline LiveInterval* BuildInterval(const size_t ranges[][2], size_t number_of_ranges, ScopedArenaAllocator* allocator, @@ -348,6 +353,8 @@ class OptimizingUnitTestHelper { // empty, leaving the construction of an appropriate condition and `HIf` to the caller. // Note: The `loop_exit` shall be the "then" successor of the "loop-header". If the `loop_exit` // is needed as the "else" successor, use `HBlock::SwapSuccessors()` to adjust the order. + // Note: A `do { ... } while (...);` loop pattern has the same block structure, except that + // the `loop_body` is a single-goto block that exists purely to avoid a critical edge. std::tuple<HBasicBlock*, HBasicBlock*, HBasicBlock*> CreateWhileLoop(HBasicBlock* loop_exit) { HBasicBlock* pre_header = AddNewBlock(); HBasicBlock* loop_header = AddNewBlock(); @@ -367,28 +374,6 @@ class OptimizingUnitTestHelper { return {pre_header, loop_header, loop_body}; } - // Insert "pre-header" and "loop" blocks before a given `loop_exit` block and connect them in a - // `do { ... } while (...);` loop pattern. Return the new blocks. Adds `HGoto` to the "pre-header" - // block but leaves the "loop" block empty, leaving the construction of an appropriate condition - // and `HIf` to the caller. - // Note: The `loop_exit` shall be the "then" successor of the "loop". If the `loop_exit` - // is needed as the "else" successor, use `HBlock::SwapSuccessors()` to adjust the order. 
- std::tuple<HBasicBlock*, HBasicBlock*> CreateDoWhileLoop(HBasicBlock* loop_exit) { - HBasicBlock* pre_header = AddNewBlock(); - HBasicBlock* loop = AddNewBlock(); - - HBasicBlock* predecessor = loop_exit->GetSinglePredecessor(); - predecessor->ReplaceSuccessor(loop_exit, pre_header); - - pre_header->AddSuccessor(loop); - loop->AddSuccessor(loop_exit); // true successor - loop->AddSuccessor(loop); // false successor - - MakeGoto(pre_header); - - return {pre_header, loop}; - } - // Insert blocks for an irreducible loop before the `loop_exit`: // // <loop_exit's old predecessor> @@ -923,6 +908,19 @@ class OptimizingUnitTestHelper { return {phi, add}; } + std::tuple<HPhi*, HPhi*, HAdd*> MakeLinearIrreducibleLoopVar(HBasicBlock* left_header, + HBasicBlock* right_header, + HBasicBlock* body, + HInstruction* left_initial, + HInstruction* right_initial, + HInstruction* increment) { + HPhi* left_phi = MakePhi(left_header, {left_initial, /* placeholder */ left_initial}); + HAdd* add = MakeBinOp<HAdd>(body, left_phi->GetType(), left_phi, increment); + HPhi* right_phi = MakePhi(right_header, {right_initial, add}); + left_phi->ReplaceInput(right_phi, 1u); // Update back-edge input. + return {left_phi, right_phi, add}; + } + dex::TypeIndex DefaultTypeIndexForType(DataType::Type type) { switch (type) { case DataType::Type::kBool: @@ -959,6 +957,26 @@ class OptimizingUnitTestHelper { return val; } + static bool PredecessorsEqual(HBasicBlock* block, + std::initializer_list<HBasicBlock*> expected) { + return RangeEquals(block->GetPredecessors(), expected); + } + + static bool InputsEqual(HInstruction* instruction, + std::initializer_list<HInstruction*> expected) { + return RangeEquals(instruction->GetInputs(), expected); + } + + // Returns if the `instruction` is removed from the graph. + static inline bool IsRemoved(HInstruction* instruction) { + return instruction->GetBlock() == nullptr; + } + + // Returns if the `block` is removed from the graph. + static inline bool IsRemoved(HBasicBlock* block) { + return block->GetGraph() == nullptr; + } + protected: bool CheckGraph(HGraph* graph, std::ostream& oss) { GraphChecker checker(graph); @@ -967,6 +985,12 @@ class OptimizingUnitTestHelper { return checker.IsValid(); } + template <typename Range, typename ElementType> + static bool RangeEquals(Range&& range, std::initializer_list<ElementType> expected) { + return std::distance(range.begin(), range.end()) == expected.size() && + std::equal(range.begin(), range.end(), expected.begin()); + } + std::vector<std::unique_ptr<const StandardDexFile>> dex_files_; std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_; @@ -1006,11 +1030,6 @@ inline std::string Patch(const std::string& original, const diff_t& diff) { return result; } -// Returns if the instruction is removed from the graph. 
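The new `RangeEquals()` helper compares a range against an `std::initializer_list`. A self-contained version of the same idea; the `std::size_t` cast is added here to sidestep the signed/unsigned comparison between `std::distance`'s result and `expected.size()`:

```cpp
#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <vector>

// A range matches iff the lengths agree and the elements compare equal
// position by position; PredecessorsEqual() and InputsEqual() wrap this.
template <typename Range, typename T>
bool RangeEquals(Range&& range, std::initializer_list<T> expected) {
  return static_cast<std::size_t>(std::distance(range.begin(), range.end())) ==
             expected.size() &&
         std::equal(range.begin(), range.end(), expected.begin());
}

int main() {
  std::vector<int> inputs = {1, 2, 3};
  return RangeEquals(inputs, {1, 2, 3}) ? 0 : 1;  // Returns 0: equal.
}
```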
-inline bool IsRemoved(HInstruction* instruction) { - return instruction->GetBlock() == nullptr; -} - inline std::ostream& operator<<(std::ostream& oss, const AdjacencyListGraph& alg) { return alg.Dump(oss); } diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 3e90a0881f..32b4557de0 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -276,12 +276,8 @@ static void BoundTypeForClassCheck(HInstruction* check) { return; } - { - ScopedObjectAccess soa(Thread::Current()); - ArtField* field = WellKnownClasses::java_lang_Object_shadowKlass; - if (field_get->GetFieldInfo().GetField() != field) { - return; - } + if (field_get->GetFieldInfo().GetField() != WellKnownClasses::java_lang_Object_shadowKlass) { + return; } if (check->IsIf()) { diff --git a/dex2oat/linker/image_write_read_test.cc b/dex2oat/linker/image_write_read_test.cc index aadcbea894..d7fb242fc9 100644 --- a/dex2oat/linker/image_write_read_test.cc +++ b/dex2oat/linker/image_write_read_test.cc @@ -78,6 +78,8 @@ void ImageWriteReadTest::TestWriteRead(ImageHeader::StorageMode storage_mode, return; } runtime_.reset(Runtime::Current()); + // Need well-known-classes. + WellKnownClasses::Init(Thread::Current()->GetJniEnv()); // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, // give it away now and then switch to a more managable ScopedObjectAccess. Thread::Current()->TransitionFromRunnableToSuspended(ThreadState::kNative); diff --git a/imgdiag/create_dirty_image_objects.py b/imgdiag/create_dirty_image_objects.py index da0ed2c918..cabb097015 100755 --- a/imgdiag/create_dirty_image_objects.py +++ b/imgdiag/create_dirty_image_objects.py @@ -101,6 +101,18 @@ def process_dirty_entries(entries, sort_type): return (dirty_obj_lines, sort_keys) +def split_dirty_objects(dirty_objects): + art_objects = list() + framework_objects = list() + for obj in dirty_objects: + obj_without_location = obj.split(' ', 1)[1] + is_art_module_object = obj.startswith('/apex/com.android.art/') + is_primitive_array = obj.startswith('primitive') + if is_art_module_object or is_primitive_array: + art_objects.append(obj_without_location) + else: + framework_objects.append(obj_without_location) + return art_objects, framework_objects def main(): parser = argparse.ArgumentParser( @@ -172,6 +184,12 @@ def main(): with open(args.output_filename, 'w') as f: f.writelines(dirty_image_objects) + art_objs, framework_objs = split_dirty_objects(dirty_image_objects) + with open('art_' + args.output_filename, 'w') as f: + f.writelines(art_objs) + with open('framework_' + args.output_filename, 'w') as f: + f.writelines(framework_objs) + if args.print_stats: print(','.join(k for k, v in entries), ',obj_count') total_count = 0 diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h index 1a0b910ced..5da6ac9920 100644 --- a/libartbase/base/common_art_test.h +++ b/libartbase/base/common_art_test.h @@ -228,7 +228,7 @@ class CommonArtTestImpl { protected: static bool IsHost() { - return !kIsTargetBuild; + return !art::kIsTargetBuild; } // Returns ${ANDROID_BUILD_TOP}. Ensure it has tailing /. 
@@ -310,44 +310,44 @@ using CommonArtTestWithParam = CommonArtTestBase<testing::TestWithParam<Param>>; std::vector<pid_t> GetPidByName(const std::string& process_name); #define TEST_DISABLED_FOR_TARGET() \ - if (kIsTargetBuild) { \ + if (art::kIsTargetBuild) { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR TARGET"; \ } #define TEST_DISABLED_FOR_HOST() \ - if (!kIsTargetBuild) { \ + if (!art::kIsTargetBuild) { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR HOST"; \ } #define TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS() \ - if (!kHostStaticBuildEnabled) { \ + if (!art::kHostStaticBuildEnabled) { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR NON-STATIC HOST BUILDS"; \ } #define TEST_DISABLED_FOR_DEBUG_BUILD() \ - if (kIsDebugBuild) { \ + if (art::kIsDebugBuild) { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR DEBUG BUILD"; \ } #define TEST_DISABLED_FOR_MEMORY_TOOL() \ - if (kRunningOnMemoryTool) { \ + if (art::kRunningOnMemoryTool) { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR MEMORY TOOL"; \ } #define TEST_DISABLED_FOR_HEAP_POISONING() \ - if (kPoisonHeapReferences) { \ + if (art::kPoisonHeapReferences) { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR HEAP POISONING"; \ } } // namespace art #define TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING() \ - if (kRunningOnMemoryTool && kPoisonHeapReferences) { \ + if (art::kRunningOnMemoryTool && art::kPoisonHeapReferences) { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING"; \ } #define TEST_DISABLED_FOR_USER_BUILD() \ if (std::string build_type = android::base::GetProperty("ro.build.type", ""); \ - kIsTargetBuild && build_type != "userdebug" && build_type != "eng") { \ + art::kIsTargetBuild && build_type != "userdebug" && build_type != "eng") { \ GTEST_SKIP() << "WARNING: TEST DISABLED FOR USER BUILD"; \ } diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h index 4b4a56a23f..db60c9443e 100644 --- a/libartbase/base/mem_map.h +++ b/libartbase/base/mem_map.h @@ -37,13 +37,9 @@ namespace art { -#if defined(__LP64__) && !defined(__Fuchsia__) && \ - (defined(__aarch64__) || defined(__riscv) || defined(__APPLE__)) +#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(_WINDOWS_) #define USE_ART_LOW_4G_ALLOCATOR 1 #else -#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__) -#error "Unrecognized 64-bit architecture." -#endif #define USE_ART_LOW_4G_ALLOCATOR 0 #endif diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc index 2c63539803..b3da8fbf52 100644 --- a/libartbase/base/mem_map_test.cc +++ b/libartbase/base/mem_map_test.cc @@ -354,6 +354,10 @@ TEST_F(MemMapTest, MapAnonymousEmpty) { } TEST_F(MemMapTest, MapAnonymousFailNullError) { + // Host system's mmap_min_addr configuration could allow for arbitrarily low addresses to be + // successfully mapped, breaking the expectation that the MapAnonymous call should fail. + TEST_DISABLED_FOR_HOST(); + CommonInit(); uint8_t* invalid_page[16]; // Use this address as mmap hint address. const size_t page_size = MemMap::GetPageSize(); diff --git a/libartbase/base/unix_file/fd_file_test.cc b/libartbase/base/unix_file/fd_file_test.cc index d5c3056393..374edc96e7 100644 --- a/libartbase/base/unix_file/fd_file_test.cc +++ b/libartbase/base/unix_file/fd_file_test.cc @@ -326,6 +326,9 @@ void FdFileTest::TestDataMatches(const FdFile* src, // Test that the file created by FdFileTest::CreateSparseSourceFile is sparse on the test // environment. 
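The sparse-file tests below lean on the same guard shown in this hunk. A minimal sketch of the pattern, where `kIsTargetBuild` is an illustrative stand-in value; the macro must expand inside the test body because `GTEST_SKIP()` returns from the enclosing function:

```cpp
#include <gtest/gtest.h>

constexpr bool kIsTargetBuild = false;  // Illustrative stand-in value.

// Expanding to a plain if-statement lets each test opt in with one line.
#define TEST_DISABLED_FOR_HOST()                       \
  if (!kIsTargetBuild) {                               \
    GTEST_SKIP() << "WARNING: TEST DISABLED FOR HOST"; \
  }

TEST(SparseFileTest, FilesystemDependent) {
  TEST_DISABLED_FOR_HOST();  // Sparsity support varies across host filesystems.
  // ... assertions that only hold on target devices ...
}
```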
TEST_F(FdFileTest, CopySparseCreateSparseFile) { + // Disable on host as sparsity is filesystem dependent and some hosts may break test assumptions. + TEST_DISABLED_FOR_HOST(); + // Create file with no empty prefix or suffix. std::unique_ptr<art::ScratchFile> src1; ASSERT_NO_FATAL_FAILURE(CreateSparseSourceFile(/*empty_prefix=*/0, /*empty_suffix=*/0, src1)); @@ -350,6 +353,9 @@ TEST_F(FdFileTest, CopySparseCreateSparseFile) { // Test complete copies of the source file produced by FdFileTest::CreateSparseSourceFile. TEST_F(FdFileTest, CopySparseFullCopy) { + // Disable on host as sparsity is filesystem dependent and some hosts may break test assumptions. + TEST_DISABLED_FOR_HOST(); + auto verify_fullcopy = [&](size_t empty_prefix, size_t empty_suffix) { SCOPED_TRACE(testing::Message() << "prefix:" << empty_prefix << ", suffix:" << empty_suffix); @@ -417,6 +423,9 @@ size_t FdFileTest::GetFilesystemBlockSize() { // Test partial copies of the source file produced by FdFileTest::CreateSparseSourceFile. TEST_F(FdFileTest, CopySparsePartialCopy) { + // Disable on host as sparsity is filesystem dependent and some hosts may break test assumptions. + TEST_DISABLED_FOR_HOST(); + size_t blocksize = GetFilesystemBlockSize(); ASSERT_GT(blocksize, 0u); @@ -502,6 +511,9 @@ TEST_F(FdFileTest, CopySparsePartialCopy) { // Test the case where the destination file's FD offset is non-zero before the copy. TEST_F(FdFileTest, CopySparseToNonZeroOffset) { + // Disable on host as sparsity is filesystem dependent and some hosts may break test assumptions. + TEST_DISABLED_FOR_HOST(); + std::unique_ptr<art::ScratchFile> src; ASSERT_NO_FATAL_FAILURE(CreateSparseSourceFile(/*empty_prefix=*/0u, /*empty_suffix=*/0u, src)); diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h index a8dabde716..702a78731d 100644 --- a/runtime/gc/accounting/space_bitmap-inl.h +++ b/runtime/gc/accounting/space_bitmap-inl.h @@ -32,6 +32,7 @@ namespace accounting { template<size_t kAlignment> inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj) { + DCHECK(obj != nullptr); uintptr_t addr = reinterpret_cast<uintptr_t>(obj); DCHECK_GE(addr, heap_begin_); const uintptr_t offset = addr - heap_begin_; @@ -232,6 +233,7 @@ void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) { template<size_t kAlignment> template<bool kSetBit> inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) { + DCHECK(obj != nullptr); uintptr_t addr = reinterpret_cast<uintptr_t>(obj); DCHECK_GE(addr, heap_begin_); DCHECK(HasAddress(obj)) << obj; diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc index cf5b483f53..e7498d91db 100644 --- a/runtime/gc/collector/mark_compact.cc +++ b/runtime/gc/collector/mark_compact.cc @@ -4530,21 +4530,15 @@ void MarkCompact::ScanObject(mirror::Object* obj) { usleep(1000); klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>(); if (klass != nullptr) { - std::ostringstream oss; - klass->DumpClass(oss, mirror::Class::kDumpClassFullDetail); - LOG(FATAL_WITHOUT_ABORT) << "klass pointer for obj: " << obj - << " found to be null first. Reloading after " << i - << " iterations of 1ms sleep fetched klass: " << oss.str(); break; } } - - if (UNLIKELY(klass == nullptr)) { + if (klass == nullptr) { // It must be heap corruption. 
LOG(FATAL_WITHOUT_ABORT) << "klass pointer for obj: " << obj << " found to be null."; + heap_->GetVerification()->LogHeapCorruption( + obj, mirror::Object::ClassOffset(), klass, /*fatal=*/true); } - heap_->GetVerification()->LogHeapCorruption( - obj, mirror::Object::ClassOffset(), klass, /*fatal=*/true); } // The size of `obj` is used both here (to update `bytes_scanned_`) and in // `UpdateLivenessInfo`. As fetching this value can be expensive, do it once diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 531cbcf97b..dcb25a08d7 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -2688,7 +2688,7 @@ void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1, const auto* limit = stack->End(); for (auto* it = stack->Begin(); it != limit; ++it) { const mirror::Object* obj = it->AsMirrorPtr(); - if (!kUseThreadLocalAllocationStack || obj != nullptr) { + if (obj != nullptr) { if (bitmap1->HasAddress(obj)) { bitmap1->Set(obj); } else if (bitmap2->HasAddress(obj)) { diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc index 18e91da75d..74d2970c0d 100644 --- a/runtime/gc/heap_test.cc +++ b/runtime/gc/heap_test.cc @@ -190,7 +190,7 @@ TEST_F(HeapTest, GCMetrics) { // young-generation collections) is null (b/271112044). Temporarily // suspend the following checks while we investigate. // - // TODO(b/271112044): Investigate and adjust these expectations and/or the + // TODO(b/271990567): Investigate and adjust these expectations and/or the // corresponding metric logic. #if 0 EXPECT_PRED2(AnyIsFalse, full_gc_duration->IsNull(), young_gc_duration->IsNull()); @@ -209,8 +209,15 @@ TEST_F(HeapTest, GCMetrics) { EXPECT_FALSE(full_gc_scanned_bytes_delta->IsNull()); EXPECT_FALSE(full_gc_freed_bytes->IsNull()); EXPECT_FALSE(full_gc_freed_bytes_delta->IsNull()); + // Like the generational case, these GC duration can be less than a + // millisecond here as well (b/391531096). Temporarily disabling the + // tests. + // TODO(b/271990567): Possibly make the GCs above more time consuming to + // avoid the situation. +#if 0 EXPECT_FALSE(full_gc_duration->IsNull()); EXPECT_FALSE(full_gc_duration_delta->IsNull()); +#endif EXPECT_TRUE(young_gc_collection_time->IsNull()); EXPECT_TRUE(young_gc_count->IsNull()); diff --git a/runtime/trace_profile.cc b/runtime/trace_profile.cc index 6aa09303d4..d0d234c476 100644 --- a/runtime/trace_profile.cc +++ b/runtime/trace_profile.cc @@ -67,6 +67,7 @@ int TraceProfiler::num_trace_stop_tasks_ = 0; TraceData* TraceProfiler::trace_data_ = nullptr; void TraceData::AddTracedThread(Thread* thread) { + MutexLock mu(Thread::Current(), trace_data_lock_); size_t thread_id = thread->GetTid(); if (traced_threads_.find(thread_id) != traced_threads_.end()) { return; @@ -105,10 +106,8 @@ LowOverheadTraceType TraceProfiler::GetTraceType() { } namespace { -void RecordMethodsOnThreadStack(Thread* thread, TraceData* trace_data) - REQUIRES(Locks::mutator_lock_) { - Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); - +void RecordMethodsOnThreadStack(Thread* thread, uintptr_t* method_trace_buffer) + REQUIRES_SHARED(Locks::mutator_lock_) { struct MethodEntryStackVisitor final : public StackVisitor { MethodEntryStackVisitor(Thread* thread_in, Context* context) : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kSkipInlinedFrames) {} @@ -141,23 +140,48 @@ void RecordMethodsOnThreadStack(Thread* thread, TraceData* trace_data) visitor.WalkStack(true); // Create method entry events for all methods currently on the thread's stack. 
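The `MutexLock` added to `AddTracedThread()` pairs with the `GUARDED_BY(trace_data_lock_)` annotations this change adds in trace_profile.h. A sketch of that pattern, assuming Clang's `-Wthread-safety` analysis with an annotated mutex type (ART's own `Mutex` carries these annotations; `std::mutex` only does under libc++ with annotations enabled):

```cpp
#include <cstddef>
#include <mutex>
#include <string>
#include <unordered_map>

// Clang thread-safety annotation; ART's base macros provide the same thing.
#define GUARDED_BY(x) __attribute__((guarded_by(x)))

class TracedThreads {
 public:
  // Insert-if-absent under the lock, mirroring TraceData::AddTracedThread().
  void Add(std::size_t tid, const std::string& name) {
    std::lock_guard<std::mutex> mu(lock_);  // Plays the role of MutexLock.
    if (threads_.find(tid) != threads_.end()) {
      return;  // Already recorded.
    }
    threads_.emplace(tid, name);
  }

 private:
  std::mutex lock_;
  // The analysis rejects any access to this map without holding `lock_`.
  std::unordered_map<std::size_t, std::string> threads_ GUARDED_BY(lock_);
};
```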
- // Annotate them with an 'S' to indicate they are methods at startup and the entry timestamp - // isn't accurate. - uintptr_t* method_trace_buffer = thread->GetMethodTraceBuffer(); uint64_t init_time = TimestampCounter::GetMicroTime(TimestampCounter::GetTimestamp()); + // Set the lsb to 0 to indicate method entry. + init_time = init_time & ~1; std::ostringstream os; os << "Thread:" << thread->GetTid() << "\n"; + size_t index = kAlwaysOnTraceBufSize - 1; for (auto smi = visitor.stack_methods_.rbegin(); smi != visitor.stack_methods_.rend(); smi++) { - os << "S->" << *smi << " " << init_time << "\n"; - trace_data->AddTracedMethod(*smi); + method_trace_buffer[index--] = reinterpret_cast<uintptr_t>(*smi); + method_trace_buffer[index--] = init_time; + + if (index < kMaxEntriesAfterFlush) { + // To keep the implementation simple, ignore methods deep down the stack. If the call stack + // unwinds beyond this point then we will see method exits without corresponding method + // entries. + break; + } } // Record a placeholder method exit event into the buffer so we record method exits for the // methods that are currently on stack. - method_trace_buffer[kAlwaysOnTraceBufSize - 1] = 0x1; - thread->SetMethodTraceBufferCurrentEntry(kAlwaysOnTraceBufSize - 1); - trace_data->AppendToLongRunningMethods(os.str()); - trace_data->AddTracedThread(thread); + method_trace_buffer[index] = 0x1; + thread->SetMethodTraceBufferCurrentEntry(index); +} + +// Records the thread and method info. +void DumpThreadMethodInfo(const std::unordered_map<size_t, std::string>& traced_threads, + const std::unordered_set<ArtMethod*>& traced_methods, + std::ostringstream& os) REQUIRES_SHARED(Locks::mutator_lock_) { + // Dump data about thread information. + os << "\n*threads\n"; + for (const auto& it : traced_threads) { + os << it.first << "\t" << it.second << "\n"; + } + + // Dump data about method information. + os << "*methods\n"; + for (ArtMethod* method : traced_methods) { + ArtMethod* method_ptr = reinterpret_cast<ArtMethod*>(method); + os << method_ptr << "\t" << GetMethodInfoLine(method); + } + + os << "*end"; } } // namespace @@ -168,6 +192,40 @@ class TraceStopTask : public gc::HeapTask { void Run([[maybe_unused]] Thread* self) override { TraceProfiler::TraceTimeElapsed(); } }; +class TraceStartCheckpoint final : public Closure { + public: + explicit TraceStartCheckpoint(LowOverheadTraceType type) : trace_type_(type), barrier_(0) {} + + void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) { + auto buffer = new uintptr_t[kAlwaysOnTraceBufSize]; + + if (trace_type_ == LowOverheadTraceType::kLongRunningMethods) { + // Record methods that are currently on stack. + RecordMethodsOnThreadStack(thread, buffer); + thread->UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType::kLongRunningMethods); + } else { + memset(buffer, 0, kAlwaysOnTraceBufSize * sizeof(uintptr_t)); + thread->UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType::kAllMethods); + } + thread->SetMethodTraceBuffer(buffer, kAlwaysOnTraceBufSize); + barrier_.Pass(Thread::Current()); + } + + void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) { + Thread* self = Thread::Current(); + ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun); + barrier_.Increment(self, threads_running_checkpoint); + } + + private: + LowOverheadTraceType trace_type_; + + // The barrier to be passed through and for the requestor to wait upon. 
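A standalone sketch of how this hunk seeds the per-thread buffer: `(method, timestamp)` pairs are written from the top of the buffer downwards, a floor drops frames deep in the stack, and a `0x1` placeholder marks where exits for still-live frames will land. `kBufSize` and `kFloor` are stand-ins for `kAlwaysOnTraceBufSize` and `kMaxEntriesAfterFlush`:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t kBufSize = 512;  // Stand-in for kAlwaysOnTraceBufSize.
constexpr std::size_t kFloor = 16;     // Stand-in for kMaxEntriesAfterFlush.

// Returns the new current-entry index after seeding `buf`.
std::size_t SeedBuffer(uintptr_t* buf,
                       const std::vector<uintptr_t>& stack_methods,  // Outermost first.
                       uint64_t init_time_us) {
  init_time_us &= ~uint64_t{1};  // Clear the lsb: 0 marks a method entry.
  std::size_t index = kBufSize - 1;
  for (uintptr_t method : stack_methods) {
    buf[index--] = method;
    buf[index--] = static_cast<uintptr_t>(init_time_us);
    if (index < kFloor) {
      break;  // Ignore methods deeper down the stack; their exits will show
              // up without matching entries, which keeps things simple.
    }
  }
  buf[index] = 0x1;  // Placeholder exit event for the methods still on stack.
  return index;
}
```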
+ Barrier barrier_; + + DISALLOW_COPY_AND_ASSIGN(TraceStartCheckpoint); +}; + void TraceProfiler::Start(LowOverheadTraceType trace_type, uint64_t trace_duration_ns) { if (!art_flags::always_enable_profile_code()) { LOG(ERROR) << "Feature not supported. Please build with ART_ALWAYS_ENABLE_PROFILE_CODE."; @@ -190,26 +248,16 @@ void TraceProfiler::Start(LowOverheadTraceType trace_type, uint64_t trace_durati profile_in_progress_ = true; trace_data_ = new TraceData(trace_type); - { - ScopedSuspendAll ssa(__FUNCTION__); - MutexLock tl(self, *Locks::thread_list_lock_); - for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) { - auto buffer = new uintptr_t[kAlwaysOnTraceBufSize]; - memset(buffer, 0, kAlwaysOnTraceBufSize * sizeof(uintptr_t)); - thread->SetMethodTraceBuffer(buffer, kAlwaysOnTraceBufSize); - if (trace_type == LowOverheadTraceType::kLongRunningMethods) { - // Record methods that are currently on stack. - RecordMethodsOnThreadStack(thread, trace_data_); - thread->UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType::kLongRunningMethods); - } else { - thread->UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType::kAllMethods); - } - } + Runtime* runtime = Runtime::Current(); + TraceStartCheckpoint checkpoint(trace_type); + size_t threads_running_checkpoint = runtime->GetThreadList()->RunCheckpoint(&checkpoint); + if (threads_running_checkpoint != 0) { + checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint); } if (trace_type == LowOverheadTraceType::kLongRunningMethods) { // Add a Task that stops the tracing after trace_duration. - Runtime::Current()->GetHeap()->AddHeapTask(new TraceStopTask(NanoTime() + trace_duration_ns)); + runtime->GetHeap()->AddHeapTask(new TraceStopTask(NanoTime() + trace_duration_ns)); num_trace_stop_tasks_++; } } @@ -235,7 +283,7 @@ void TraceProfiler::StopLocked() { return; } - FunctionClosure reset_buffer([](Thread* thread) { + static FunctionClosure reset_buffer([](Thread* thread) { auto buffer = thread->GetMethodTraceBuffer(); if (buffer != nullptr) { delete[] buffer; @@ -251,25 +299,7 @@ void TraceProfiler::StopLocked() { trace_data_ = nullptr; } -void TraceProfiler::DumpThreadMethodInfo( - const std::unordered_map<size_t, std::string>& traced_threads, - const std::unordered_set<ArtMethod*>& traced_methods, - std::ostringstream& os) { - // Dump data about thread information. - os << "\n*threads\n"; - for (const auto& it : traced_threads) { - os << it.first << "\t" << it.second << "\n"; - } - - // Dump data about method information. 
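Both `TraceStartCheckpoint` and the `TraceDumpCheckpoint` further down use the same barrier handshake with `RunCheckpoint()`. A minimal sketch with `std::condition_variable` standing in for ART's internal `Barrier`:

```cpp
#include <condition_variable>
#include <cstddef>
#include <mutex>

// Each checkpointed thread calls Pass() once; the requesting thread blocks in
// WaitFor() until as many passes have happened as RunCheckpoint() reported.
class CheckpointBarrier {
 public:
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    ++passed_;
    cv_.notify_all();
  }

  void WaitFor(std::size_t threads_running_checkpoint) {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [&] { return passed_ >= threads_running_checkpoint; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::size_t passed_ = 0;
};
```

Replacing the previous `ScopedSuspendAll` plus `thread_list_lock_` walk with a checkpoint lets each thread set up its own trace buffer while the runtime keeps running, which is the point of this rework.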
- os << "*methods\n"; - for (ArtMethod* method : traced_methods) { - ArtMethod* method_ptr = reinterpret_cast<ArtMethod*>(method); - os << method_ptr << "\t" << GetMethodInfoLine(method); - } - os << "*end"; -} uint8_t* TraceProfiler::DumpBuffer(uint32_t thread_id, uintptr_t* method_trace_entries, @@ -558,37 +588,53 @@ std::string TraceProfiler::GetLongRunningMethodsString() { return GetLongRunningMethodsStringLocked(); } +void TraceDumpCheckpoint::Run(Thread* thread) { + auto method_trace_entries = thread->GetMethodTraceBuffer(); + if (method_trace_entries != nullptr) { + std::unordered_set<ArtMethod*> traced_methods; + uintptr_t* method_trace_curr_ptr = *(thread->GetTraceBufferCurrEntryPtr()); + std::ostringstream os; + TraceProfiler::DumpLongRunningMethodBuffer( + thread->GetTid(), method_trace_entries, method_trace_curr_ptr, traced_methods, os); + trace_data_->AddTracedThread(thread); + trace_data_->AddTracedMethods(traced_methods); + trace_data_->AppendToLongRunningMethods(os.str()); + } + barrier_.Pass(Thread::Current()); +} + +void TraceDumpCheckpoint::WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) { + Thread* self = Thread::Current(); + ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun); + barrier_.Increment(self, threads_running_checkpoint); +} + std::string TraceProfiler::GetLongRunningMethodsStringLocked() { Thread* self = Thread::Current(); std::ostringstream os; - ScopedSuspendAll ssa(__FUNCTION__); - MutexLock tl(self, *Locks::thread_list_lock_); - - // Get any data that was previously flushed. - const std::string& old_data = trace_data_->GetLongRunningMethods(); - if (old_data.length() > 0) { - os << trace_data_->GetLongRunningMethods(); + // Collect long running methods from all the threads; + Runtime* runtime = Runtime::Current(); + TraceDumpCheckpoint checkpoint(trace_data_); + size_t threads_running_checkpoint = runtime->GetThreadList()->RunCheckpoint(&checkpoint); + if (threads_running_checkpoint != 0) { + checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint); } - // Collect long running methods from all the threads; - std::unordered_set<ArtMethod*> traced_methods; - for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) { - auto method_trace_entries = thread->GetMethodTraceBuffer(); - if (method_trace_entries == nullptr) { - continue; - } + trace_data_->DumpData(os); + return os.str(); +} - trace_data_->AddTracedThread(thread); - uintptr_t* method_trace_curr_ptr = *(thread->GetTraceBufferCurrEntryPtr()); - DumpLongRunningMethodBuffer( - thread->GetTid(), method_trace_entries, method_trace_curr_ptr, traced_methods, os); +void TraceData::DumpData(std::ostringstream& os) { + MutexLock mu(Thread::Current(), trace_data_lock_); + if (long_running_methods_.length() > 0) { + os << long_running_methods_; } - trace_data_->AddTracedMethods(traced_methods); - // Dump the information about traced_methods and threads - DumpThreadMethodInfo(trace_data_->GetTracedThreads(), trace_data_->GetTracedMethods(), os); - return os.str(); + { + ScopedObjectAccess soa(Thread::Current()); + DumpThreadMethodInfo(traced_threads_, traced_methods_, os); + } } void TraceProfiler::DumpLongRunningMethods(std::unique_ptr<File>&& trace_file) { diff --git a/runtime/trace_profile.h b/runtime/trace_profile.h index c71cb2aee6..b56ac86673 100644 --- a/runtime/trace_profile.h +++ b/runtime/trace_profile.h @@ -22,6 +22,8 @@ #include "base/locks.h" #include "base/macros.h" #include "base/os.h" +#include "thread.h" +#include 
"thread_pool.h" namespace art HIDDEN { @@ -43,34 +45,30 @@ enum class LowOverheadTraceType { class TraceData { public: - explicit TraceData(LowOverheadTraceType trace_type) : trace_type_(trace_type) { - } + explicit TraceData(LowOverheadTraceType trace_type) + : trace_type_(trace_type), + trace_data_lock_("Trace Data lock", LockLevel::kGenericBottomLock) {} LowOverheadTraceType GetTraceType() const { return trace_type_; } - const std::string& GetLongRunningMethods() const { - return long_running_methods_; - } - - const std::unordered_set<ArtMethod*>& GetTracedMethods() const { - return traced_methods_; - } - - const std::unordered_map<size_t, std::string>& GetTracedThreads() const { - return traced_threads_; - } + // Dumps events collected in long_running_methods_ and the information about + // threads and methods into the output stream. + void DumpData(std::ostringstream& os); void AppendToLongRunningMethods(const std::string& str) { + MutexLock mu(Thread::Current(), trace_data_lock_); long_running_methods_.append(str); } void AddTracedMethods(std::unordered_set<ArtMethod*>& methods) { + MutexLock mu(Thread::Current(), trace_data_lock_); traced_methods_.merge(methods); } void AddTracedMethod(ArtMethod* method) { + MutexLock mu(Thread::Current(), trace_data_lock_); traced_methods_.insert(method); } @@ -79,17 +77,36 @@ class TraceData { private: // This is used to hold the initial methods on stack and also long running methods when there is a // buffer overflow. - std::string long_running_methods_; + std::string long_running_methods_ GUARDED_BY(trace_data_lock_); LowOverheadTraceType trace_type_; // These hold the methods and threads see so far. These are used to generate information about // the methods and threads. - std::unordered_set<ArtMethod*> traced_methods_; + std::unordered_set<ArtMethod*> traced_methods_ GUARDED_BY(trace_data_lock_); // Threads might exit before we dump the data, so record thread id and name when we see a new // thread. - std::unordered_map<size_t, std::string> traced_threads_; + std::unordered_map<size_t, std::string> traced_threads_ GUARDED_BY(trace_data_lock_); + + // Lock to synchronize access to traced_methods_, traced_threads_ and long_running_methods_ which + // can be accessed simultaneously by multiple threads when running TraceDumpCheckpoint. + Mutex trace_data_lock_; +}; + +class TraceDumpCheckpoint final : public Closure { + public: + explicit TraceDumpCheckpoint(TraceData* trace_data) : barrier_(0), trace_data_(trace_data) {} + + void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_); + void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint); + + private: + // The barrier to be passed through and for the requestor to wait upon. + Barrier barrier_; + + // Trace data to record the data from each thread. + TraceData* trace_data_; }; // This class implements low-overhead tracing. This feature is available only when @@ -178,11 +195,6 @@ class TraceProfiler { std::unordered_set<ArtMethod*>& methods /* out */, std::ostringstream& os); - // Records the thread and method info. - static void DumpThreadMethodInfo(const std::unordered_map<size_t, std::string>& traced_threads, - const std::unordered_set<ArtMethod*>& traced_methods, - std::ostringstream& os) REQUIRES(Locks::mutator_lock_); - static bool profile_in_progress_ GUARDED_BY(Locks::trace_lock_); // Keeps track of number of outstanding trace stop tasks. 
We should only stop a trace when the @@ -192,6 +204,7 @@ class TraceProfiler { static TraceData* trace_data_ GUARDED_BY(Locks::trace_lock_); + friend class TraceDumpCheckpoint; DISALLOW_COPY_AND_ASSIGN(TraceProfiler); }; diff --git a/test/463-checker-boolean-simplifier/smali/Main2.smali b/test/463-checker-boolean-simplifier/smali/Main2.smali index b10e61022c..9bf0bc1b13 100644 --- a/test/463-checker-boolean-simplifier/smali/Main2.smali +++ b/test/463-checker-boolean-simplifier/smali/Main2.smali @@ -231,11 +231,11 @@ ## CHECK-DAG: <<Const1:i\d+>> IntConstant 1 ## CHECK-DAG: <<Const13:i\d+>> IntConstant 13 ## CHECK-DAG: <<Const42:i\d+>> IntConstant 42 -## CHECK-DAG: <<PhiX:i\d+>> Phi [<<Const0>>,<<Const13>>,<<Const42>>] -## CHECK-DAG: <<PhiY:i\d+>> Phi [<<Const1>>,<<Add:i\d+>>,<<Add>>] +## CHECK-DAG: <<PhiX:i\d+>> Phi [<<Const0>>,<<Select:i\d+>>] +## CHECK-DAG: <<PhiY:i\d+>> Phi [<<Const1>>,<<Add:i\d+>>] ## CHECK-DAG: <<Add>> Add [<<PhiY>>,<<Const1>>] ## CHECK-DAG: <<Cond:z\d+>> LessThanOrEqual [<<Add>>,<<Const1>>] -## CHECK-DAG: If [<<Cond>>] +## CHECK-DAG: <<Select>> Select [<<Const13>>,<<Const42>>,<<Cond>>] ## CHECK-DAG: Return [<<PhiX>>] # The original java source of this method: diff --git a/test/663-checker-select-generator/src/Main.java b/test/663-checker-select-generator/src/Main.java index 6a5564b1c1..631210c9bc 100644 --- a/test/663-checker-select-generator/src/Main.java +++ b/test/663-checker-select-generator/src/Main.java @@ -106,9 +106,8 @@ public class Main { /// CHECK-DAG: <<Bool2:z\d+>> ParameterValue /// CHECK-DAG: <<Const10:i\d+>> IntConstant 10 /// CHECK-DAG: <<Const20:i\d+>> IntConstant 20 - /// CHECK-DAG: <<Select:i\d+>> Select [<<Const20>>,<<Const20>>,<<Bool2>>] - /// CHECK-DAG: <<Select2:i\d+>> Select [<<Select>>,<<Const10>>,<<Bool1>>] - /// CHECK-DAG: Return [<<Select2>>] + /// CHECK-DAG: <<Select:i\d+>> Select [<<Const20>>,<<Const10>>,<<Bool1>>] + /// CHECK-DAG: Return [<<Select>>] private static int $noinline$testDoubleDiamondSameValueButNotAllOuter(boolean bool_param_1, boolean bool_param_2) { int return_value; if (bool_param_1) { diff --git a/tools/ahat/Android.bp b/tools/ahat/Android.bp index 6db914b277..57d239cc54 100644 --- a/tools/ahat/Android.bp +++ b/tools/ahat/Android.bp @@ -62,3 +62,13 @@ cc_library_shared { header_libs: ["jni_headers"], host_supported: true, } + +java_genrule { + name: "ahat-test-dump-gen", + srcs: [ + "ahat-test-dump-gen.sh.in", + ":ahat-test-dump", + ], + out: ["ahat-test-dump-gen.sh"], + cmd: "sed -e s=@AHAT_TEST_DUMP_JAR@=$(location :ahat-test-dump)= $(location ahat-test-dump-gen.sh.in) > $(out)", +} diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk index 5db78e2765..8a1ab87303 100644 --- a/tools/ahat/Android.mk +++ b/tools/ahat/Android.mk @@ -18,75 +18,6 @@ LOCAL_PATH := $(call my-dir) include art/build/Android.common_path.mk -# The ahat tests rely on running ART to generate a heap dump for test, but ART -# doesn't run on darwin. Only build and run the tests for linux. -# There are also issues with running under instrumentation. 
-ifeq ($(HOST_OS),linux) -ifneq ($(EMMA_INSTRUMENT),true) - -# Determine the location of the test-dump.jar, test-dump.hprof, and proguard -AHAT_TEST_DUMP_JAR := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-test-dump)/javalib.jar -AHAT_TEST_DUMP_COMMON := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-test-dump,,COMMON) -AHAT_TEST_DUMP_JNI := $(ART_HOST_OUT_SHARED_LIBRARIES)/libahat-test-jni$(ART_HOST_SHLIB_EXTENSION) -AHAT_TEST_DUMP_HPROF := $(AHAT_TEST_DUMP_COMMON)/test-dump.hprof -AHAT_TEST_DUMP_BASE_HPROF := $(AHAT_TEST_DUMP_COMMON)/test-dump-base.hprof -AHAT_TEST_DUMP_PROGUARD_MAP := $(AHAT_TEST_DUMP_COMMON)/test-dump.map -AHAT_TEST_DUMP_PROGUARD_DICTIONARY := $(AHAT_TEST_DUMP_COMMON)/proguard_dictionary - -# Directories to use for ANDROID_DATA when generating the test dumps to -# ensure we don't pollute the source tree with any artifacts from running -# dalvikvm. -AHAT_TEST_DUMP_ANDROID_DATA := $(AHAT_TEST_DUMP_COMMON)/test-dump-android_data -AHAT_TEST_DUMP_BASE_ANDROID_DATA := $(AHAT_TEST_DUMP_COMMON)/test-dump-base-android_data - -# Generate the proguard map in the desired location by copying it from -# wherever the build system generates it by default. -$(AHAT_TEST_DUMP_PROGUARD_MAP): PRIVATE_AHAT_SOURCE_PROGUARD_MAP := $(AHAT_TEST_DUMP_PROGUARD_DICTIONARY) -$(AHAT_TEST_DUMP_PROGUARD_MAP): $(AHAT_TEST_DUMP_PROGUARD_DICTIONARY) - cp $(PRIVATE_AHAT_SOURCE_PROGUARD_MAP) $@ - -ifeq (true,$(HOST_PREFER_32_BIT)) - AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm32 - AHAT_TEST_DALVIKVM_ARG := --32 -else - AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm64 - AHAT_TEST_DALVIKVM_ARG := --64 -endif - -# Run ahat-test-dump.jar to generate test-dump.hprof and test-dump-base.hprof -# The scripts below are run with --no-compile to avoid dependency on dex2oat. -AHAT_TEST_DUMP_DEPENDENCIES := \ - $(AHAT_TEST_DALVIKVM_DEP) \ - $(AHAT_TEST_DUMP_JNI) \ - $(ART_HOST_SHARED_LIBRARY_DEPENDENCIES) \ - $(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES) \ - $(ART_HOST_DEX_DEPENDENCIES) \ - $(HOST_OUT_EXECUTABLES)/art \ - $(HOST_CORE_IMG_OUTS) - -$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art -$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR) -$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_ANDROID_DATA) -$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG) -$(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES) - rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA) - mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA) - ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \ - $(PRIVATE_AHAT_TEST_ART) --no-compile -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \ - -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ - -$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art -$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR) -$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_BASE_ANDROID_DATA) -$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG) -$(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES) - rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA) - mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA) - ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \ - $(PRIVATE_AHAT_TEST_ART) --no-compile -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \ - -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base - - # Determine the location of the ri-test-dump.jar and ri-test-dump.hprof. 
AHAT_RI_TEST_DUMP_JAR := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-ri-test-dump,HOST)/javalib.jar AHAT_RI_TEST_DUMP_COMMON := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-ri-test-dump,HOST,COMMON) @@ -104,9 +35,9 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES := $(call all-java-files-under, src/test) LOCAL_JAR_MANIFEST := etc/ahat-tests.mf LOCAL_JAVA_RESOURCE_FILES := \ - $(AHAT_TEST_DUMP_HPROF) \ - $(AHAT_TEST_DUMP_BASE_HPROF) \ - $(AHAT_TEST_DUMP_PROGUARD_MAP) \ + $(LOCAL_PATH)/etc/test-dump.hprof \ + $(LOCAL_PATH)/etc/test-dump-base.hprof \ + $(LOCAL_PATH)/etc/test-dump.map \ $(AHAT_RI_TEST_DUMP_HPROF) \ $(LOCAL_PATH)/etc/L.hprof \ $(LOCAL_PATH)/etc/O.hprof \ @@ -123,20 +54,8 @@ LOCAL_COMPATIBILITY_SUITE := general-tests include $(BUILD_HOST_JAVA_LIBRARY) AHAT_TEST_JAR := $(LOCAL_BUILT_MODULE) -endif # EMMA_INSTRUMENT -endif # linux - # Clean up local variables. AHAT_TEST_JAR := -AHAT_TEST_DUMP_JAR := -AHAT_TEST_DUMP_JNI := -AHAT_TEST_DUMP_COMMON := -AHAT_TEST_DUMP_HPROF := -AHAT_TEST_DUMP_BASE_HPROF := -AHAT_TEST_DUMP_PROGUARD_MAP := -AHAT_TEST_DUMP_DEPENDENCIES := -AHAT_TEST_DUMP_ANDROID_DATA := -AHAT_TEST_DUMP_BASE_ANDROID_DATA := AHAT_RI_TEST_DUMP_JAR := AHAT_RI_TEST_DUMP_COMMON := diff --git a/tools/ahat/ahat-test-dump-gen.sh.in b/tools/ahat/ahat-test-dump-gen.sh.in new file mode 100755 index 0000000000..ea6b3b4481 --- /dev/null +++ b/tools/ahat/ahat-test-dump-gen.sh.in @@ -0,0 +1,46 @@ +#!/bin/bash +# +# Copyright (C) 2025 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ahat-test-dump-gen.sh.in is an input template for a script to re-generate +# the test dump hprof files. The files should be regenerated whenever there +# are changes to src/test-dump/*. +# +# To regenerate the test dump files: +# $ m ahat-test-dump-gen +# $ croot +# $ bash out/soong/.intermediates/art/tools/ahat/ahat-test-dump-gen/android_common/gen/ahat-test-dump-gen.sh +# +# The outputs are placed in the etc/ directory where they can be checked in to +# be used by the ahat tests. Note: You'll see a lot of error messages from +# running the script that should be safe to ignore, as long as you see +# etc/test-dump.hprof and etc/test-dump-base.hprof being generated. + +AHAT_ETC_DIR=${ANDROID_BUILD_TOP}/art/tools/ahat/etc +AHAT_TEST_DUMP_JAR=@AHAT_TEST_DUMP_JAR@ +AHAT_TEST_DUMP_MAP=$(dirname ${AHAT_TEST_DUMP_JAR})/../proguard_dictionary + +# Build required dependencies. +m build-art-host libahat-test-jni + + +# test-dump.hprof +art --no-compile -cp ${AHAT_TEST_DUMP_JAR} Main ${AHAT_ETC_DIR}/test-dump.hprof + +# test-dump-base.hprof +art --no-compile -cp ${AHAT_TEST_DUMP_JAR} Main ${AHAT_ETC_DIR}/test-dump-base.hprof --base + +# test-dump.map +cp ${AHAT_TEST_DUMP_MAP} ${AHAT_ETC_DIR}/test-dump.map diff --git a/tools/ahat/etc/README.txt b/tools/ahat/etc/README.txt index e9b5b22dae..837c7ecf90 100644 --- a/tools/ahat/etc/README.txt +++ b/tools/ahat/etc/README.txt @@ -7,3 +7,8 @@ O.hprof RI.hprof A version of the test-dump hprof generated on the reference implementation. 
diff --git a/tools/ahat/etc/README.txt b/tools/ahat/etc/README.txt
index e9b5b22dae..837c7ecf90 100644
--- a/tools/ahat/etc/README.txt
+++ b/tools/ahat/etc/README.txt
@@ -7,3 +7,8 @@ O.hprof
 RI.hprof
   A version of the test-dump hprof generated on the reference implementation.
+
+test-dump.hprof, test-dump-base.hprof, test-dump.map
+  Recent versions of the test-dump generated using ahat-test-dump-gen. See the
+  comments in ahat-test-dump-gen.sh.in for more details. These files must be
+  regenerated manually whenever the test-dump source code is modified.
diff --git a/tools/ahat/etc/test-dump-base.hprof b/tools/ahat/etc/test-dump-base.hprof
new file mode 100644
index 0000000000..d0c3f342c2
Binary files /dev/null and b/tools/ahat/etc/test-dump-base.hprof differ
diff --git a/tools/ahat/etc/test-dump.hprof b/tools/ahat/etc/test-dump.hprof
new file mode 100644
index 0000000000..84ad4287e5
Binary files /dev/null and b/tools/ahat/etc/test-dump.hprof differ
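Because the checked-in dumps are regenerated by hand rather than by the build, they can silently fall out of date when src/test-dump/* changes. A minimal staleness check, assuming a checkout rooted at ${ANDROID_BUILD_TOP} (this helper is a sketch, not part of this change):

    # Warn if any test-dump source file is newer than the checked-in hprof.
    etc=${ANDROID_BUILD_TOP}/art/tools/ahat/etc
    src=${ANDROID_BUILD_TOP}/art/tools/ahat/src/test-dump
    if [ -n "$(find "${src}" -type f -newer "${etc}/test-dump.hprof")" ]; then
      echo "etc/test-dump.hprof may be stale; re-run ahat-test-dump-gen" >&2
    fi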
diff --git a/tools/ahat/etc/test-dump.map b/tools/ahat/etc/test-dump.map
new file mode 100644
index 0000000000..7d5ad035b2
--- /dev/null
+++ b/tools/ahat/etc/test-dump.map
@@ -0,0 +1,240 @@
+# compiler: R8
+# compiler_version: 8.9.19-dev
+# min_api: 35
+# compiler_hash: b6b2b638738ed9914c3cbc7103ae1a236354c1b4
+# common_typos_disable
+# {"id":"com.android.tools.r8.mapping","version":"2.2"}
+# pg_map_id: e381cdb92198880e7b46db4aaea8369ad60617c62144964c6424590112d04f37
+# pg_map_hash: SHA-256 e381cdb92198880e7b46db4aaea8369ad60617c62144964c6424590112d04f37
+DumpedStuff -> DumpedStuff:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    java.lang.ref.WeakReference aShortWeakPathToSamplePathObject -> A
+    java.lang.ref.WeakReference aWeakRefToGcRoot -> B
+    java.lang.ref.SoftReference aSoftChain -> C
+    java.lang.Object[] basicStringRef -> D
+    DumpedStuff$AddedObject addedObject -> E
+    # {"id":"com.android.tools.r8.residualsignature","signature":"La;"}
+    DumpedStuff$UnchangedObject unchangedObject -> F
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lp;"}
+    DumpedStuff$RemovedObject removedObject -> G
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Ln;"}
+    DumpedStuff$ModifiedObject modifiedObject -> H
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lk;"}
+    DumpedStuff$StackSmasher stackSmasher -> I
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lo;"}
+    DumpedStuff$StackSmasher stackSmasherAdded -> J
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lo;"}
+    int[] modifiedArray -> K
+    java.lang.Object objectAllocatedAtKnownSite -> L
+    java.lang.Object objectAllocatedAtKnownSubSite -> M
+    android.os.IBinder correctBinderProxy -> N
+    # {"id":"com.android.tools.r8.residualsignature","signature":"La/c;"}
+    android.os.IBinder imposedBinderProxy -> O
+    # {"id":"com.android.tools.r8.residualsignature","signature":"La/c;"}
+    android.os.IBinder carriedBinderProxy -> P
+    # {"id":"com.android.tools.r8.residualsignature","signature":"La/c;"}
+    java.lang.Object correctBinderProxyObject -> Q
+    java.lang.Object impostorBinderProxyObject -> R
+    java.lang.Object carrierBinderProxyObject -> S
+    java.lang.Object binderService -> T
+    java.lang.Object fakeBinderService -> U
+    java.lang.Object binderToken -> V
+    java.lang.Object namedBinderToken -> W
+    java.lang.String modifiedStaticField -> X
+    java.lang.String basicString -> d
+    java.lang.String nonAscii -> e
+    java.lang.String embeddedZero -> f
+    char[] charArray -> g
+    byte[] byteString -> h
+    byte[] byteNotString -> i
+    byte[] byteEmpty -> j
+    java.lang.String nullString -> k
+    java.lang.Object anObject -> l
+    java.lang.Object aCleanedObject -> m
+    java.lang.Runnable aCleanerThunk -> n
+    DumpedStuff$Reference aReference -> o
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lm;"}
+    java.lang.ref.ReferenceQueue referenceQueue -> p
+    java.lang.ref.PhantomReference aPhantomReference -> q
+    java.lang.ref.WeakReference aWeakReference -> r
+    java.lang.ref.WeakReference aNullReferentReference -> s
+    java.lang.ref.SoftReference aSoftReference -> t
+    DumpedStuff$Reference reachabilityReferenceChain -> u
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lm;"}
+    byte[] bigArray -> v
+    android.graphics.Bitmap bitmapOne -> w
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Landroid/graphics/b;"}
+    android.graphics.Bitmap bitmapTwo -> x
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Landroid/graphics/b;"}
+    DumpedStuff$ObjectTree[] gcPathArray -> y
+    # {"id":"com.android.tools.r8.residualsignature","signature":"[Ll;"}
+    DumpedStuff$Reference aLongStrongPathToSamplePathObject -> z
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lm;"}
+    1:6:void <clinit>():242:242 -> <clinit>
+    1:1:void <init>(boolean):45:45 -> <init>
+    2:11:void <init>(boolean):177:186 -> <init>
+    12:17:void <init>(boolean):188:193 -> <init>
+    18:20:void <init>(boolean):196:198 -> <init>
+    21:22:void <init>(boolean):205:206 -> <init>
+    23:23:void <init>(boolean):209:209 -> <init>
+    24:29:void <init>(boolean):218:223 -> <init>
+    30:33:void <init>(boolean):225:228 -> <init>
+    34:34:void <init>(boolean):46:46 -> <init>
+    35:35:void <init>(boolean):49:49 -> <init>
+    36:36:void <init>(boolean):51:51 -> <init>
+    37:39:void <init>(boolean):55:57 -> <init>
+    40:41:void <init>(boolean):60:61 -> <init>
+    42:44:void <init>(boolean):65:67 -> <init>
+    45:52:void <init>(boolean):70:77 -> <init>
+    53:55:void <init>(boolean):82:84 -> <init>
+    56:58:void <init>(boolean):87:89 -> <init>
+    59:59:void <init>(boolean):93:93 -> <init>
+    60:61:void <init>(boolean):95:96 -> <init>
+    1:8:void allocateObjectAtOverriddenSite():42:42 -> b
+    1:7:void allocateObjectAtKnownSite():30:30 -> c
+    8:10:void allocateObjectAtKnownSite():31:31 -> c
+    11:13:void allocateObjectAtKnownSite():32:32 -> c
+    14:16:void allocateObjectAtKnownSite():33:33 -> c
+    17:20:void allocateObjectAtKnownSite():34:34 -> c
+    1:8:void allocateObjectAtKnownSubSite():38:38 -> d
+    1:40:void shouldNotGc():233:233 -> e
+DumpedStuff$AddedObject -> a:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    1:4:void <init>():109:109 -> <init>
+DumpedStuff$BinderProxyCarrier -> b:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    android.os.IBinder mRemote -> a
+    # {"id":"com.android.tools.r8.residualsignature","signature":"La/c;"}
+    1:3:void <init>(android.os.IBinder):164:164 -> <init>
+    # {"id":"com.android.tools.r8.residualsignature","signature":"(La/c;)V"}
+    4:6:void <init>(android.os.IBinder):165:165 -> <init>
+DumpedStuff$BinderService -> c:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    1:1:void <init>(DumpedStuff-IA):0:0 -> <init>
+    # {"id":"com.android.tools.r8.synthesized"}
+    # {"id":"com.android.tools.r8.residualsignature","signature":"(Lq;)V"}
+    2:2:void <init>():169:169 -> <init>
+DumpedStuff$FakeBinderService -> d:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    1:1:void <init>(DumpedStuff-IA):0:0 -> <init>
+    # {"id":"com.android.tools.r8.synthesized"}
+    # {"id":"com.android.tools.r8.residualsignature","signature":"(Lq;)V"}
+    2:2:void <init>():173:173 -> <init>
+DumpedStuff$IBinderInterfaceImpostor -> g:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+DumpedStuff$IBinderInterfaceImpostor$Stub -> f:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    1:4:void <init>():152:152 -> <init>
+DumpedStuff$IBinderInterfaceImpostor$Stub$Proxy -> e:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    android.os.IBinder mFakeRemote -> a
+    # {"id":"com.android.tools.r8.residualsignature","signature":"La/c;"}
+    1:3:void <init>(android.os.IBinder):155:155 -> <init>
+    # {"id":"com.android.tools.r8.residualsignature","signature":"(La/c;)V"}
+    4:8:void <init>(android.os.IBinder):154:154 -> <init>
+    9:11:void <init>(android.os.IBinder):156:156 -> <init>
+DumpedStuff$IDumpedManager -> j:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+DumpedStuff$IDumpedManager$Stub -> i:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    java.lang.String DESCRIPTOR -> b
+    1:6:void <init>():140:140 -> <init>
+DumpedStuff$IDumpedManager$Stub$Proxy -> h:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    android.os.IBinder mRemote -> a
+    # {"id":"com.android.tools.r8.residualsignature","signature":"La/c;"}
+    1:3:void <init>(android.os.IBinder):144:144 -> <init>
+    # {"id":"com.android.tools.r8.residualsignature","signature":"(La/c;)V"}
+    4:6:void <init>(android.os.IBinder):145:145 -> <init>
+DumpedStuff$ModifiedObject -> k:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    int value -> a
+    java.lang.String modifiedRefField -> b
+    java.lang.String unmodifiedRefField -> c
+    1:4:void <init>():118:118 -> <init>
+DumpedStuff$ObjectTree -> l:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    DumpedStuff$ObjectTree left -> a
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Ll;"}
+    DumpedStuff$ObjectTree right -> b
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Ll;"}
+    1:3:void <init>(DumpedStuff$ObjectTree,DumpedStuff$ObjectTree):103:103 -> <init>
+    # {"id":"com.android.tools.r8.residualsignature","signature":"(Ll;Ll;)V"}
+    4:5:void <init>(DumpedStuff$ObjectTree,DumpedStuff$ObjectTree):104:104 -> <init>
+    6:8:void <init>(DumpedStuff$ObjectTree,DumpedStuff$ObjectTree):105:105 -> <init>
+DumpedStuff$Reference -> m:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    java.lang.Object referent -> a
+    1:3:void <init>(java.lang.Object):131:131 -> <init>
+    4:6:void <init>(java.lang.Object):132:132 -> <init>
+DumpedStuff$RemovedObject -> n:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    1:4:void <init>():112:112 -> <init>
+DumpedStuff$StackSmasher -> o:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    DumpedStuff$StackSmasher child -> a
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Lo;"}
+    1:4:void <init>():124:124 -> <init>
+DumpedStuff$UnchangedObject -> p:
+# {"id":"sourceFile","fileName":"DumpedStuff.java"}
+    1:4:void <init>():115:115 -> <init>
+DumpedStuff-IA -> q:
+# {"id":"sourceFile","fileName":"R8$$SyntheticClass"}
+# {"id":"com.android.tools.r8.synthesized"}
+Main -> Main:
+# {"id":"sourceFile","fileName":"Main.java"}
+    DumpedStuff stuff -> a
+    1:4:void <init>():24:24 -> <init>
+    1:2:void main(java.lang.String[]):30:31 -> main
+    3:3:void main(java.lang.String[]):34:34 -> main
+    4:4:void main(java.lang.String[]):38:38 -> main
+    5:5:void main(java.lang.String[]):41:41 -> main
+    6:6:void main(java.lang.String[]):44:44 -> main
+    7:7:void main(java.lang.String[]):48:48 -> main
+    8:8:void main(java.lang.String[]):53:53 -> main
+    9:9:void main(java.lang.String[]):56:56 -> main
+    10:11:void main(java.lang.String[]):59:60 -> main
+SuperDumpedStuff -> SuperDumpedStuff:
+# {"id":"sourceFile","fileName":"SuperDumpedStuff.java"}
+    java.lang.Object objectAllocatedAtObfSuperSite -> a
+    java.lang.Object objectAllocatedAtUnObfSuperSite -> b
+    java.lang.Object objectAllocatedAtOverriddenSite -> c
+    1:4:void <init>():19:19 -> <init>
+    1:8:void allocateObjectAtObfSuperSite():22:22 -> a
+    1:8:void allocateObjectAtUnObfSuperSite():26:26 -> allocateObjectAtUnObfSuperSite
+    1:8:void allocateObjectAtOverriddenSite():30:30 -> b
+android.graphics.Bitmap -> android.graphics.b:
+# {"id":"sourceFile","fileName":"Bitmap.java"}
+    long mNativePtr -> a
+    int mWidth -> b
+    int mHeight -> c
+    android.graphics.Bitmap$DumpData dumpData -> d
+    # {"id":"com.android.tools.r8.residualsignature","signature":"Landroid/graphics/a;"}
+    1:11:void <clinit>():59:59 -> <clinit>
+    1:5:void <init>(int,int,long,byte[]):29:33 -> <init>
+android.graphics.Bitmap$DumpData -> android.graphics.a:
+# {"id":"sourceFile","fileName":"Bitmap.java"}
+    int format -> a
+    long[] natives -> b
+    byte[][] buffers -> c
+    int max -> d
+    int count -> e
+    1:3:void <init>(int,int):43:43 -> <init>
+    4:5:void <init>(int,int):44:44 -> <init>
+    6:7:void <init>(int,int):45:45 -> <init>
+    8:11:void <init>(int,int):46:46 -> <init>
+    12:16:void <init>(int,int):47:47 -> <init>
+    17:19:void <init>(int,int):48:48 -> <init>
+    1:6:void add(long,byte[]):52:52 -> a
+    7:10:void add(long,byte[]):53:53 -> a
+    11:20:void add(long,byte[]):54:54 -> a
+android.os.Binder -> a.a:
+# {"id":"sourceFile","fileName":"Binder.java"}
+    java.lang.String mDescriptor -> a
+    1:2:void <init>():37:38 -> <init>
+    3:4:void <init>(java.lang.String):41:42 -> <init>
+android.os.BinderProxy -> a.b:
+# {"id":"sourceFile","fileName":"BinderProxy.java"}
+    1:4:void <init>():20:20 -> <init>
+android.os.IBinder -> a.c:
+# {"id":"sourceFile","fileName":"IBinder.java"}
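The map above follows the standard R8/ProGuard format: an unindented "original -> obfuscated:" line opens each class, indented lines map that class's fields and methods, and R8's JSON comments carry extra metadata such as residual signatures. To resolve an obfuscated name by hand, e.g. to find which class became "o" (illustrative only; the ahat tests load this map as a Java resource via the LOCAL_JAVA_RESOURCE_FILES entry above):

    # Find the class that R8 renamed to "o".
    grep -E '^[^ #].* -> o:$' art/tools/ahat/etc/test-dump.map
    # Expected output, per the map above:
    #   DumpedStuff$StackSmasher -> o: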