-rw-r--r--  compiler/Android.bp | 1
-rw-r--r--  compiler/optimizing/code_generator.cc | 5
-rw-r--r--  compiler/optimizing/code_sinking.cc | 403
-rw-r--r--  compiler/optimizing/code_sinking.h | 48
-rw-r--r--  compiler/optimizing/common_dominator.h | 6
-rw-r--r--  compiler/optimizing/inliner.cc | 28
-rw-r--r--  compiler/optimizing/inliner.h | 7
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 5
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h | 29
-rw-r--r--  dexlayout/dex_ir.cc | 2
-rw-r--r--  dexlayout/dexlayout.cc | 107
-rw-r--r--  dexlayout/dexlayout.h | 5
-rw-r--r--  dexlayout/dexlayout_test.cc | 106
-rw-r--r--  runtime/Android.bp | 1
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.cc | 1
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 25
-rw-r--r--  runtime/art_method-inl.h | 14
-rw-r--r--  runtime/art_method.h | 2
-rw-r--r--  runtime/asm_support.h | 2
-rw-r--r--  runtime/base/mutex.cc | 8
-rw-r--r--  runtime/base/mutex.h | 6
-rw-r--r--  runtime/class_linker_test.cc | 2
-rw-r--r--  runtime/gc/heap.cc | 9
-rw-r--r--  runtime/hprof/hprof.cc | 4
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/jdwp/jdwp.h | 42
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 22
-rw-r--r--  runtime/jdwp/jdwp_main.cc | 3
-rw-r--r--  runtime/jit/jit_code_cache.cc | 11
-rw-r--r--  runtime/mirror/string.h | 2
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 44
-rw-r--r--  runtime/native/java_lang_Object.cc | 7
-rw-r--r--  runtime/native/java_lang_System.cc | 10
-rw-r--r--  runtime/non_debuggable_classes.cc | 42
-rw-r--r--  runtime/non_debuggable_classes.h | 46
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc | 42
-rw-r--r--  runtime/openjdkjvmti/art_jvmti.h | 11
-rw-r--r--  runtime/openjdkjvmti/ti_field.cc | 1
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc | 178
-rw-r--r--  runtime/openjdkjvmti/ti_phase.cc | 1
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc | 15
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h | 1
-rw-r--r--  runtime/openjdkjvmti/ti_search.cc | 1
-rw-r--r--  runtime/openjdkjvmti/transform.cc | 6
-rw-r--r--  runtime/openjdkjvmti/transform.h | 8
-rw-r--r--  test/639-checker-code-sinking/expected.txt | 3
-rw-r--r--  test/639-checker-code-sinking/info.txt | 1
-rw-r--r--  test/639-checker-code-sinking/src/Main.java | 355
-rw-r--r--  test/903-hello-tagging/expected.txt | 1
-rw-r--r--  test/903-hello-tagging/src/Main.java | 8
-rw-r--r--  test/903-hello-tagging/tagging.cc | 56
-rw-r--r--  test/906-iterate-heap/expected.txt | 22
-rw-r--r--  test/906-iterate-heap/iterate_heap.cc | 102
-rw-r--r--  test/906-iterate-heap/src/Main.java | 41
-rw-r--r--  test/912-classes/expected.txt | 3
-rw-r--r--  test/912-classes/src/Main.java | 2
-rw-r--r--  test/913-heaps/expected.txt | 215
-rw-r--r--  test/913-heaps/heaps.cc | 101
-rwxr-xr-x  test/913-heaps/run | 2
-rw-r--r--  test/913-heaps/src/Main.java | 70
-rwxr-xr-x  test/testrunner/testrunner.py | 8
61 files changed, 2084 insertions(+), 227 deletions(-)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index f5589cd7a3..1ee2a21b18 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -52,6 +52,7 @@ art_cc_defaults {
"optimizing/cha_guard_optimization.cc",
"optimizing/code_generator.cc",
"optimizing/code_generator_utils.cc",
+ "optimizing/code_sinking.cc",
"optimizing/constant_folding.cc",
"optimizing/dead_code_elimination.cc",
"optimizing/escape.cc",
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8dd423fcbb..424b8507fb 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -861,8 +861,11 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
bool CodeGenerator::HasStackMapAtCurrentPc() {
uint32_t pc = GetAssembler()->CodeSize();
size_t count = stack_map_stream_.GetNumberOfStackMaps();
+ if (count == 0) {
+ return false;
+ }
CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
- return (count > 0) && (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
+ return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
}
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
new file mode 100644
index 0000000000..dc3d378e75
--- /dev/null
+++ b/compiler/optimizing/code_sinking.cc
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_sinking.h"
+
+#include "common_dominator.h"
+#include "nodes.h"
+
+namespace art {
+
+void CodeSinking::Run() {
+ HBasicBlock* exit = graph_->GetExitBlock();
+ if (exit == nullptr) {
+ // Infinite loop, just bail.
+ return;
+ }
+ // TODO(ngeoffray): we do not profile branches yet, so use throw instructions
+ // as an indicator of an uncommon branch.
+ for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) {
+ if (exit_predecessor->GetLastInstruction()->IsThrow()) {
+ SinkCodeToUncommonBranch(exit_predecessor);
+ }
+ }
+}
+
+static bool IsInterestingInstruction(HInstruction* instruction) {
+ // Instructions from the entry block (for example constants) are never interesting to move.
+ if (instruction->GetBlock() == instruction->GetBlock()->GetGraph()->GetEntryBlock()) {
+ return false;
+ }
+ // We want to move moveable instructions that cannot throw, as well as
+ // heap stores and allocations.
+
+ // Volatile stores cannot be moved.
+ if (instruction->IsInstanceFieldSet()) {
+ if (instruction->AsInstanceFieldSet()->IsVolatile()) {
+ return false;
+ }
+ }
+
+ // Check allocations first, as they can throw, but it is safe to move them.
+ if (instruction->IsNewInstance() || instruction->IsNewArray()) {
+ return true;
+ }
+
+ // All other instructions that can throw cannot be moved.
+ if (instruction->CanThrow()) {
+ return false;
+ }
+
+ // We can only store into local allocations. Other heap references could
+ // escape. Note that allocations can escape too, but we only move
+ // allocations if their users can move too, or are in the list of
+ // post dominated blocks.
+ if (instruction->IsInstanceFieldSet()) {
+ if (!instruction->InputAt(0)->IsNewInstance()) {
+ return false;
+ }
+ }
+
+ if (instruction->IsArraySet()) {
+ if (!instruction->InputAt(0)->IsNewArray()) {
+ return false;
+ }
+ }
+
+ // Heap accesses cannot go past instructions that have memory side effects, which
+ // we are not tracking here. Note that the load/store elimination optimization
+ // runs before this optimization, and should have removed the interesting ones.
+ // In theory, we could handle loads of local allocations, but this is currently
+ // hard to test, as LSE removes them.
+ if (instruction->IsStaticFieldGet() ||
+ instruction->IsInstanceFieldGet() ||
+ instruction->IsArrayGet()) {
+ return false;
+ }
+
+ if (instruction->IsInstanceFieldSet() ||
+ instruction->IsArraySet() ||
+ instruction->CanBeMoved()) {
+ return true;
+ }
+ return false;
+}
+
+static void AddInstruction(HInstruction* instruction,
+ const ArenaBitVector& processed_instructions,
+ const ArenaBitVector& discard_blocks,
+ ArenaVector<HInstruction*>* worklist) {
+ // Add to the work list if the instruction is not in the list of blocks to
+ // discard, has not already been processed, and is of interest.
+ if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
+ !processed_instructions.IsBitSet(instruction->GetId()) &&
+ IsInterestingInstruction(instruction)) {
+ worklist->push_back(instruction);
+ }
+}
+
+static void AddInputs(HInstruction* instruction,
+ const ArenaBitVector& processed_instructions,
+ const ArenaBitVector& discard_blocks,
+ ArenaVector<HInstruction*>* worklist) {
+ for (HInstruction* input : instruction->GetInputs()) {
+ AddInstruction(input, processed_instructions, discard_blocks, worklist);
+ }
+}
+
+static void AddInputs(HBasicBlock* block,
+ const ArenaBitVector& processed_instructions,
+ const ArenaBitVector& discard_blocks,
+ ArenaVector<HInstruction*>* worklist) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
+ }
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
+ }
+}
+
+static bool ShouldFilterUse(HInstruction* instruction,
+ HInstruction* user,
+ const ArenaBitVector& post_dominated) {
+ if (instruction->IsNewInstance()) {
+ return user->IsInstanceFieldSet() &&
+ (user->InputAt(0) == instruction) &&
+ !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
+ } else if (instruction->IsNewArray()) {
+ return user->IsArraySet() &&
+ (user->InputAt(0) == instruction) &&
+ !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
+ }
+ return false;
+}
+
+
+// Find the ideal position for moving `instruction`. If `filter` is true,
+// we filter out store instructions to that instruction, which are processed
+// first in step (3) of the sinking algorithm.
+// This method is tailored to the sinking algorithm, unlike
+// the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
+static HInstruction* FindIdealPosition(HInstruction* instruction,
+ const ArenaBitVector& post_dominated,
+ bool filter = false) {
+ DCHECK(!instruction->IsPhi()); // Makes no sense for Phi.
+
+ // Find the target block.
+ CommonDominator finder(/* start_block */ nullptr);
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ HInstruction* user = use.GetUser();
+ if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
+ finder.Update(user->IsPhi()
+ ? user->GetBlock()->GetPredecessors()[use.GetIndex()]
+ : user->GetBlock());
+ }
+ }
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ DCHECK(!use.GetUser()->GetHolder()->IsPhi());
+ DCHECK(!filter || !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
+ finder.Update(use.GetUser()->GetHolder()->GetBlock());
+ }
+ HBasicBlock* target_block = finder.Get();
+ if (target_block == nullptr) {
+ // No user we can go next to? Likely a LSE or DCE limitation.
+ return nullptr;
+ }
+
+ // Move to the first dominator not in a loop, if we can.
+ while (target_block->IsInLoop()) {
+ if (!post_dominated.IsBitSet(target_block->GetDominator()->GetBlockId())) {
+ break;
+ }
+ target_block = target_block->GetDominator();
+ DCHECK(target_block != nullptr);
+ }
+
+ // Find insertion position. No need to filter anymore, as we have found a
+ // target block.
+ HInstruction* insert_pos = nullptr;
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ if (use.GetUser()->GetBlock() == target_block &&
+ (insert_pos == nullptr || use.GetUser()->StrictlyDominates(insert_pos))) {
+ insert_pos = use.GetUser();
+ }
+ }
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ HInstruction* user = use.GetUser()->GetHolder();
+ if (user->GetBlock() == target_block &&
+ (insert_pos == nullptr || user->StrictlyDominates(insert_pos))) {
+ insert_pos = user;
+ }
+ }
+ if (insert_pos == nullptr) {
+ // No user in `target_block`, insert before the control flow instruction.
+ insert_pos = target_block->GetLastInstruction();
+ DCHECK(insert_pos->IsControlFlow());
+ // Avoid splitting HCondition from HIf to prevent unnecessary materialization.
+ if (insert_pos->IsIf()) {
+ HInstruction* if_input = insert_pos->AsIf()->InputAt(0);
+ if (if_input == insert_pos->GetPrevious()) {
+ insert_pos = if_input;
+ }
+ }
+ }
+ DCHECK(!insert_pos->IsPhi());
+ return insert_pos;
+}
+
+
+void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
+ // Local allocator so that the data structures created below are discarded
+ // at the end of this optimization.
+ ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
+
+ size_t number_of_instructions = graph_->GetCurrentInstructionId();
+ ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
+ ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ ArenaBitVector instructions_that_can_move(
+ &allocator, number_of_instructions, /* expandable */ false);
+ ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
+
+ // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
+ // TODO(ngeoffray): Getting the full set of post-dominated blocks should be done by
+ // computing the post dominator tree, but that could be too time consuming. Also,
+ // we should start the analysis from blocks dominated by an uncommon branch, but we
+ // don't profile branches yet.
+ bool found_block = false;
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
+ if (block == end_block) {
+ found_block = true;
+ post_dominated.SetBit(block->GetBlockId());
+ } else if (found_block) {
+ bool is_post_dominated = true;
+ if (block->GetSuccessors().empty()) {
+ // We currently bail for loops.
+ is_post_dominated = false;
+ } else {
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ if (!post_dominated.IsBitSet(successor->GetBlockId())) {
+ is_post_dominated = false;
+ break;
+ }
+ }
+ }
+ if (is_post_dominated) {
+ post_dominated.SetBit(block->GetBlockId());
+ }
+ }
+ }
+
+ // Now that we have found a subset of post-dominated blocks, add to the worklist all inputs
+ // of instructions in these blocks that are not themselves in these blocks.
+ // Also find the common dominator of the found post-dominated blocks, to help filter
+ // out un-movable uses in step (2).
+ CommonDominator finder(end_block);
+ for (size_t i = 0, e = graph_->GetBlocks().size(); i < e; ++i) {
+ if (post_dominated.IsBitSet(i)) {
+ finder.Update(graph_->GetBlocks()[i]);
+ AddInputs(graph_->GetBlocks()[i], processed_instructions, post_dominated, &worklist);
+ }
+ }
+ HBasicBlock* common_dominator = finder.Get();
+
+ // Step (2): Iterate over the worklist to find sinking candidates.
+ while (!worklist.empty()) {
+ HInstruction* instruction = worklist.back();
+ if (processed_instructions.IsBitSet(instruction->GetId())) {
+ // The instruction has already been processed, continue. This happens
+ // when the instruction is the input/user of multiple instructions.
+ worklist.pop_back();
+ continue;
+ }
+ bool all_users_in_post_dominated_blocks = true;
+ bool can_move = true;
+ // Check users of the instruction.
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ HInstruction* user = use.GetUser();
+ if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId()) &&
+ !instructions_that_can_move.IsBitSet(user->GetId())) {
+ all_users_in_post_dominated_blocks = false;
+ // If we've already processed this user, or the user cannot be moved, or
+ // does not dominate the post dominated blocks, bail.
+ // TODO(ngeoffray): The domination check is an approximation. We should
+ // instead check if the dominated blocks post dominate the user's block,
+ // but we do not have post dominance information here.
+ if (processed_instructions.IsBitSet(user->GetId()) ||
+ !IsInterestingInstruction(user) ||
+ !user->GetBlock()->Dominates(common_dominator)) {
+ can_move = false;
+ break;
+ }
+ }
+ }
+
+ // Check environment users of the instruction. Some of these users require
+ // the instruction not to move.
+ if (all_users_in_post_dominated_blocks) {
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ HEnvironment* environment = use.GetUser();
+ HInstruction* user = environment->GetHolder();
+ if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
+ if (graph_->IsDebuggable() ||
+ user->IsDeoptimize() ||
+ user->CanThrowIntoCatchBlock() ||
+ (user->IsSuspendCheck() && graph_->IsCompilingOsr())) {
+ can_move = false;
+ break;
+ }
+ }
+ }
+ }
+ if (!can_move) {
+ // Instruction cannot be moved, mark it as processed and remove it from the work
+ // list.
+ processed_instructions.SetBit(instruction->GetId());
+ worklist.pop_back();
+ } else if (all_users_in_post_dominated_blocks) {
+ // Instruction is a candidate for being sunk. Mark it as such, remove it from the
+ // work list, and add its inputs to the work list.
+ instructions_that_can_move.SetBit(instruction->GetId());
+ move_in_order.push_back(instruction);
+ processed_instructions.SetBit(instruction->GetId());
+ worklist.pop_back();
+ AddInputs(instruction, processed_instructions, post_dominated, &worklist);
+ // Drop the environment uses not in the list of post-dominated blocks. This is
+ // to help step (3) of this optimization, when we start moving instructions
+ // closer to their use.
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ HEnvironment* environment = use.GetUser();
+ HInstruction* user = environment->GetHolder();
+ if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
+ environment->RemoveAsUserOfInput(use.GetIndex());
+ environment->SetRawEnvAt(use.GetIndex(), nullptr);
+ }
+ }
+ } else {
+ // The information we have on the users was not enough to decide whether the
+ // instruction could be moved.
+ // Add the users to the work list, and keep the instruction in the work list
+ // to process it again once all users have been processed.
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ AddInstruction(use.GetUser(), processed_instructions, post_dominated, &worklist);
+ }
+ }
+ }
+
+ // Make sure we process instructions in dominated order. This is required for heap
+ // stores.
+ std::sort(move_in_order.begin(), move_in_order.end(), [](HInstruction* a, HInstruction* b) {
+ return b->StrictlyDominates(a);
+ });
+
+ // Step (3): Try to move sinking candidates.
+ for (HInstruction* instruction : move_in_order) {
+ HInstruction* position = nullptr;
+ if (instruction->IsArraySet() || instruction->IsInstanceFieldSet()) {
+ if (!instructions_that_can_move.IsBitSet(instruction->InputAt(0)->GetId())) {
+ // A store can trivially move, but it can safely do so only if the heap
+ // location it stores to can also move.
+ // TODO(ngeoffray): Handle allocation/store cycles by pruning these instructions
+ // from the set and all their inputs.
+ continue;
+ }
+ // Find the position of the instruction we're storing into, filtering out this
+ // store and all other stores to that instruction.
+ position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);
+
+ // The position needs to be dominated by the store, in order for the store to move there.
+ if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
+ continue;
+ }
+ } else {
+ // Find the ideal position within the post dominated blocks.
+ position = FindIdealPosition(instruction, post_dominated);
+ if (position == nullptr) {
+ continue;
+ }
+ }
+ // Bail if we could not find a position in the post dominated blocks (for example,
+ // if there are multiple users whose common dominator is not in the list of
+ // post dominated blocks).
+ if (!post_dominated.IsBitSet(position->GetBlock()->GetBlockId())) {
+ continue;
+ }
+ MaybeRecordStat(MethodCompilationStat::kInstructionSunk);
+ instruction->MoveBefore(position, /* ensure_safety */ false);
+ }
+}
+
+} // namespace art
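For orientation, here is a stand-alone C++ sketch (hypothetical code, not part of this change or of ART itself) of the shape of code the pass targets: a value computed unconditionally but only used on a throwing, uncommon path. Sinking moves the computation next to its sole user, off the hot path.

    #include <stdexcept>
    #include <string>

    // Before sinking: the message is built even when the common path is taken.
    int DivideBefore(int a, int b) {
      std::string msg = "division by zero";  // only needed on the uncommon path
      if (b != 0) {
        return a / b;                        // common path: msg is dead here
      }
      throw std::runtime_error(msg);         // uncommon path: sole use of msg
    }

    // After sinking: the construction has moved into the throwing branch.
    int DivideAfter(int a, int b) {
      if (b != 0) {
        return a / b;
      }
      std::string msg = "division by zero";
      throw std::runtime_error(msg);
    }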
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
new file mode 100644
index 0000000000..59cda52a8c
--- /dev/null
+++ b/compiler/optimizing/code_sinking.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_SINKING_H_
+#define ART_COMPILER_OPTIMIZING_CODE_SINKING_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+/**
+ * Optimization pass to move instructions into uncommon branches,
+ * when it is safe to do so.
+ */
+class CodeSinking : public HOptimization {
+ public:
+ CodeSinking(HGraph* graph, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kCodeSinkingPassName, stats) {}
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kCodeSinkingPassName = "code_sinking";
+
+ private:
+ // Try to move code that is only used by `end_block` and all its post-dominated /
+ // dominated blocks into those blocks.
+ void SinkCodeToUncommonBranch(HBasicBlock* end_block);
+
+ DISALLOW_COPY_AND_ASSIGN(CodeSinking);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_SINKING_H_
diff --git a/compiler/optimizing/common_dominator.h b/compiler/optimizing/common_dominator.h
index b459d24d7c..9f012cfbb2 100644
--- a/compiler/optimizing/common_dominator.h
+++ b/compiler/optimizing/common_dominator.h
@@ -36,12 +36,16 @@ class CommonDominator {
// Create a finder starting with a given block.
explicit CommonDominator(HBasicBlock* block)
: dominator_(block), chain_length_(ChainLength(block)) {
- DCHECK(block != nullptr);
}
// Update the common dominator with another block.
void Update(HBasicBlock* block) {
DCHECK(block != nullptr);
+ if (dominator_ == nullptr) {
+ dominator_ = block;
+ chain_length_ = ChainLength(block);
+ return;
+ }
HBasicBlock* block2 = dominator_;
DCHECK(block2 != nullptr);
if (block == block2) {
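This relaxation exists so that FindIdealPosition in code_sinking.cc can start with an empty finder and seed it from the first use it visits. A rough sketch of the now-legal usage pattern (ART-internal API, shown for illustration only):

    // CommonDominator finder(/* start_block */ nullptr);  // empty finder is now allowed
    // for (HBasicBlock* block : blocks_of_interest) {
    //   finder.Update(block);            // the first Update() seeds the dominator chain
    // }
    // HBasicBlock* dom = finder.Get();   // nullptr if Update() was never called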
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8c73f1d036..3e340908bf 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1272,12 +1272,19 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
- // When they are needed, allocate `inline_stats` on the heap instead
+ // When they are needed, allocate `inline_stats_` on the Arena instead
// of on the stack, as Clang might produce a stack frame too large
// for this function, which would not fit the requirements of the
// `-Wframe-larger-than` option.
- std::unique_ptr<OptimizingCompilerStats> inline_stats =
- (stats_ == nullptr) ? nullptr : MakeUnique<OptimizingCompilerStats>();
+ if (stats_ != nullptr) {
+ // Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
+ if (inline_stats_ == nullptr) {
+ void* storage = graph_->GetArena()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
+ inline_stats_ = new (storage) OptimizingCompilerStats;
+ } else {
+ inline_stats_->Reset();
+ }
+ }
HGraphBuilder builder(callee_graph,
&dex_compilation_unit,
&outer_compilation_unit_,
@@ -1285,7 +1292,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
*code_item,
compiler_driver_,
codegen_,
- inline_stats.get(),
+ inline_stats_,
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
dex_cache,
handles_);
@@ -1468,6 +1475,11 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
DCHECK_EQ(callee_instruction_counter, callee_graph->GetCurrentInstructionId())
<< "No instructions can be added to the inner graph during inlining into the outer graph";
+ if (stats_ != nullptr) {
+ DCHECK(inline_stats_ != nullptr);
+ inline_stats_->AddTo(stats_);
+ }
+
return true;
}
@@ -1476,11 +1488,11 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled OSR, we should not run any
// optimization that could lead to a HDeoptimize. The following optimizations do not.
- HDeadCodeElimination dce(callee_graph, stats_, "dead_code_elimination$inliner");
+ HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_, handles_);
- InstructionSimplifier simplify(callee_graph, stats_);
- IntrinsicsRecognizer intrinsics(callee_graph, stats_);
+ InstructionSimplifier simplify(callee_graph, inline_stats_);
+ IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
&intrinsics,
@@ -1504,7 +1516,7 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
dex_compilation_unit,
compiler_driver_,
handles_,
- stats_,
+ inline_stats_,
total_number_of_dex_registers_ + code_item->registers_size_,
depth_ + 1);
inliner.Run();
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 11aacab802..75d025ae41 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -51,7 +51,8 @@ class HInliner : public HOptimization {
total_number_of_dex_registers_(total_number_of_dex_registers),
depth_(depth),
number_of_inlined_instructions_(0),
- handles_(handles) {}
+ handles_(handles),
+ inline_stats_(nullptr) {}
void Run() OVERRIDE;
@@ -218,6 +219,10 @@ class HInliner : public HOptimization {
size_t number_of_inlined_instructions_;
VariableSizedHandleScope* const handles_;
+ // Used to record stats about optimizations on the inlined graph.
+ // If the inlining is successful, these stats are merged into the caller graph's stats.
+ OptimizingCompilerStats* inline_stats_;
+
DISALLOW_COPY_AND_ASSIGN(HInliner);
};
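The allocation pattern the inliner now uses — carve raw storage out of the arena once, placement-new the stats object into it, then Reset() it on each later inline attempt — is sketched below in generic C++ (a simplified stand-in, not ART's ArenaAllocator API; alignment handling is omitted):

    #include <cstddef>
    #include <new>

    struct Stats {
      unsigned counts[4];
      Stats() { Reset(); }
      void Reset() { for (unsigned& c : counts) c = 0; }
    };

    // Bump-pointer arena: objects are never freed individually; the whole
    // buffer is released at once when compilation of the method finishes.
    struct Arena {
      alignas(alignof(std::max_align_t)) unsigned char buffer[1024];
      std::size_t used = 0;
      void* Alloc(std::size_t n) {
        void* p = buffer + used;
        used += n;
        return p;
      }
    };

    Stats* GetOrReuseStats(Arena* arena, Stats** cached) {
      if (*cached == nullptr) {
        *cached = new (arena->Alloc(sizeof(Stats))) Stats();  // placement new
      } else {
        (*cached)->Reset();  // reuse one arena object across all inline attempts
      }
      return *cached;
    }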
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f72bd6a5a3..3842ef98da 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -56,6 +56,7 @@
#include "builder.h"
#include "cha_guard_optimization.h"
#include "code_generator.h"
+#include "code_sinking.h"
#include "compiled_method.h"
#include "compiler.h"
#include "constant_folding.h"
@@ -521,6 +522,8 @@ static HOptimization* BuildOptimization(
return new (arena) HLoopOptimization(graph, most_recent_induction);
} else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
return new (arena) CHAGuardOptimization(graph);
+ } else if (opt_name == CodeSinking::kCodeSinkingPassName) {
+ return new (arena) CodeSinking(graph, stats);
#ifdef ART_ENABLE_CODEGEN_arm
} else if (opt_name == arm::DexCacheArrayFixups::kDexCacheArrayFixupsArmPassName) {
return new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
@@ -787,6 +790,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
graph, stats, "instruction_simplifier$before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
+ CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);
HOptimization* optimizations1[] = {
intrinsics,
@@ -817,6 +821,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
lse,
cha_guard,
dce3,
+ code_sinking,
// The codegen has a few assumptions that only the instruction simplifier
// can satisfy. For example, the code generator does not expect to see a
// HTypeConversion from a type to the same type.
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 203b1ec7ec..ae9a8119a7 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
+#include <atomic>
#include <iomanip>
#include <string>
#include <type_traits>
@@ -67,14 +68,18 @@ enum MethodCompilationStat {
kImplicitNullCheckGenerated,
kExplicitNullCheckGenerated,
kSimplifyIf,
+ kInstructionSunk,
kLastStat
};
class OptimizingCompilerStats {
public:
- OptimizingCompilerStats() {}
+ OptimizingCompilerStats() {
+ // The std::atomic<> default constructor leaves values uninitialized, so initialize them now.
+ Reset();
+ }
- void RecordStat(MethodCompilationStat stat, size_t count = 1) {
+ void RecordStat(MethodCompilationStat stat, uint32_t count = 1) {
compile_stats_[stat] += count;
}
@@ -93,7 +98,7 @@ class OptimizingCompilerStats {
<< " methods: " << std::fixed << std::setprecision(2)
<< compiled_percent << "% (" << compile_stats_[kCompiled] << ") compiled.";
- for (int i = 0; i < kLastStat; i++) {
+ for (size_t i = 0; i < kLastStat; i++) {
if (compile_stats_[i] != 0) {
LOG(INFO) << PrintMethodCompilationStat(static_cast<MethodCompilationStat>(i)) << ": "
<< compile_stats_[i];
@@ -102,6 +107,21 @@ class OptimizingCompilerStats {
}
}
+ void AddTo(OptimizingCompilerStats* other_stats) {
+ for (size_t i = 0; i != kLastStat; ++i) {
+ uint32_t count = compile_stats_[i];
+ if (count != 0) {
+ other_stats->RecordStat(static_cast<MethodCompilationStat>(i), count);
+ }
+ }
+ }
+
+ void Reset() {
+ for (size_t i = 0; i != kLastStat; ++i) {
+ compile_stats_[i] = 0u;
+ }
+ }
+
private:
std::string PrintMethodCompilationStat(MethodCompilationStat stat) const {
std::string name;
@@ -147,6 +167,7 @@ class OptimizingCompilerStats {
case kImplicitNullCheckGenerated: name = "ImplicitNullCheckGenerated"; break;
case kExplicitNullCheckGenerated: name = "ExplicitNullCheckGenerated"; break;
case kSimplifyIf: name = "SimplifyIf"; break;
+ case kInstructionSunk: name = "InstructionSunk"; break;
case kLastStat:
LOG(FATAL) << "invalid stat "
@@ -156,7 +177,7 @@ class OptimizingCompilerStats {
return "OptStat#" + name;
}
- AtomicInteger compile_stats_[kLastStat];
+ std::atomic<uint32_t> compile_stats_[kLastStat];
DISALLOW_COPY_AND_ASSIGN(OptimizingCompilerStats);
};
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 609068f41c..131f4b9f63 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -653,7 +653,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
if (has_catch_all) {
size = -size;
}
- if (already_added == true) {
+ if (already_added) {
for (int32_t i = 0; i < size; i++) {
DecodeUnsignedLeb128(&handlers_data);
DecodeUnsignedLeb128(&handlers_data);
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 1add6bfede..22619b9e8d 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -46,6 +46,8 @@ namespace art {
using android::base::StringPrintf;
+static constexpr uint32_t kDexCodeItemAlignment = 4;
+
/*
* Flags for use with createAccessFlagStr().
*/
@@ -1489,7 +1491,7 @@ void DexLayout::DumpDexFile() {
}
}
-std::vector<dex_ir::ClassDef*> DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
+std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
std::vector<dex_ir::ClassDef*> new_class_def_order;
for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
dex::TypeIndex type_idx(class_def->ClassType()->GetIndex());
@@ -1505,46 +1507,93 @@ std::vector<dex_ir::ClassDef*> DexLayout::LayoutClassDefsAndClassData(const DexF
}
uint32_t class_defs_offset = header_->GetCollections().ClassDefsOffset();
uint32_t class_data_offset = header_->GetCollections().ClassDatasOffset();
+ std::unordered_set<dex_ir::ClassData*> visited_class_data;
+ std::vector<dex_ir::ClassData*> new_class_data_order;
for (uint32_t i = 0; i < new_class_def_order.size(); ++i) {
dex_ir::ClassDef* class_def = new_class_def_order[i];
class_def->SetIndex(i);
class_def->SetOffset(class_defs_offset);
class_defs_offset += dex_ir::ClassDef::ItemSize();
- if (class_def->GetClassData() != nullptr) {
- class_def->GetClassData()->SetOffset(class_data_offset);
- class_data_offset += class_def->GetClassData()->GetSize();
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data != nullptr && visited_class_data.find(class_data) == visited_class_data.end()) {
+ class_data->SetOffset(class_data_offset);
+ class_data_offset += class_data->GetSize();
+ visited_class_data.insert(class_data);
+ new_class_data_order.push_back(class_data);
}
}
- return new_class_def_order;
+ return new_class_data_order;
}
-int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassDef*> new_class_def_order) {
- int32_t diff = 0;
+// Orders code items according to the specified class data ordering.
+// NOTE: If the section following the code items is only byte aligned (not 4-byte
+// aligned), the last code item is left in place to preserve alignment. Layout needs
+// an overhaul to handle movement of other sections.
+int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order) {
+ // Find the last code item so we can leave it in place if the next section is not 4-byte aligned.
+ std::unordered_set<dex_ir::CodeItem*> visited_code_items;
uint32_t offset = header_->GetCollections().CodeItemsOffset();
- for (dex_ir::ClassDef* class_def : new_class_def_order) {
- dex_ir::ClassData* class_data = class_def->GetClassData();
- if (class_data != nullptr) {
- class_data->SetOffset(class_data->GetOffset() + diff);
- for (auto& method : *class_data->DirectMethods()) {
- dex_ir::CodeItem* code_item = method->GetCodeItem();
- if (code_item != nullptr) {
- diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(offset);
- offset += RoundUp(code_item->GetSize(), 4);
- }
+ bool is_code_item_aligned = IsNextSectionCodeItemAligned(offset);
+ if (!is_code_item_aligned) {
+ dex_ir::CodeItem* last_code_item = nullptr;
+ for (auto& code_item_pair : header_->GetCollections().CodeItems()) {
+ std::unique_ptr<dex_ir::CodeItem>& code_item = code_item_pair.second;
+ if (last_code_item == nullptr || last_code_item->GetOffset() < code_item->GetOffset()) {
+ last_code_item = code_item.get();
}
- for (auto& method : *class_data->VirtualMethods()) {
- dex_ir::CodeItem* code_item = method->GetCodeItem();
- if (code_item != nullptr) {
- diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(offset);
- offset += RoundUp(code_item->GetSize(), 4);
- }
+ }
+ // Preserve the last code item by marking it already visited.
+ visited_code_items.insert(last_code_item);
+ }
+
+ int32_t diff = 0;
+ for (dex_ir::ClassData* class_data : new_class_data_order) {
+ class_data->SetOffset(class_data->GetOffset() + diff);
+ for (auto& method : *class_data->DirectMethods()) {
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
+ visited_code_items.insert(code_item);
+ diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
+ code_item->SetOffset(offset);
+ offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ }
+ }
+ for (auto& method : *class_data->VirtualMethods()) {
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
+ visited_code_items.insert(code_item);
+ diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
+ code_item->SetOffset(offset);
+ offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
}
}
}
+ // Adjust diff to be 4-byte aligned.
+ return RoundUp(diff, kDexCodeItemAlignment);
+}
- return diff;
+bool DexLayout::IsNextSectionCodeItemAligned(uint32_t offset) {
+ dex_ir::Collections& collections = header_->GetCollections();
+ std::set<uint32_t> section_offsets;
+ section_offsets.insert(collections.MapListOffset());
+ section_offsets.insert(collections.TypeListsOffset());
+ section_offsets.insert(collections.AnnotationSetRefListsOffset());
+ section_offsets.insert(collections.AnnotationSetItemsOffset());
+ section_offsets.insert(collections.ClassDatasOffset());
+ section_offsets.insert(collections.CodeItemsOffset());
+ section_offsets.insert(collections.StringDatasOffset());
+ section_offsets.insert(collections.DebugInfoItemsOffset());
+ section_offsets.insert(collections.AnnotationItemsOffset());
+ section_offsets.insert(collections.EncodedArrayItemsOffset());
+ section_offsets.insert(collections.AnnotationsDirectoryItemsOffset());
+
+ auto found = section_offsets.find(offset);
+ if (found != section_offsets.end()) {
+ found++;
+ if (found != section_offsets.end()) {
+ return *found % kDexCodeItemAlignment == 0;
+ }
+ }
+ return false;
}
// Adjust offsets of every item in the specified section by diff bytes.
@@ -1626,10 +1675,8 @@ void DexLayout::FixupSections(uint32_t offset, uint32_t diff) {
}
void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
- std::vector<dex_ir::ClassDef*> new_class_def_order = LayoutClassDefsAndClassData(dex_file);
- int32_t diff = LayoutCodeItems(new_class_def_order);
- // Adjust diff to be 4-byte aligned.
- diff = RoundUp(diff, 4);
+ std::vector<dex_ir::ClassData*> new_class_data_order = LayoutClassDefsAndClassData(dex_file);
+ int32_t diff = LayoutCodeItems(new_class_data_order);
// Move sections after ClassData by diff bytes.
FixupSections(header_->GetCollections().ClassDatasOffset(), diff);
// Update file size.
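IsNextSectionCodeItemAligned leans on std::set keeping offsets sorted: find the code item section's own offset, advance one element to reach the next section's start, and test 4-byte alignment. A stand-alone sketch of that lookup (illustrative offsets, not from a real dex file):

    #include <cstdint>
    #include <iostream>
    #include <set>

    // True if the section starting right after `offset` begins on a 4-byte
    // boundary; mirrors the ordered-set walk in DexLayout.
    bool NextSectionAligned(const std::set<uint32_t>& section_offsets, uint32_t offset) {
      auto found = section_offsets.find(offset);
      if (found != section_offsets.end()) {
        ++found;  // std::set iterates in ascending order: this is the next section
        if (found != section_offsets.end()) {
          return *found % 4 == 0;
        }
      }
      return false;
    }

    int main() {
      std::set<uint32_t> offsets = {0x70, 0x100, 0x1f2, 0x2f4};  // made-up offsets
      std::cout << NextSectionAligned(offsets, 0x100) << "\n";   // 0 (0x1f2 % 4 != 0)
      std::cout << NextSectionAligned(offsets, 0x1f2) << "\n";   // 1 (0x2f4 % 4 == 0)
      return 0;
    }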
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index ac1a4a6efb..391870644a 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -105,8 +105,9 @@ class DexLayout {
void DumpSField(uint32_t idx, uint32_t flags, int i, dex_ir::EncodedValue* init);
void DumpDexFile();
- std::vector<dex_ir::ClassDef*> LayoutClassDefsAndClassData(const DexFile* dex_file);
- int32_t LayoutCodeItems(std::vector<dex_ir::ClassDef*> new_class_def_order);
+ std::vector<dex_ir::ClassData*> LayoutClassDefsAndClassData(const DexFile* dex_file);
+ int32_t LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order);
+ bool IsNextSectionCodeItemAligned(uint32_t offset);
template<class T> void FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map, uint32_t diff);
void FixupSections(uint32_t offset, uint32_t diff);
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 9881e283df..9f0593a5cd 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -55,6 +55,26 @@ static const char kDexFileLayoutExpectedOutputDex[] =
"qAAAAAYAAAACAAAAwAAAAAEgAAACAAAAAAEAAAIgAAAHAAAAMAEAAAMgAAACAAAAaQEAAAAgAAAC"
"AAAAdQEAAAAQAAABAAAAjAEAAA==";
+// Dex file with catch handler unreferenced by try blocks.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferencedCatchHandlerInputDex[] =
+ "ZGV4CjAzNQD+exd52Y0f9nY5x5GmInXq5nXrO6Kl2RV4AwAAcAAAAHhWNBIAAAAAAAAAANgCAAAS"
+ "AAAAcAAAAAgAAAC4AAAAAwAAANgAAAABAAAA/AAAAAQAAAAEAQAAAQAAACQBAAA0AgAARAEAANYB"
+ "AADeAQAA5gEAAO4BAAAAAgAADwIAACYCAAA9AgAAUQIAAGUCAAB5AgAAfwIAAIUCAACIAgAAjAIA"
+ "AKECAACnAgAArAIAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAwAAAAOAAAADAAAAAYAAAAAAAAA"
+ "DQAAAAYAAADIAQAADQAAAAYAAADQAQAABQABABAAAAAAAAAAAAAAAAAAAgAPAAAAAQABABEAAAAD"
+ "AAAAAAAAAAAAAAABAAAAAwAAAAAAAAADAAAAAAAAAMgCAAAAAAAAAQABAAEAAAC1AgAABAAAAHAQ"
+ "AwAAAA4AAwABAAIAAgC6AgAAIQAAAGIAAAAaAQoAbiACABAAYgAAABoBCwBuIAIAEAAOAA0AYgAA"
+ "ABoBAQBuIAIAEAAo8A0AYgAAABoBAgBuIAIAEAAo7gAAAAAAAAcAAQAHAAAABwABAAIBAg8BAhgA"
+ "AQAAAAQAAAABAAAABwAGPGluaXQ+AAZDYXRjaDEABkNhdGNoMgAQSGFuZGxlclRlc3QuamF2YQAN"
+ "TEhhbmRsZXJUZXN0OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABVMamF2YS9sYW5nL0V4Y2VwdGlv"
+ "bjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5"
+ "c3RlbTsABFRyeTEABFRyeTIAAVYAAlZMABNbTGphdmEvbGFuZy9TdHJpbmc7AARtYWluAANvdXQA"
+ "B3ByaW50bG4AAQAHDgAEAQAHDn17AncdHoseAAAAAgAAgYAExAIBCdwCAAANAAAAAAAAAAEAAAAA"
+ "AAAAAQAAABIAAABwAAAAAgAAAAgAAAC4AAAAAwAAAAMAAADYAAAABAAAAAEAAAD8AAAABQAAAAQA"
+ "AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA"
+ "AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA";
+
// Dex file with multiple code items that have the same debug_info_off_. Constructed by a modified
// dexlayout on XandY.
static const char kDexFileDuplicateOffset[] =
@@ -100,25 +120,30 @@ static const char kNullSetRefListElementInputDex[] =
"ASAAAAIAAACEAQAABiAAAAIAAACwAQAAARAAAAIAAADYAQAAAiAAABIAAADoAQAAAyAAAAIAAADw"
"AgAABCAAAAIAAAD8AgAAACAAAAIAAAAIAwAAABAAAAEAAAAgAwAA";
-// Dex file with catch handler unreferenced by try blocks.
-// Constructed by building a dex file with try/catch blocks and hex editing.
-static const char kUnreferencedCatchHandlerInputDex[] =
- "ZGV4CjAzNQD+exd52Y0f9nY5x5GmInXq5nXrO6Kl2RV4AwAAcAAAAHhWNBIAAAAAAAAAANgCAAAS"
- "AAAAcAAAAAgAAAC4AAAAAwAAANgAAAABAAAA/AAAAAQAAAAEAQAAAQAAACQBAAA0AgAARAEAANYB"
- "AADeAQAA5gEAAO4BAAAAAgAADwIAACYCAAA9AgAAUQIAAGUCAAB5AgAAfwIAAIUCAACIAgAAjAIA"
- "AKECAACnAgAArAIAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAwAAAAOAAAADAAAAAYAAAAAAAAA"
- "DQAAAAYAAADIAQAADQAAAAYAAADQAQAABQABABAAAAAAAAAAAAAAAAAAAgAPAAAAAQABABEAAAAD"
- "AAAAAAAAAAAAAAABAAAAAwAAAAAAAAADAAAAAAAAAMgCAAAAAAAAAQABAAEAAAC1AgAABAAAAHAQ"
- "AwAAAA4AAwABAAIAAgC6AgAAIQAAAGIAAAAaAQoAbiACABAAYgAAABoBCwBuIAIAEAAOAA0AYgAA"
- "ABoBAQBuIAIAEAAo8A0AYgAAABoBAgBuIAIAEAAo7gAAAAAAAAcAAQAHAAAABwABAAIBAg8BAhgA"
- "AQAAAAQAAAABAAAABwAGPGluaXQ+AAZDYXRjaDEABkNhdGNoMgAQSGFuZGxlclRlc3QuamF2YQAN"
- "TEhhbmRsZXJUZXN0OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABVMamF2YS9sYW5nL0V4Y2VwdGlv"
- "bjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5"
- "c3RlbTsABFRyeTEABFRyeTIAAVYAAlZMABNbTGphdmEvbGFuZy9TdHJpbmc7AARtYWluAANvdXQA"
- "B3ByaW50bG4AAQAHDgAEAQAHDn17AncdHoseAAAAAgAAgYAExAIBCdwCAAANAAAAAAAAAAEAAAAA"
- "AAAAAQAAABIAAABwAAAAAgAAAAgAAAC4AAAAAwAAAAMAAADYAAAABAAAAAEAAAD8AAAABQAAAAQA"
- "AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA"
- "AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA";
+// Dex file with a shared empty class data item for multiple class defs.
+// Constructed by building a dex file with multiple classes and hex editing.
+static const char kMultiClassDataInputDex[] =
+ "ZGV4CjAzNQALJgF9TtnLq748xVe/+wyxETrT9lTEiW6YAQAAcAAAAHhWNBIAAAAAAAAAADQBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAAAAAAAAAAACAAAAoAAAAAAAAAAAAAAAAgAAALAAAACoAAAA8AAAAPAA"
+ "AAD4AAAAAAEAAAMBAAAIAQAADQEAACEBAAAkAQAAAgAAAAMAAAAEAAAABQAAAAEAAAAGAAAAAgAA"
+ "AAcAAAABAAAAAQYAAAMAAAAAAAAAAAAAAAAAAAAnAQAAAAAAAAIAAAABBgAAAwAAAAAAAAABAAAA"
+ "AAAAACcBAAAAAAAABkEuamF2YQAGQi5qYXZhAAFJAANMQTsAA0xCOwASTGphdmEvbGFuZy9PYmpl"
+ "Y3Q7AAFhAAFiAAAAAAABAAAAARkAAAAIAAAAAAAAAAEAAAAAAAAAAQAAAAgAAABwAAAAAgAAAAQA"
+ "AACQAAAABAAAAAIAAACgAAAABgAAAAIAAACwAAAAAiAAAAgAAADwAAAAACAAAAIAAAAnAQAAABAA"
+ "AAEAAAA0AQAA";
+
+// Dex file with code info followed by a non-4-byte-aligned section.
+// Constructed by building a dex file with code info followed by string data and hex editing.
+static const char kUnalignedCodeInfoInputDex[] =
+ "ZGV4CjAzNQDXJzXNb4iWn2SLhmLydW/8h1K9moERIw7UAQAAcAAAAHhWNBIAAAAAAAAAAEwBAAAG"
+ "AAAAcAAAAAMAAACIAAAAAQAAAJQAAAAAAAAAAAAAAAMAAACgAAAAAQAAALgAAAD8AAAA2AAAAAIB"
+ "AAAKAQAAEgEAABcBAAArAQAALgEAAAIAAAADAAAABAAAAAQAAAACAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAUAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAABAAAAAAAAADsBAAAAAAAAAQABAAEAAAAxAQAA"
+ "BAAAAHAQAgAAAA4AAQABAAAAAAA2AQAAAQAAAA4ABjxpbml0PgAGQS5qYXZhAANMQTsAEkxqYXZh"
+ "L2xhbmcvT2JqZWN0OwABVgABYQABAAcOAAMABw4AAAABAQCBgATYAQEB8AEAAAALAAAAAAAAAAEA"
+ "AAAAAAAAAQAAAAYAAABwAAAAAgAAAAMAAACIAAAAAwAAAAEAAACUAAAABQAAAAMAAACgAAAABgAA"
+ "AAEAAAC4AAAAASAAAAIAAADYAAAAAiAAAAYAAAACAQAAAyAAAAIAAAAxAQAAACAAAAEAAAA7AQAA"
+ "ABAAAAEAAABMAQAA";
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
@@ -314,6 +339,12 @@ TEST_F(DexLayoutTest, DexFileLayout) {
ASSERT_TRUE(DexFileLayoutExec(&error_msg)) << error_msg;
}
+TEST_F(DexLayoutTest, UnreferencedCatchHandler) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg)) << error_msg;
+}
TEST_F(DexLayoutTest, DuplicateOffset) {
ScratchFile temp;
WriteBase64ToFile(kDexFileDuplicateOffset, temp.GetFile());
@@ -351,11 +382,40 @@ TEST_F(DexLayoutTest, NullSetRefListElement) {
}
}
-TEST_F(DexLayoutTest, UnreferencedCatchHandler) {
- // Disable test on target.
- TEST_DISABLED_FOR_TARGET();
+TEST_F(DexLayoutTest, MultiClassData) {
+ ScratchFile temp;
+ WriteBase64ToFile(kMultiClassDataInputDex, temp.GetFile());
+ ScratchFile temp2;
+ WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
+ EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
std::string error_msg;
- ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg)) << error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ EXPECT_TRUE(result);
+ if (!result) {
+ LOG(ERROR) << "Error " << error_msg;
+ }
+}
+
+TEST_F(DexLayoutTest, UnalignedCodeInfo) {
+ ScratchFile temp;
+ WriteBase64ToFile(kUnalignedCodeInfoInputDex, temp.GetFile());
+ ScratchFile temp2;
+ WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
+ EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
+ std::string error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ EXPECT_TRUE(result);
+ if (!result) {
+ LOG(ERROR) << "Error " << error_msg;
+ }
}
} // namespace art
diff --git a/runtime/Android.bp b/runtime/Android.bp
index d136aa15b3..b4c7b9cc6a 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -171,6 +171,7 @@ cc_defaults {
"native/org_apache_harmony_dalvik_ddmc_DdmServer.cc",
"native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc",
"native/sun_misc_Unsafe.cc",
+ "non_debuggable_classes.cc",
"oat.cc",
"oat_file.cc",
"oat_file_assistant.cc",
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 01bd177221..e5f6f11326 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -53,6 +53,7 @@ Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
static const char* arm64_known_variants[] = {
"cortex-a35",
"exynos-m1",
+ "exynos-m2",
"denver64",
"kryo"
};
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index bfbe4816ba..7cb50b7118 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1183,15 +1183,13 @@ ENTRY art_quick_lock_object
add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
.Lretry_lock:
ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
- ldxr w1, [x4]
- mov x3, x1
- and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
+ ldaxr w1, [x4] // acquire needed only in most common case
+ and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
cbnz w3, .Lnot_unlocked // already thin locked
// unlocked case - x1: original lock word that's zero except for the read barrier bits.
orr x2, x1, x2 // x2 holds thread id with count of 0 with preserved read barrier bits
stxr w3, w2, [x4]
cbnz w3, .Llock_stxr_fail // store failed, retry
- dmb ishld // full (LoadLoad|LoadStore) memory barrier
ret
.Lnot_unlocked: // x1: original lock word
lsr w3, w1, LOCK_WORD_STATE_SHIFT
@@ -1200,8 +1198,7 @@ ENTRY art_quick_lock_object
uxth w2, w2 // zero top 16 bits
cbnz w2, .Lslow_lock // lock word and self thread id's match -> recursive lock
// else contention, go to slow path
- mov x3, x1 // copy the lock word to check count overflow.
- and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits.
+ and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits.
add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count in lock word placing in w2 to check overflow
lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT // if the first gc state bit is set, we overflowed.
cbnz w3, .Lslow_lock // if we overflow the count go slow path
@@ -1246,23 +1243,19 @@ ENTRY art_quick_unlock_object
lsr w2, w1, LOCK_WORD_STATE_SHIFT
cbnz w2, .Lslow_unlock // if either of the top two bits are set, go slow path
ldr w2, [xSELF, #THREAD_ID_OFFSET]
- mov x3, x1 // copy lock word to check thread id equality
- and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
+ and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
eor w3, w3, w2 // lock_word.ThreadId() ^ self->ThreadId()
uxth w3, w3 // zero top 16 bits
cbnz w3, .Lslow_unlock // do lock word and self thread id's match?
- mov x3, x1 // copy lock word to detect transition to unlocked
- and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
+ and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
bpl .Lrecursive_thin_unlock
// transition to unlocked
- mov x3, x1
- and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED // w3: zero except for the preserved read barrier bits
- dmb ish // full (LoadStore|StoreStore) memory barrier
+ and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED // w3: zero except for the preserved read barrier bits
#ifndef USE_READ_BARRIER
- str w3, [x4]
+ stlr w3, [x4]
#else
- stxr w2, w3, [x4] // Need to use atomic instructions for read barrier
+ stlxr w2, w3, [x4] // Need to use atomic instructions for read barrier
cbnz w2, .Lunlock_stxr_fail // store failed, retry
#endif
ret
@@ -1276,7 +1269,7 @@ ENTRY art_quick_unlock_object
#endif
ret
.Lunlock_stxr_fail:
- b .Lretry_unlock // retry
+ b .Lretry_unlock // retry
.Lslow_unlock:
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
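The entrypoint changes replace separate dmb barriers with acquire/release-annotated exclusives: ldaxr gives the lock's load its acquire semantics, and stlr/stlxr gives the unlock's store its release semantics, which ARMv8 implements more cheaply than full barriers. In C++ memory-model terms, the fast paths correspond roughly to the following sketch (intended ordering only, not the actual runtime code):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> lock_word{0};

    // Lock fast path (ldaxr + stxr loop): the acquire on the load keeps all
    // later accesses in the critical section from moving above the lock,
    // so no trailing dmb ishld is needed.
    bool TryLockFastPath(uint32_t thread_id) {
      uint32_t expected = 0;  // unlocked lock word
      return lock_word.compare_exchange_weak(
          expected, thread_id, std::memory_order_acquire, std::memory_order_relaxed);
    }

    // Unlock fast path (stlr): the release on the store keeps all earlier
    // accesses in the critical section from moving below the unlock,
    // so no leading dmb ish is needed.
    void UnlockFastPath() {
      lock_word.store(0, std::memory_order_release);
    }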
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 473d9cf74e..685e26c78d 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -55,8 +55,10 @@ inline mirror::Class* ArtMethod::GetDeclaringClass() {
if (kIsDebugBuild) {
if (!IsRuntimeMethod()) {
CHECK(result != nullptr) << this;
- CHECK(result->IsIdxLoaded() || result->IsErroneous())
- << result->GetStatus() << " " << result->PrettyClass();
+ if (kCheckDeclaringClassState) {
+ CHECK(result->IsIdxLoaded() || result->IsErroneous())
+ << result->GetStatus() << " " << result->PrettyClass();
+ }
} else {
CHECK(result == nullptr) << this;
}
@@ -89,7 +91,7 @@ ALWAYS_INLINE static inline void DoGetAccessFlagsHelper(ArtMethod* method)
template <ReadBarrierOption kReadBarrierOption>
inline uint32_t ArtMethod::GetAccessFlags() {
- if (kIsDebugBuild) {
+ if (kCheckDeclaringClassState) {
Thread* self = Thread::Current();
if (!Locks::mutator_lock_->IsSharedHeld(self)) {
if (self->IsThreadSuspensionAllowable()) {
@@ -118,8 +120,10 @@ inline uint16_t ArtMethod::GetMethodIndexDuringLinking() {
}
inline uint32_t ArtMethod::GetDexMethodIndex() {
- DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
- GetDeclaringClass()->IsErroneous());
+ if (kCheckDeclaringClassState) {
+ CHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
+ GetDeclaringClass()->IsErroneous());
+ }
return GetDexMethodIndexUnchecked();
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 99d7a49dc9..cd1950c0e2 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -53,6 +53,8 @@ class PointerArray;
class ArtMethod FINAL {
public:
+ static constexpr bool kCheckDeclaringClassState = kIsDebugBuild;
+
ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
method_index_(0), hotness_count_(0) { }
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index c7a94a90dc..4a2e34f243 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -246,7 +246,7 @@ ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32
ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
// String compression feature.
-#define STRING_COMPRESSION_FEATURE 0
+#define STRING_COMPRESSION_FEATURE 1
ADD_TEST_EQ(STRING_COMPRESSION_FEATURE, art::mirror::kUseStringCompression);
#if defined(__cplusplus)
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b93b293435..24846e5ceb 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -46,6 +46,7 @@ Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
+Mutex* Locks::jdwp_event_list_lock_ = nullptr;
Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
@@ -998,6 +999,7 @@ void Locks::Init() {
DCHECK(verifier_deps_lock_ != nullptr);
DCHECK(host_dlopen_handles_lock_ != nullptr);
DCHECK(intern_table_lock_ != nullptr);
+ DCHECK(jdwp_event_list_lock_ != nullptr);
DCHECK(jni_function_table_lock_ != nullptr);
DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
@@ -1040,6 +1042,10 @@ void Locks::Init() {
DCHECK(runtime_shutdown_lock_ == nullptr);
runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kJdwpEventListLock);
+ DCHECK(jdwp_event_list_lock_ == nullptr);
+ jdwp_event_list_lock_ = new Mutex("JDWP event list lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
DCHECK(profiler_lock_ == nullptr);
profiler_lock_ = new Mutex("profiler lock", current_lock_level);
@@ -1167,6 +1173,8 @@ void Locks::Init() {
expected_mutexes_on_weak_ref_access_.push_back(dex_lock_);
classlinker_classes_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
expected_mutexes_on_weak_ref_access_.push_back(classlinker_classes_lock_);
+ jdwp_event_list_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(jdwp_event_list_lock_);
jni_libraries_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
expected_mutexes_on_weak_ref_access_.push_back(jni_libraries_lock_);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 9b6938f9bf..c59664b9cd 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -630,8 +630,12 @@ class Locks {
// Guards shutdown of the runtime.
static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+ static Mutex* jdwp_event_list_lock_
+ ACQUIRED_AFTER(runtime_shutdown_lock_)
+ ACQUIRED_BEFORE(breakpoint_lock_);
+
// Guards background profiler global state.
- static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(jdwp_event_list_lock_);
// Guards trace (ie traceview) requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
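The new lock is slotted into ART's global lock hierarchy between runtime_shutdown_lock_ and profiler_lock_. The ACQUIRED_AFTER/ACQUIRED_BEFORE macros wrap Clang thread-safety attributes that document (and, where the analysis supports it, check) the required acquisition order. A minimal sketch using the raw attributes (generic Clang annotations, not ART's Mutex wrapper):

    // Build with: clang++ -Wthread-safety -fsyntax-only example.cc
    struct __attribute__((capability("mutex"))) Mutex {
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };

    Mutex shutdown_lock;
    // Declared ordering: shutdown_lock, then event_list_lock, then profiler_lock.
    Mutex event_list_lock __attribute__((acquired_after(shutdown_lock)));
    Mutex profiler_lock __attribute__((acquired_after(event_list_lock)));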
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 21cdede06b..e5722a13a7 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -139,7 +139,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_FALSE(JavaLangObject->IsFinal());
EXPECT_FALSE(JavaLangObject->IsPrimitive());
EXPECT_FALSE(JavaLangObject->IsSynthetic());
- EXPECT_EQ(2U, JavaLangObject->NumDirectMethods());
+ EXPECT_EQ(4U, JavaLangObject->NumDirectMethods());
EXPECT_EQ(11U, JavaLangObject->NumVirtualMethods());
if (!kUseBrooksReadBarrier) {
EXPECT_EQ(2U, JavaLangObject->NumInstanceFields());
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 53be30eafc..37963e49e7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3961,7 +3961,14 @@ void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
- (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
+ (c->IsVariableSize() || c->GetObjectSize() == byte_count))
+ << "ClassFlags=" << c->GetClassFlags()
+ << " IsClassClass=" << c->IsClassClass()
+ << " byte_count=" << byte_count
+ << " IsVariableSize=" << c->IsVariableSize()
+ << " ObjectSize=" << c->GetObjectSize()
+ << " sizeof(Class)=" << sizeof(mirror::Class)
+ << " klass=" << c.Ptr();
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index e59c4bb28e..495fec7a48 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1111,7 +1111,9 @@ void Hprof::DumpHeapObject(mirror::Object* obj) {
if (space != nullptr) {
if (space->IsZygoteSpace()) {
heap_type = HPROF_HEAP_ZYGOTE;
- } else if (space->IsImageSpace()) {
+ } else if (space->IsImageSpace() && heap->ObjectIsInBootImageSpace(obj)) {
+ // Only count objects in the boot image as HPROF_HEAP_IMAGE; this leaves app image objects as
+ // HPROF_HEAP_APP. b/35762934
heap_type = HPROF_HEAP_IMAGE;
}
} else {
diff --git a/runtime/image.cc b/runtime/image.cc
index 4e6da79205..243051e3bd 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '8', '\0' }; // hash-based DexCache types
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '9', '\0' }; // Enable string compression.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 86af6d44db..af29468062 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -203,7 +203,8 @@ struct JdwpState {
*/
void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
const JValue* returnValue)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A field of interest has been accessed or modified. This is used for field access and field
@@ -214,7 +215,8 @@ struct JdwpState {
*/
void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
const JValue* fieldValue, bool is_modification)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* An exception has been thrown.
@@ -223,19 +225,22 @@ struct JdwpState {
*/
void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
const EventLocation* pCatchLoc, mirror::Object* thisPtr)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A thread has started or stopped.
*/
void PostThreadChange(Thread* thread, bool start)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Class has been prepared.
*/
void PostClassPrepare(mirror::Class* klass)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* The VM is about to stop.
@@ -259,7 +264,7 @@ struct JdwpState {
void SendRequest(ExpandBuf* pReq);
void ResetState()
- REQUIRES(!event_list_lock_)
+ REQUIRES(!Locks::jdwp_event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/* atomic ops to get next serial number */
@@ -268,7 +273,7 @@ struct JdwpState {
void Run()
REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_,
- !attach_lock_, !event_list_lock_);
+ !attach_lock_, !Locks::jdwp_event_list_lock_);
/*
* Register an event by adding it to the event list.
@@ -277,25 +282,25 @@ struct JdwpState {
* may discard its pointer after calling this.
*/
JdwpError RegisterEvent(JdwpEvent* pEvent)
- REQUIRES(!event_list_lock_)
+ REQUIRES(!Locks::jdwp_event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister an event, given the requestId.
*/
void UnregisterEventById(uint32_t requestId)
- REQUIRES(!event_list_lock_)
+ REQUIRES(!Locks::jdwp_event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass)
- REQUIRES(!event_list_lock_)
+ REQUIRES(!Locks::jdwp_event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister all events.
*/
void UnregisterAll()
- REQUIRES(!event_list_lock_)
+ REQUIRES(!Locks::jdwp_event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -310,16 +315,16 @@ struct JdwpState {
ObjectId threadId)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
- REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void EventFinish(ExpandBuf* pReq);
bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(!event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterEvent(JdwpEvent* pEvent)
- REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
/*
@@ -387,9 +392,8 @@ struct JdwpState {
AtomicInteger event_serial_;
// Linked list of events requested by the debugger (breakpoints, class prep, etc).
- Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_);
- JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
- size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
+ JdwpEvent* event_list_ GUARDED_BY(Locks::jdwp_event_list_lock_);
+ size_t event_list_size_ GUARDED_BY(Locks::jdwp_event_list_lock_); // Number of elements in event_list_.
// Used to synchronize JDWP command handler thread and event threads so only one
// thread does JDWP stuff at a time. This prevents interleaving of command handling
@@ -410,7 +414,7 @@ struct JdwpState {
// When the runtime shuts down, it needs to stop JDWP command handler thread by closing the
// JDWP connection. However, if the JDWP thread is processing a command, it needs to wait
// for the command to finish so we can send its reply before closing the connection.
- Mutex shutdown_lock_ ACQUIRED_AFTER(event_list_lock_);
+ Mutex shutdown_lock_ ACQUIRED_AFTER(Locks::jdwp_event_list_lock_);
ConditionVariable shutdown_cond_ GUARDED_BY(shutdown_lock_);
bool processing_request_ GUARDED_BY(shutdown_lock_);
};
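
The REQUIRES(!Locks::jdwp_event_list_lock_) annotations above are Clang thread-safety "negative" requirements: the caller must not already hold the lock because the callee acquires it internally. A stripped-down sketch of the idiom using raw Clang attributes (assumed to be roughly what ART's macros expand to); compile with clang++ -Wthread-safety:

    #define CAPABILITY(x)       __attribute__((capability(x)))
    #define ACQUIRE(...)        __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)        __attribute__((release_capability(__VA_ARGS__)))
    #define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE(this) {}
      void Unlock() RELEASE(this) {}
    };

    Mutex event_list_lock;

    // Callers must NOT already hold event_list_lock; the function takes it
    // itself, which is what REQUIRES(!...) expresses in the header above.
    void RegisterEventSketch() LOCKS_EXCLUDED(event_list_lock) {
      event_list_lock.Lock();
      // ... mutate the event list ...
      event_list_lock.Unlock();
    }
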
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 96249f9b58..36d733ea08 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -237,7 +237,7 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
/*
* Add to list.
*/
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
if (event_list_ != nullptr) {
pEvent->next = event_list_;
event_list_->prev = pEvent;
@@ -256,7 +256,7 @@ void JdwpState::UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::Class> h_klass(hs.NewHandle(klass));
std::vector<JdwpEvent*> to_remove;
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
for (JdwpEvent* cur_event = event_list_; cur_event != nullptr; cur_event = cur_event->next) {
// Fill in the to_remove list
bool found_event = false;
@@ -356,7 +356,7 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
void JdwpState::UnregisterEventById(uint32_t requestId) {
bool found = false;
{
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
if (pEvent->requestId == requestId) {
@@ -383,7 +383,7 @@ void JdwpState::UnregisterEventById(uint32_t requestId) {
* Remove all entries from the event list.
*/
void JdwpState::UnregisterAll() {
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
JdwpEvent* pEvent = event_list_;
while (pEvent != nullptr) {
@@ -593,7 +593,7 @@ void JdwpState::FindMatchingEventsLocked(JdwpEventKind event_kind, const ModBask
*/
bool JdwpState::FindMatchingEvents(JdwpEventKind event_kind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list) {
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
match_list->reserve(event_list_size_);
FindMatchingEventsLocked(event_kind, basket, match_list);
return !match_list->empty();
@@ -908,7 +908,7 @@ void JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thi
std::vector<JdwpEvent*> match_list;
{
// We use the locked version because we have multiple possible match events.
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
match_list.reserve(event_list_size_);
if ((eventFlags & Dbg::kBreakpoint) != 0) {
FindMatchingEventsLocked(EK_BREAKPOINT, basket, &match_list);
@@ -955,7 +955,7 @@ void JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thi
}
{
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1041,7 +1041,7 @@ void JdwpState::PostFieldEvent(const EventLocation* pLoc, ArtField* field,
}
{
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1103,7 +1103,7 @@ void JdwpState::PostThreadChange(Thread* thread, bool start) {
}
{
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1213,7 +1213,7 @@ void JdwpState::PostException(const EventLocation* pThrowLoc, mirror::Throwable*
}
{
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1295,7 +1295,7 @@ void JdwpState::PostClassPrepare(mirror::Class* klass) {
}
{
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
CleanupMatchList(match_list);
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 7707ba4932..64ed724afc 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -227,7 +227,6 @@ JdwpState::JdwpState(const JdwpOptions* options)
last_activity_time_ms_(0),
request_serial_(0x10000000),
event_serial_(0x20000000),
- event_list_lock_("JDWP event list lock", kJdwpEventListLock),
event_list_(nullptr),
event_list_size_(0),
jdwp_token_lock_("JDWP token lock"),
@@ -331,7 +330,7 @@ void JdwpState::ResetState() {
UnregisterAll();
{
- MutexLock mu(Thread::Current(), event_list_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
CHECK(event_list_ == nullptr);
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b1ba95287b..62acedfb1b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -987,8 +987,11 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
if (ContainsPc(entry_point)) {
info->SetSavedEntryPoint(entry_point);
- Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
- info->GetMethod(), GetQuickToInterpreterBridge());
+ // Don't call Instrumentation::UpdateMethodsCode, as it can check the declaring
+ // class of the method. We may be concurrently running a GC which makes accessing
+ // the class unsafe. We know it is OK to bypass the instrumentation as we've just
+ // checked that the current entry point is JIT compiled code.
+ info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
}
}
@@ -1270,6 +1273,9 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
+ // Make a best effort to find the type index in the method's dex file.
+ // We could search all open dex files, but that might be expensive
+ // and is probably not worth it.
class_dex_file = dex_file;
type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
} else {
@@ -1278,7 +1284,6 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
if (!type_index.IsValid()) {
// Could be a proxy class or an array for which we couldn't find the type index.
- // TODO(calin): can we really miss the type index for arrays here?
continue;
}
if (ContainsElement(dex_base_locations, class_dex_file->GetBaseLocation())) {
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 35ce98efad..dbb5a4c387 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -32,7 +32,7 @@ class StubTest_ReadBarrierForRoot_Test;
namespace mirror {
// String Compression
-static constexpr bool kUseStringCompression = false;
+static constexpr bool kUseStringCompression = true;
enum class StringCompressionFlag : uint32_t {
kCompressed = 0u,
kUncompressed = 1u
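
Flipping kUseStringCompression to true lets ASCII-only strings store one byte per character, which is also why the expected object sizes in the 906-iterate-heap test shrink later in this change. The flag values in the enum above suggest the compression state is packed into the count field next to the length; a hedged sketch of that encoding (the exact field layout is ART-internal, so treat these helpers as illustrative):

    #include <cstdint>

    enum class StringCompressionFlag : uint32_t {
      kCompressed = 0u,
      kUncompressed = 1u
    };

    // Pack a character count and a compression bit into one 32-bit field.
    inline int32_t GetFlaggedCount(int32_t length, bool compressible) {
      return (length << 1) |
             static_cast<int32_t>(compressible ? StringCompressionFlag::kCompressed
                                               : StringCompressionFlag::kUncompressed);
    }

    inline bool IsCompressed(int32_t count_field) {
      return (count_field & 1) ==
             static_cast<int32_t>(StringCompressionFlag::kCompressed);
    }

    inline int32_t GetLengthFromCount(int32_t count_field) {
      return count_field >> 1;
    }
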
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index fd22d9e646..100f476b43 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -26,9 +26,11 @@
#include "jit/jit.h"
#include "jni_internal.h"
#include "JNIHelp.h"
+#include "non_debuggable_classes.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
#include "thread-inl.h"
+#include "thread_list.h"
#include "trace.h"
#if defined(__linux__)
@@ -39,6 +41,10 @@
namespace art {
+// Set to true to always determine the non-debuggable classes even if we would not allow a debugger
+// to actually attach.
+static constexpr bool kAlwaysCollectNonDebuggableClasses = kIsDebugBuild;
+
using android::base::StringPrintf;
static void EnableDebugger() {
@@ -68,6 +74,39 @@ static void EnableDebugger() {
}
}
+static void DoCollectNonDebuggableCallback(Thread* thread, void* data ATTRIBUTE_UNUSED)
+ REQUIRES(Locks::mutator_lock_) {
+ class NonDebuggableStacksVisitor : public StackVisitor {
+ public:
+ explicit NonDebuggableStacksVisitor(Thread* t)
+ : StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
+
+ ~NonDebuggableStacksVisitor() OVERRIDE {}
+
+ bool VisitFrame() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ if (GetMethod()->IsRuntimeMethod()) {
+ return true;
+ }
+ NonDebuggableClasses::AddNonDebuggableClass(GetMethod()->GetDeclaringClass());
+ if (kIsDebugBuild) {
+ LOG(INFO) << GetMethod()->GetDeclaringClass()->PrettyClass()
+ << " might not be fully debuggable/deoptimizable due to "
+ << GetMethod()->PrettyMethod() << " appearing on the stack during zygote fork.";
+ }
+ return true;
+ }
+ };
+ NonDebuggableStacksVisitor visitor(thread);
+ visitor.WalkStack();
+}
+
+static void CollectNonDebuggableClasses() {
+ Runtime* const runtime = Runtime::Current();
+ ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, nullptr);
+}
+
static void EnableDebugFeatures(uint32_t debug_flags) {
// Must match values in com.android.internal.os.Zygote.
enum {
@@ -131,12 +170,17 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
debug_flags &= ~DEBUG_ALWAYS_JIT;
}
+ bool needs_non_debuggable_classes = false;
if ((debug_flags & DEBUG_JAVA_DEBUGGABLE) != 0) {
runtime->AddCompilerOption("--debuggable");
runtime->SetJavaDebuggable(true);
// Deoptimize the boot image as it may be non-debuggable.
runtime->DeoptimizeBootImage();
debug_flags &= ~DEBUG_JAVA_DEBUGGABLE;
+ needs_non_debuggable_classes = true;
+ }
+ if (needs_non_debuggable_classes || kAlwaysCollectNonDebuggableClasses) {
+ CollectNonDebuggableClasses();
}
if ((debug_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
diff --git a/runtime/native/java_lang_Object.cc b/runtime/native/java_lang_Object.cc
index 6989244280..fb4f99a126 100644
--- a/runtime/native/java_lang_Object.cc
+++ b/runtime/native/java_lang_Object.cc
@@ -48,12 +48,19 @@ static void Object_waitJI(JNIEnv* env, jobject java_this, jlong ms, jint ns) {
soa.Decode<mirror::Object>(java_this)->Wait(soa.Self(), ms, ns);
}
+static jint Object_identityHashCodeNative(JNIEnv* env, jclass, jobject javaObject) {
+ ScopedFastNativeObjectAccess soa(env);
+ ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(javaObject);
+ return static_cast<jint>(o->IdentityHashCode());
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Object, internalClone, "()Ljava/lang/Object;"),
FAST_NATIVE_METHOD(Object, notify, "()V"),
FAST_NATIVE_METHOD(Object, notifyAll, "()V"),
OVERLOADED_FAST_NATIVE_METHOD(Object, wait, "()V", wait),
OVERLOADED_FAST_NATIVE_METHOD(Object, wait, "(JI)V", waitJI),
+ FAST_NATIVE_METHOD(Object, identityHashCodeNative, "(Ljava/lang/Object;)I"),
};
void register_java_lang_Object(JNIEnv* env) {
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index d7c9cd07b5..2cabce8868 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -227,15 +227,6 @@ static void System_arraycopyBooleanUnchecked(JNIEnv* env, jclass, jobject javaSr
javaDst, dstPos, count);
}
-static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
- if (UNLIKELY(javaObject == nullptr)) {
- return 0;
- }
- ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(javaObject);
- return static_cast<jint>(o->IdentityHashCode());
-}
-
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(System, arraycopy, "(Ljava/lang/Object;ILjava/lang/Object;II)V"),
FAST_NATIVE_METHOD(System, arraycopyCharUnchecked, "([CI[CII)V"),
@@ -246,7 +237,6 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(System, arraycopyFloatUnchecked, "([FI[FII)V"),
FAST_NATIVE_METHOD(System, arraycopyDoubleUnchecked, "([DI[DII)V"),
FAST_NATIVE_METHOD(System, arraycopyBooleanUnchecked, "([ZI[ZII)V"),
- FAST_NATIVE_METHOD(System, identityHashCode, "(Ljava/lang/Object;)I"),
};
void register_java_lang_System(JNIEnv* env) {
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
new file mode 100644
index 0000000000..db121a90e2
--- /dev/null
+++ b/runtime/non_debuggable_classes.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "non_debuggable_classes.h"
+
+#include "base/logging.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "obj_ptr-inl.h"
+#include "ScopedLocalRef.h"
+#include "thread-inl.h"
+
+namespace art {
+
+std::vector<jclass> NonDebuggableClasses::non_debuggable_classes;
+
+void NonDebuggableClasses::AddNonDebuggableClass(ObjPtr<mirror::Class> klass) {
+ Thread* self = Thread::Current();
+ JNIEnvExt* env = self->GetJniEnv();
+ for (jclass c : non_debuggable_classes) {
+ if (self->DecodeJObject(c)->AsClass() == klass.Ptr()) {
+ return;
+ }
+ }
+ ScopedLocalRef<jclass> lr(env, env->AddLocalReference<jclass>(klass));
+ non_debuggable_classes.push_back(reinterpret_cast<jclass>(env->NewGlobalRef(lr.get())));
+}
+
+} // namespace art
diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h
new file mode 100644
index 0000000000..b72afd8299
--- /dev/null
+++ b/runtime/non_debuggable_classes.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NON_DEBUGGABLE_CLASSES_H_
+#define ART_RUNTIME_NON_DEBUGGABLE_CLASSES_H_
+
+#include <vector>
+
+#include "base/mutex.h"
+#include "jni.h"
+#include "obj_ptr.h"
+
+namespace art {
+
+namespace mirror {
+class Class;
+} // namespace mirror
+
+struct NonDebuggableClasses {
+ public:
+ static const std::vector<jclass>& GetNonDebuggableClasses() {
+ return non_debuggable_classes;
+ }
+
+ static void AddNonDebuggableClass(ObjPtr<mirror::Class> klass) REQUIRES(Locks::mutator_lock_);
+
+ private:
+ static std::vector<jclass> non_debuggable_classes;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_NON_DEBUGGABLE_CLASSES_H_
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 77ca9ce2e5..450b6b6bcf 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -73,7 +73,6 @@
namespace openjdkjvmti {
EventHandler gEventHandler;
-ObjectTagTable gObjectTagTable(&gEventHandler);
#define ENSURE_NON_NULL(n) \
do { \
@@ -334,7 +333,7 @@ class JvmtiFunctions {
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
ENSURE_HAS_CAP(env, can_tag_objects);
- HeapUtil heap_util(&gObjectTagTable);
+ HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.FollowReferences(env,
heap_filter,
klass,
@@ -349,7 +348,7 @@ class JvmtiFunctions {
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
ENSURE_HAS_CAP(env, can_tag_objects);
- HeapUtil heap_util(&gObjectTagTable);
+ HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
}
@@ -363,7 +362,7 @@ class JvmtiFunctions {
art::ScopedObjectAccess soa(jni_env);
art::ObjPtr<art::mirror::Object> obj = soa.Decode<art::mirror::Object>(object);
- if (!gObjectTagTable.GetTag(obj.Ptr(), tag_ptr)) {
+ if (!ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table->GetTag(obj.Ptr(), tag_ptr)) {
*tag_ptr = 0;
}
@@ -384,7 +383,7 @@ class JvmtiFunctions {
art::ScopedObjectAccess soa(jni_env);
art::ObjPtr<art::mirror::Object> obj = soa.Decode<art::mirror::Object>(object);
- gObjectTagTable.Set(obj.Ptr(), tag);
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table->Set(obj.Ptr(), tag);
return ERR(NONE);
}
@@ -403,12 +402,12 @@ class JvmtiFunctions {
}
art::ScopedObjectAccess soa(jni_env);
- return gObjectTagTable.GetTaggedObjects(env,
- tag_count,
- tags,
- count_ptr,
- object_result_ptr,
- tag_result_ptr);
+ return ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table->GetTaggedObjects(env,
+ tag_count,
+ tags,
+ count_ptr,
+ object_result_ptr,
+ tag_result_ptr);
}
static jvmtiError ForceGarbageCollection(jvmtiEnv* env) {
@@ -579,7 +578,7 @@ class JvmtiFunctions {
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
- HeapUtil heap_util(&gObjectTagTable);
+ HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
@@ -678,6 +677,7 @@ class JvmtiFunctions {
ENSURE_HAS_CAP(env, can_retransform_classes);
std::string error_msg;
jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
+ &gEventHandler,
art::Runtime::Current(),
art::Thread::Current(),
class_count,
@@ -695,6 +695,7 @@ class JvmtiFunctions {
ENSURE_HAS_CAP(env, can_redefine_classes);
std::string error_msg;
jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
+ &gEventHandler,
art::Runtime::Current(),
art::Thread::Current(),
class_count,
@@ -1162,6 +1163,8 @@ class JvmtiFunctions {
static jvmtiError DisposeEnvironment(jvmtiEnv* env) {
ENSURE_VALID_ENV(env);
gEventHandler.RemoveArtJvmTiEnv(ArtJvmTiEnv::AsArtJvmTiEnv(env));
+ art::Runtime::Current()->RemoveSystemWeakHolder(
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
delete env;
return OK;
}
@@ -1333,13 +1336,25 @@ static bool IsJvmtiVersion(jint version) {
version == JVMTI_VERSION;
}
+extern const jvmtiInterface_1 gJvmtiInterface;
+ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
+ : art_vm(runtime),
+ local_data(nullptr),
+ capabilities(),
+ object_tag_table(new ObjectTagTable(event_handler)) {
+ functions = &gJvmtiInterface;
+}
+
// Creates an ArtJvmTiEnv and returns it through the new_jvmtiEnv out-parameter,
// which points at the memory that will hold the new environment.
static void CreateArtJvmTiEnv(art::JavaVMExt* vm, /*out*/void** new_jvmtiEnv) {
- struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm);
+ struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm, &gEventHandler);
*new_jvmtiEnv = env;
gEventHandler.RegisterArtJvmTiEnv(env);
+
+ art::Runtime::Current()->AddSystemWeakHolder(
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
}
// A hook that the runtime uses to allow plugins to handle GetEnv calls. It returns true and
@@ -1371,7 +1386,6 @@ extern "C" bool ArtPlugin_Initialize() {
SearchUtil::Register();
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
- runtime->AddSystemWeakHolder(&gObjectTagTable);
return true;
}
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index 99139a1f37..2ff3a478c4 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -48,8 +48,7 @@
namespace openjdkjvmti {
-extern const jvmtiInterface_1 gJvmtiInterface;
-extern EventHandler gEventHandler;
+class ObjectTagTable;
// A structure that is a jvmtiEnv with additional information for the runtime.
struct ArtJvmTiEnv : public jvmtiEnv {
@@ -60,10 +59,10 @@ struct ArtJvmTiEnv : public jvmtiEnv {
EventMasks event_masks;
std::unique_ptr<jvmtiEventCallbacks> event_callbacks;
- explicit ArtJvmTiEnv(art::JavaVMExt* runtime)
- : art_vm(runtime), local_data(nullptr), capabilities() {
- functions = &gJvmtiInterface;
- }
+ // Tagging is specific to the jvmtiEnv.
+ std::unique_ptr<ObjectTagTable> object_tag_table;
+
+ ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler);
static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
return art::down_cast<ArtJvmTiEnv*>(env);
diff --git a/runtime/openjdkjvmti/ti_field.cc b/runtime/openjdkjvmti/ti_field.cc
index 8c3f2fffbd..1e5fbda35b 100644
--- a/runtime/openjdkjvmti/ti_field.cc
+++ b/runtime/openjdkjvmti/ti_field.cc
@@ -88,7 +88,6 @@ jvmtiError FieldUtil::GetFieldName(jvmtiEnv* env,
*signature_ptr = signature_copy.get();
}
- // TODO: Support generic signature.
if (generic_ptr != nullptr) {
*generic_ptr = nullptr;
if (!art_field->GetDeclaringClass()->IsProxyClass()) {
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 7efeea7bbd..976ce66f11 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -31,6 +31,7 @@
#include "object_callbacks.h"
#include "object_tagging.h"
#include "obj_ptr-inl.h"
+#include "primitive.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
@@ -89,32 +90,88 @@ jint ReportString(art::ObjPtr<art::mirror::Object> obj,
return 0;
}
-} // namespace
+// Report the contents of a primitive array, if a callback is set.
+jint ReportPrimitiveArray(art::ObjPtr<art::mirror::Object> obj,
+ jvmtiEnv* env,
+ ObjectTagTable* tag_table,
+ const jvmtiHeapCallbacks* cb,
+ const void* user_data) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (UNLIKELY(cb->array_primitive_value_callback != nullptr) &&
+ obj->IsArrayInstance() &&
+ !obj->IsObjectArray()) {
+ art::ObjPtr<art::mirror::Array> array = obj->AsArray();
+ int32_t array_length = array->GetLength();
+ size_t component_size = array->GetClass()->GetComponentSize();
+ art::Primitive::Type art_prim_type = array->GetClass()->GetComponentType()->GetPrimitiveType();
+ jvmtiPrimitiveType prim_type =
+ static_cast<jvmtiPrimitiveType>(art::Primitive::Descriptor(art_prim_type)[0]);
+ DCHECK(prim_type == JVMTI_PRIMITIVE_TYPE_BOOLEAN ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_BYTE ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_CHAR ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_SHORT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_INT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_LONG ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_FLOAT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_DOUBLE);
-struct IterateThroughHeapData {
- IterateThroughHeapData(HeapUtil* _heap_util,
- jvmtiEnv* _env,
- jint heap_filter,
- art::ObjPtr<art::mirror::Class> klass,
- const jvmtiHeapCallbacks* _callbacks,
- const void* _user_data)
- : heap_util(_heap_util),
- filter_klass(klass),
- env(_env),
- callbacks(_callbacks),
- user_data(_user_data),
- filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
+ const jlong class_tag = tag_table->GetTagOrZero(obj->GetClass());
+ jlong array_tag = tag_table->GetTagOrZero(obj.Ptr());
+ const jlong saved_array_tag = array_tag;
+
+ jint result;
+ if (array_length == 0) {
+ result = cb->array_primitive_value_callback(class_tag,
+ obj->SizeOf(),
+ &array_tag,
+ 0,
+ prim_type,
+ nullptr,
+ const_cast<void*>(user_data));
+ } else {
+ jvmtiError alloc_error;
+ JvmtiUniquePtr<char[]> data = AllocJvmtiUniquePtr<char[]>(env,
+ array_length * component_size,
+ &alloc_error);
+ if (data == nullptr) {
+ // TODO: Not really sure what to do here. Should we abort the iteration and go all the way
+ // back? For now just warn.
+ LOG(WARNING) << "Unable to allocate buffer for array reporting! Silently dropping value.";
+ return 0;
+ }
+
+ memcpy(data.get(), array->GetRawData(component_size, 0), array_length * component_size);
+
+ result = cb->array_primitive_value_callback(class_tag,
+ obj->SizeOf(),
+ &array_tag,
+ array_length,
+ prim_type,
+ data.get(),
+ const_cast<void*>(user_data));
+ }
+
+ if (array_tag != saved_array_tag) {
+ tag_table->Set(obj.Ptr(), array_tag);
+ }
+
+ return result;
+ }
+ return 0;
+}
+
+struct HeapFilter {
+ explicit HeapFilter(jint heap_filter)
+ : filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
filter_out_untagged((heap_filter & JVMTI_HEAP_FILTER_UNTAGGED) != 0),
filter_out_class_tagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_TAGGED) != 0),
filter_out_class_untagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_UNTAGGED) != 0),
any_filter(filter_out_tagged ||
filter_out_untagged ||
filter_out_class_tagged ||
- filter_out_class_untagged),
- stop_reports(false) {
+ filter_out_class_untagged) {
}
- bool ShouldReportByHeapFilter(jlong tag, jlong class_tag) {
+ bool ShouldReportByHeapFilter(jlong tag, jlong class_tag) const {
if (!any_filter) {
return true;
}
@@ -131,16 +188,37 @@ struct IterateThroughHeapData {
return true;
}
- HeapUtil* heap_util;
- art::ObjPtr<art::mirror::Class> filter_klass;
- jvmtiEnv* env;
- const jvmtiHeapCallbacks* callbacks;
- const void* user_data;
const bool filter_out_tagged;
const bool filter_out_untagged;
const bool filter_out_class_tagged;
const bool filter_out_class_untagged;
const bool any_filter;
+};
+
+} // namespace
+
+struct IterateThroughHeapData {
+ IterateThroughHeapData(HeapUtil* _heap_util,
+ jvmtiEnv* _env,
+ art::ObjPtr<art::mirror::Class> klass,
+ jint _heap_filter,
+ const jvmtiHeapCallbacks* _callbacks,
+ const void* _user_data)
+ : heap_util(_heap_util),
+ heap_filter(_heap_filter),
+ filter_klass(klass),
+ env(_env),
+ callbacks(_callbacks),
+ user_data(_user_data),
+ stop_reports(false) {
+ }
+
+ HeapUtil* heap_util;
+ const HeapFilter heap_filter;
+ art::ObjPtr<art::mirror::Class> filter_klass;
+ jvmtiEnv* env;
+ const jvmtiHeapCallbacks* callbacks;
+ const void* user_data;
bool stop_reports;
};
@@ -163,7 +241,7 @@ static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg
ithd->heap_util->GetTags()->GetTag(klass.Ptr(), &class_tag);
// For simplicity, even if we find a tag = 0, assume 0 = not tagged.
- if (!ithd->ShouldReportByHeapFilter(tag, class_tag)) {
+ if (!ithd->heap_filter.ShouldReportByHeapFilter(tag, class_tag)) {
return;
}
@@ -202,7 +280,15 @@ static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg
ithd->stop_reports = (string_ret & JVMTI_VISIT_ABORT) != 0;
}
- // TODO Implement array primitive callback.
+ if (!ithd->stop_reports) {
+ jint array_ret = ReportPrimitiveArray(obj,
+ ithd->env,
+ ithd->heap_util->GetTags(),
+ ithd->callbacks,
+ ithd->user_data);
+ ithd->stop_reports = (array_ret & JVMTI_VISIT_ABORT) != 0;
+ }
+
// TODO Implement primitive field callback.
}
@@ -215,18 +301,13 @@ jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env,
return ERR(NULL_POINTER);
}
- if (callbacks->array_primitive_value_callback != nullptr) {
- // TODO: Implement.
- return ERR(NOT_IMPLEMENTED);
- }
-
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self); // Now we know we have the shared lock.
IterateThroughHeapData ithd(this,
env,
- heap_filter,
soa.Decode<art::mirror::Class>(klass),
+ heap_filter,
callbacks,
user_data);
@@ -241,11 +322,15 @@ class FollowReferencesHelper FINAL {
jvmtiEnv* jvmti_env,
art::ObjPtr<art::mirror::Object> initial_object,
const jvmtiHeapCallbacks* callbacks,
+ art::ObjPtr<art::mirror::Class> class_filter,
+ jint heap_filter,
const void* user_data)
: env(jvmti_env),
tag_table_(h->GetTags()),
initial_object_(initial_object),
callbacks_(callbacks),
+ class_filter_(class_filter),
+ heap_filter_(heap_filter),
user_data_(user_data),
start_(0),
stop_reports_(false) {
@@ -569,6 +654,11 @@ class FollowReferencesHelper FINAL {
}
}
}
+ } else {
+ if (!stop_reports_) {
+ jint array_ret = ReportPrimitiveArray(array, env, tag_table_, callbacks_, user_data_);
+ stop_reports_ = (array_ret & JVMTI_VISIT_ABORT) != 0;
+ }
}
}
@@ -682,11 +772,20 @@ class FollowReferencesHelper FINAL {
return 0;
}
+ if (UNLIKELY(class_filter_ != nullptr) && class_filter_ != referree->GetClass()) {
+ return JVMTI_VISIT_OBJECTS;
+ }
+
const jlong class_tag = tag_table_->GetTagOrZero(referree->GetClass());
+ jlong tag = tag_table_->GetTagOrZero(referree);
+
+ if (!heap_filter_.ShouldReportByHeapFilter(tag, class_tag)) {
+ return JVMTI_VISIT_OBJECTS;
+ }
+
const jlong referrer_class_tag =
referrer == nullptr ? 0 : tag_table_->GetTagOrZero(referrer->GetClass());
const jlong size = static_cast<jlong>(referree->SizeOf());
- jlong tag = tag_table_->GetTagOrZero(referree);
jlong saved_tag = tag;
jlong referrer_tag = 0;
jlong saved_referrer_tag = 0;
@@ -701,6 +800,7 @@ class FollowReferencesHelper FINAL {
referrer_tag_ptr = &referrer_tag;
}
}
+
jint length = -1;
if (referree->IsArrayInstance()) {
length = referree->AsArray()->GetLength();
@@ -730,6 +830,8 @@ class FollowReferencesHelper FINAL {
ObjectTagTable* tag_table_;
art::ObjPtr<art::mirror::Object> initial_object_;
const jvmtiHeapCallbacks* callbacks_;
+ art::ObjPtr<art::mirror::Class> class_filter_;
+ const HeapFilter heap_filter_;
const void* user_data_;
std::vector<art::mirror::Object*> worklist_;
@@ -744,8 +846,8 @@ class FollowReferencesHelper FINAL {
};
jvmtiError HeapUtil::FollowReferences(jvmtiEnv* env,
- jint heap_filter ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass,
jobject initial_object,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
@@ -753,11 +855,6 @@ jvmtiError HeapUtil::FollowReferences(jvmtiEnv* env,
return ERR(NULL_POINTER);
}
- if (callbacks->array_primitive_value_callback != nullptr) {
- // TODO: Implement.
- return ERR(NOT_IMPLEMENTED);
- }
-
art::Thread* self = art::Thread::Current();
art::gc::Heap* heap = art::Runtime::Current()->GetHeap();
@@ -771,10 +868,15 @@ jvmtiError HeapUtil::FollowReferences(jvmtiEnv* env,
art::ScopedThreadSuspension sts(self, art::kWaitingForVisitObjects);
art::ScopedSuspendAll ssa("FollowReferences");
+ art::ObjPtr<art::mirror::Class> class_filter = klass == nullptr
+ ? nullptr
+ : art::ObjPtr<art::mirror::Class>::DownCast(self->DecodeJObject(klass));
FollowReferencesHelper frh(this,
env,
self->DecodeJObject(initial_object),
callbacks,
+ class_filter,
+ heap_filter,
user_data);
frh.Init();
frh.Work();
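
With the NOT_IMPLEMENTED early-outs deleted above, both IterateThroughHeap and FollowReferences now honor array_primitive_value_callback. A hedged agent-side sketch using the standard JVMTI API (assumes the can_tag_objects capability has already been added; error handling trimmed):

    #include <cstdio>
    #include <cstring>
    #include <jvmti.h>

    // Invoked once per primitive array the heap walk visits.
    static jint JNICALL OnPrimitiveArray(jlong class_tag, jlong size, jlong* tag_ptr,
                                         jint element_count,
                                         jvmtiPrimitiveType element_type,
                                         const void* elements, void* user_data) {
      (void)class_tag; (void)size; (void)elements; (void)user_data;
      std::printf("primitive array: tag=%lld count=%d type=%c\n",
                  static_cast<long long>(*tag_ptr), element_count,
                  static_cast<char>(element_type));
      return 0;  // continue iterating; JVMTI_VISIT_ABORT would stop the walk
    }

    void DumpPrimitiveArrays(jvmtiEnv* jvmti) {
      jvmtiHeapCallbacks callbacks;
      std::memset(&callbacks, 0, sizeof(callbacks));
      callbacks.array_primitive_value_callback = OnPrimitiveArray;
      jvmti->IterateThroughHeap(/* heap_filter */ 0, /* klass */ nullptr,
                                &callbacks, /* user_data */ nullptr);
    }
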
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index 60371cfafe..e494cb6530 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -56,7 +56,6 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
}
void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
- // TODO: Events.
switch (phase) {
case RuntimePhase::kInitialAgents:
PhaseUtil::current_phase_ = JVMTI_PHASE_PRIMORDIAL;
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 60ce898d65..c4d20c007e 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -56,6 +56,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
+#include "non_debuggable_classes.h"
#include "object_lock.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
@@ -237,6 +238,9 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
} else if (klass->IsInterface()) {
*error_msg = "Modification of Interface classes is currently not supported";
return ERR(UNMODIFIABLE_CLASS);
+ } else if (klass->IsStringClass()) {
+ *error_msg = "Modification of String class is not supported";
+ return ERR(UNMODIFIABLE_CLASS);
} else if (klass->IsArrayClass()) {
*error_msg = "Modification of Array classes is not supported";
return ERR(UNMODIFIABLE_CLASS);
@@ -245,8 +249,13 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
return ERR(UNMODIFIABLE_CLASS);
}
- // TODO We should check if the class has non-obsoletable methods on the stack
- LOG(WARNING) << "presence of non-obsoletable methods on stacks is not currently checked";
+ for (jclass c : art::NonDebuggableClasses::GetNonDebuggableClasses()) {
+ if (klass.Get() == art::Thread::Current()->DecodeJObject(c)->AsClass()) {
+ *error_msg = "Class might have stack frames that cannot be made obsolete";
+ return ERR(UNMODIFIABLE_CLASS);
+ }
+ }
+
return OK;
}
@@ -294,6 +303,7 @@ Redefiner::ClassRedefinition::~ClassRedefinition() {
}
jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
@@ -341,6 +351,7 @@ jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
}
// Call all the transformation events.
jvmtiError res = Transformer::RetransformClassesDirect(env,
+ event_handler,
self,
&def_vector);
if (res != OK) {
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 65ee2912e2..4e6d05f056 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -88,6 +88,7 @@ class Redefiner {
// The caller is responsible for freeing it. The runtime makes its own copy of the data.
// TODO This function should call the transformation events.
static jvmtiError RedefineClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
diff --git a/runtime/openjdkjvmti/ti_search.cc b/runtime/openjdkjvmti/ti_search.cc
index df80f85ed8..f51a98f976 100644
--- a/runtime/openjdkjvmti/ti_search.cc
+++ b/runtime/openjdkjvmti/ti_search.cc
@@ -212,7 +212,6 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U
return ERR(WRONG_PHASE);
}
if (current->GetClassLinker() == nullptr) {
- // TODO: Support boot classpath change in OnLoad.
return ERR(WRONG_PHASE);
}
if (segment == nullptr) {
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 2fec631c00..36421b9137 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -63,12 +63,13 @@ namespace openjdkjvmti {
jvmtiError Transformer::RetransformClassesDirect(
ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Thread* self,
/*in-out*/std::vector<ArtClassDefinition>* definitions) {
for (ArtClassDefinition& def : *definitions) {
jint new_len = -1;
unsigned char* new_data = nullptr;
- gEventHandler.DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
GetJniEnv(env),
def.klass,
@@ -85,6 +86,7 @@ jvmtiError Transformer::RetransformClassesDirect(
}
jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
@@ -114,7 +116,7 @@ jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
}
definitions.push_back(std::move(def));
}
- res = RetransformClassesDirect(env, self, &definitions);
+ res = RetransformClassesDirect(env, event_handler, self, &definitions);
if (res != OK) {
return res;
}
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
index 65f2ae1353..c6a36e8e20 100644
--- a/runtime/openjdkjvmti/transform.h
+++ b/runtime/openjdkjvmti/transform.h
@@ -42,14 +42,20 @@
namespace openjdkjvmti {
+class EventHandler;
+
jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string* location);
class Transformer {
public:
static jvmtiError RetransformClassesDirect(
- ArtJvmTiEnv* env, art::Thread* self, /*in-out*/std::vector<ArtClassDefinition>* definitions);
+ ArtJvmTiEnv* env,
+ EventHandler* event_handler,
+ art::Thread* self,
+ /*in-out*/std::vector<ArtClassDefinition>* definitions);
static jvmtiError RetransformClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
diff --git a/test/639-checker-code-sinking/expected.txt b/test/639-checker-code-sinking/expected.txt
new file mode 100644
index 0000000000..52e756c231
--- /dev/null
+++ b/test/639-checker-code-sinking/expected.txt
@@ -0,0 +1,3 @@
+0
+class java.lang.Object
+43
diff --git a/test/639-checker-code-sinking/info.txt b/test/639-checker-code-sinking/info.txt
new file mode 100644
index 0000000000..9722bdff2e
--- /dev/null
+++ b/test/639-checker-code-sinking/info.txt
@@ -0,0 +1 @@
+Checker tests for the code sinking optimization pass.
diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java
new file mode 100644
index 0000000000..1da19b687c
--- /dev/null
+++ b/test/639-checker-code-sinking/src/Main.java
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) {
+ testSimpleUse();
+ testTwoUses();
+ testFieldStores(doThrow);
+ testFieldStoreCycle();
+ testArrayStores();
+ testOnlyStoreUses();
+ testNoUse();
+ testPhiInput();
+ testVolatileStore();
+ doThrow = true;
+ try {
+ testInstanceSideEffects();
+ } catch (Error e) {
+ // expected
+ System.out.println(e.getMessage());
+ }
+ try {
+ testStaticSideEffects();
+ } catch (Error e) {
+ // expected
+ System.out.println(e.getMessage());
+ }
+
+ try {
+ testStoreStore(doThrow);
+ } catch (Error e) {
+ // expected
+ System.out.println(e.getMessage());
+ }
+ }
+
+ /// CHECK-START: void Main.testSimpleUse() code_sinking (before)
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testSimpleUse() code_sinking (after)
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testSimpleUse() {
+ Object o = new Object();
+ if (doThrow) {
+ throw new Error(o.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testTwoUses() code_sinking (before)
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testTwoUses() code_sinking (after)
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testTwoUses() {
+ Object o = new Object();
+ if (doThrow) {
+ throw new Error(o.toString() + o.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testFieldStores(boolean) code_sinking (before)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testFieldStores(boolean) code_sinking (after)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK-NOT: begin_block
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testFieldStores(boolean doThrow) {
+ Main m = new Main();
+ m.intField = 42;
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testFieldStoreCycle() code_sinking (before)
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance1:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: <<NewInstance2:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance1>>,<<NewInstance2>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance2>>,<<NewInstance1>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ // TODO(ngeoffray): Handle allocation/store cycles.
+ /// CHECK-START: void Main.testFieldStoreCycle() code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance1:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: <<NewInstance2:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance1>>,<<NewInstance2>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance2>>,<<NewInstance1>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+ public static void testFieldStoreCycle() {
+ Main m1 = new Main();
+ Main m2 = new Main();
+ m1.objectField = m2;
+ m2.objectField = m1;
+ if (doThrow) {
+ throw new Error(m1.toString() + m2.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testArrayStores() code_sinking (before)
+ /// CHECK: <<Int1:i\d+>> IntConstant 1
+ /// CHECK: <<Int0:i\d+>> IntConstant 0
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[]
+ /// CHECK: <<NewArray:l\d+>> NewArray [<<LoadClass>>,<<Int1>>]
+ /// CHECK: ArraySet [<<NewArray>>,<<Int0>>,<<NewArray>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testArrayStores() code_sinking (after)
+ /// CHECK: <<Int1:i\d+>> IntConstant 1
+ /// CHECK: <<Int0:i\d+>> IntConstant 0
+ /// CHECK-NOT: NewArray
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[]
+ /// CHECK-NOT: begin_block
+ /// CHECK: <<NewArray:l\d+>> NewArray [<<LoadClass>>,<<Int1>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: ArraySet [<<NewArray>>,<<Int0>>,<<NewArray>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testArrayStores() {
+ Object[] o = new Object[1];
+ o[0] = o;
+ if (doThrow) {
+ throw new Error(o.toString());
+ }
+ }
+
+ // Make sure code sinking does not crash on dead allocations.
+ public static void testOnlyStoreUses() {
+ Main m = new Main();
+ Object[] o = new Object[1]; // dead allocation, should eventually be removed b/35634932.
+ o[0] = m;
+ o = null; // Avoid environment uses for the array allocation.
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ // Make sure code sinking does not crash on dead code.
+ public static void testNoUse() {
+ Main m = new Main();
+ boolean load = Main.doLoop; // dead code, not removed because of environment use.
+ // Ensure one environment use for the static field
+ $opt$noinline$foo();
+ load = false;
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ // Make sure we can move code only used by a phi.
+ /// CHECK-START: void Main.testPhiInput() code_sinking (before)
+ /// CHECK: <<Null:l\d+>> NullConstant
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Phi [<<Null>>,<<NewInstance>>]
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testPhiInput() code_sinking (after)
+ /// CHECK: <<Null:l\d+>> NullConstant
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: begin_block
+ /// CHECK: Phi [<<Null>>,<<NewInstance>>]
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testPhiInput() {
+ Object f = new Object();
+ if (doThrow) {
+ Object o = null;
+ int i = 2;
+ if (doLoop) {
+ o = f;
+ i = 42;
+ }
+ throw new Error(o.toString() + i);
+ }
+ }
+
+ static void $opt$noinline$foo() {}
+
+ // Check that we do not move volatile stores.
+ /// CHECK-START: void Main.testVolatileStore() code_sinking (before)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testVolatileStore() code_sinking (after)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+ public static void testVolatileStore() {
+ Main m = new Main();
+ m.volatileField = 42;
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ public static void testInstanceSideEffects() {
+ int a = mainField.intField;
+ $noinline$changeIntField();
+ if (doThrow) {
+ throw new Error("" + a);
+ }
+ }
+
+ static void $noinline$changeIntField() {
+ mainField.intField = 42;
+ }
+
+ public static void testStaticSideEffects() {
+ Object o = obj;
+ $noinline$changeStaticObjectField();
+ if (doThrow) {
+ throw new Error(o.getClass().toString());
+ }
+ }
+
+ static void $noinline$changeStaticObjectField() {
+ obj = new Main();
+ }
+
+ // Test that we preserve the order of stores.
+ /// CHECK-START: void Main.testStoreStore(boolean) code_sinking (before)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<Int43:i\d+>> IntConstant 43
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int43>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testStoreStore(boolean) code_sinking (after)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<Int43:i\d+>> IntConstant 43
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK-NOT: begin_block
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int43>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testStoreStore(boolean doThrow) {
+ Main m = new Main();
+ m.intField = 42;
+ m.intField = 43;
+ if (doThrow) {
+ throw new Error(m.$opt$noinline$toString());
+ }
+ }
+
+ public String $opt$noinline$toString() {
+ return "" + intField;
+ }
+
+ volatile int volatileField;
+ int intField;
+ Object objectField;
+ static boolean doThrow;
+ static boolean doLoop;
+ static Main mainField = new Main();
+ static Object obj = new Object();
+}
diff --git a/test/903-hello-tagging/expected.txt b/test/903-hello-tagging/expected.txt
index 872b79b518..acfdbd810b 100644
--- a/test/903-hello-tagging/expected.txt
+++ b/test/903-hello-tagging/expected.txt
@@ -8,3 +8,4 @@
[<null;1>, <null;1>, <null;2>, <null;2>, <null;3>, <null;3>, <null;4>, <null;4>, <null;5>, <null;5>, <null;6>, <null;6>, <null;7>, <null;7>, <null;8>, <null;8>, <null;9>, <null;9>]
18
[<1;0>, <2;0>, <3;0>, <4;0>, <5;0>, <6;0>, <7;0>, <8;0>, <9;0>, <11;0>, <12;0>, <13;0>, <14;0>, <15;0>, <16;0>, <17;0>, <18;0>, <19;0>]
+[100, 101, 102, 103, 104, 105, 106, 107, 108, 109]
diff --git a/test/903-hello-tagging/src/Main.java b/test/903-hello-tagging/src/Main.java
index 2f0365a921..48896b236a 100644
--- a/test/903-hello-tagging/src/Main.java
+++ b/test/903-hello-tagging/src/Main.java
@@ -22,6 +22,7 @@ public class Main {
public static void main(String[] args) {
doTest();
testGetTaggedObjects();
+ testTags();
}
public static void doTest() {
@@ -35,6 +36,12 @@ public class Main {
}
}
+ public static void testTags() {
+ Object o = new Object();
+ long[] res = testTagsInDifferentEnvs(o, 100, 10);
+ System.out.println(Arrays.toString(res));
+ }
+
private static WeakReference<Object> test() {
Object o1 = new Object();
setTag(o1, 1);
@@ -166,4 +173,5 @@ public class Main {
private static native long getTag(Object o);
private static native Object[] getTaggedObjects(long[] searchTags, boolean returnObjects,
boolean returnTags);
+ private static native long[] testTagsInDifferentEnvs(Object o, long baseTag, int n);
}
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index f74c1fc2ea..6177263cd2 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -139,6 +139,62 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTaggedObjects(JNIEnv* env
return resultArray;
}
+static jvmtiEnv* CreateJvmtiEnv(JNIEnv* env) {
+ JavaVM* jvm;
+ CHECK_EQ(0, env->GetJavaVM(&jvm));
+
+ jvmtiEnv* new_jvmti_env;
+ CHECK_EQ(0, jvm->GetEnv(reinterpret_cast<void**>(&new_jvmti_env), JVMTI_VERSION_1_0));
+
+ jvmtiCapabilities capa;
+ memset(&capa, 0, sizeof(jvmtiCapabilities));
+ capa.can_tag_objects = 1;
+ jvmtiError error = new_jvmti_env->AddCapabilities(&capa);
+ CHECK_EQ(JVMTI_ERROR_NONE, error);
+
+ return new_jvmti_env;
+}
+
+static void SetTag(jvmtiEnv* env, jobject obj, jlong tag) {
+ jvmtiError ret = env->SetTag(obj, tag);
+ CHECK_EQ(JVMTI_ERROR_NONE, ret);
+}
+
+static jlong GetTag(jvmtiEnv* env, jobject obj) {
+ jlong tag;
+ jvmtiError ret = env->GetTag(obj, &tag);
+ CHECK_EQ(JVMTI_ERROR_NONE, ret);
+ return tag;
+}
+
+extern "C" JNIEXPORT jlongArray JNICALL Java_Main_testTagsInDifferentEnvs(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jlong base_tag, jint count) {
+ std::unique_ptr<jvmtiEnv*[]> envs(new jvmtiEnv*[count]);
+ envs[0] = jvmti_env;
+ for (int32_t i = 1; i != count; ++i) {
+ envs[i] = CreateJvmtiEnv(env);
+ }
+
+ for (int32_t i = 0; i != count; ++i) {
+ SetTag(envs[i], obj, base_tag + i);
+ }
+ std::unique_ptr<jlong[]> vals(new jlong[count]);
+ for (int32_t i = 0; i != count; ++i) {
+ vals[i] = GetTag(envs[i], obj);
+ }
+
+ for (int32_t i = 1; i != count; ++i) {
+ CHECK_EQ(JVMTI_ERROR_NONE, envs[i]->DisposeEnvironment());
+ }
+
+ jlongArray res = env->NewLongArray(count);
+ if (res == nullptr) {
+ return nullptr;
+ }
+ env->SetLongArrayRegion(res, 0, count, vals.get());
+ return res;
+}
+
} // namespace Test903HelloTagging
} // namespace art
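
Note on the test above: per the JVMTI specification, object tags are local to a jvmtiEnv, so one object can carry a different tag in every environment; the test checks exactly that across count environments. A minimal sketch of the property, reusing the CreateJvmtiEnv, SetTag, and GetTag helpers this patch defines (SketchPerEnvTags itself is hypothetical):

    // Sketch: two environments, one object, two independent tags.
    static void SketchPerEnvTags(JNIEnv* env, jobject obj) {
      jvmtiEnv* first = CreateJvmtiEnv(env);
      jvmtiEnv* second = CreateJvmtiEnv(env);

      SetTag(first, obj, 1);   // Visible only through 'first'.
      SetTag(second, obj, 2);  // Does not overwrite the tag held by 'first'.

      CHECK_EQ(1, GetTag(first, obj));
      CHECK_EQ(2, GetTag(second, obj));

      CHECK_EQ(JVMTI_ERROR_NONE, first->DisposeEnvironment());
      CHECK_EQ(JVMTI_ERROR_NONE, second->DisposeEnvironment());
    }
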
diff --git a/test/906-iterate-heap/expected.txt b/test/906-iterate-heap/expected.txt
index d636286af5..3e857ab003 100644
--- a/test/906-iterate-heap/expected.txt
+++ b/test/906-iterate-heap/expected.txt
@@ -1,4 +1,20 @@
-[{tag=1, class-tag=0, size=8, length=-1}, {tag=2, class-tag=100, size=8, length=-1}, {tag=3, class-tag=100, size=8, length=-1}, {tag=4, class-tag=0, size=32, length=5}, {tag=5, class-tag=0, size=40, length=-1}, {tag=100, class-tag=0, size=<class>, length=-1}]
-[{tag=11, class-tag=0, size=8, length=-1}, {tag=12, class-tag=110, size=8, length=-1}, {tag=13, class-tag=110, size=8, length=-1}, {tag=14, class-tag=0, size=32, length=5}, {tag=15, class-tag=0, size=40, length=-1}, {tag=110, class-tag=0, size=<class>, length=-1}]
-15@0 ( 40, 'Hello World')
+[{tag=1, class-tag=0, size=8, length=-1}, {tag=2, class-tag=100, size=8, length=-1}, {tag=3, class-tag=100, size=8, length=-1}, {tag=4, class-tag=0, size=32, length=5}, {tag=5, class-tag=0, size=32, length=-1}, {tag=100, class-tag=0, size=<class>, length=-1}]
+[{tag=11, class-tag=0, size=8, length=-1}, {tag=12, class-tag=110, size=8, length=-1}, {tag=13, class-tag=110, size=8, length=-1}, {tag=14, class-tag=0, size=32, length=5}, {tag=15, class-tag=0, size=32, length=-1}, {tag=110, class-tag=0, size=<class>, length=-1}]
+15@0 (32, 'Hello World')
16
+1@0 (14, 2xZ '0001')
+2
+1@0 (15, 3xB '010203')
+2
+1@0 (16, 2xC '41005a00')
+2
+1@0 (18, 3xS '010002000300')
+2
+1@0 (24, 3xI '010000000200000003000000')
+2
+1@0 (20, 2xF '000000000000803f')
+2
+1@0 (40, 3xJ '010000000000000002000000000000000300000000000000')
+2
+1@0 (32, 2xD '0000000000000000000000000000f03f')
+2
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 0a0c68a2ea..890220ee8d 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -16,8 +16,10 @@
#include "inttypes.h"
+#include <iomanip>
#include <iostream>
#include <pthread.h>
+#include <sstream>
#include <stdio.h>
#include <vector>
@@ -181,11 +183,11 @@ extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapString(
struct FindStringCallbacks {
explicit FindStringCallbacks(jlong t) : tag_to_find(t) {}
- static jint JNICALL HeapIterationCallback(jlong class_tag ATTRIBUTE_UNUSED,
- jlong size ATTRIBUTE_UNUSED,
- jlong* tag_ptr ATTRIBUTE_UNUSED,
- jint length ATTRIBUTE_UNUSED,
- void* user_data ATTRIBUTE_UNUSED) {
+ static jint JNICALL HeapIterationCallback(jlong class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr ATTRIBUTE_UNUSED,
+ jint length ATTRIBUTE_UNUSED,
+ void* user_data ATTRIBUTE_UNUSED) {
return 0;
}
@@ -204,7 +206,7 @@ extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapString(
if (!p->data.empty()) {
p->data += "\n";
}
- p->data += android::base::StringPrintf("%" PRId64 "@%" PRId64 " (% " PRId64 ", '%s')",
+ p->data += android::base::StringPrintf("%" PRId64 "@%" PRId64 " (%" PRId64 ", '%s')",
*tag_ptr,
class_tag,
size,
@@ -232,5 +234,93 @@ extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapString(
return env->NewStringUTF(fsc.data.c_str());
}
+extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveArray(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) {
+ struct FindArrayCallbacks {
+ explicit FindArrayCallbacks(jlong t) : tag_to_find(t) {}
+
+ static jint JNICALL HeapIterationCallback(jlong class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr ATTRIBUTE_UNUSED,
+ jint length ATTRIBUTE_UNUSED,
+ void* user_data ATTRIBUTE_UNUSED) {
+ return 0;
+ }
+
+ static jint JNICALL ArrayValueCallback(jlong class_tag,
+ jlong size,
+ jlong* tag_ptr,
+ jint element_count,
+ jvmtiPrimitiveType element_type,
+ const void* elements,
+ void* user_data) {
+ FindArrayCallbacks* p = reinterpret_cast<FindArrayCallbacks*>(user_data);
+ if (*tag_ptr == p->tag_to_find) {
+ std::ostringstream oss;
+ oss << *tag_ptr
+ << '@'
+ << class_tag
+ << " ("
+ << size
+ << ", "
+ << element_count
+ << "x"
+ << static_cast<char>(element_type)
+ << " '";
+ size_t element_size;
+ switch (element_type) {
+ case JVMTI_PRIMITIVE_TYPE_BOOLEAN:
+ case JVMTI_PRIMITIVE_TYPE_BYTE:
+ element_size = 1;
+ break;
+ case JVMTI_PRIMITIVE_TYPE_CHAR:
+ case JVMTI_PRIMITIVE_TYPE_SHORT:
+ element_size = 2;
+ break;
+ case JVMTI_PRIMITIVE_TYPE_INT:
+ case JVMTI_PRIMITIVE_TYPE_FLOAT:
+ element_size = 4;
+ break;
+ case JVMTI_PRIMITIVE_TYPE_LONG:
+ case JVMTI_PRIMITIVE_TYPE_DOUBLE:
+ element_size = 8;
+ break;
+ default:
+ LOG(FATAL) << "Unknown type " << static_cast<size_t>(element_type);
+ UNREACHABLE();
+ }
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(elements);
+ for (size_t i = 0; i != element_size * element_count; ++i) {
+ oss << android::base::StringPrintf("%02x", data[i]);
+ }
+ oss << "')";
+
+ if (!p->data.empty()) {
+ p->data += "\n";
+ }
+ p->data += oss.str();
+ // Update the tag to test whether that works.
+ *tag_ptr = *tag_ptr + 1;
+ }
+ return 0;
+ }
+
+ std::string data;
+ const jlong tag_to_find;
+ };
+
+ jvmtiHeapCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
+ callbacks.heap_iteration_callback = FindArrayCallbacks::HeapIterationCallback;
+ callbacks.array_primitive_value_callback = FindArrayCallbacks::ArrayValueCallback;
+
+ FindArrayCallbacks fac(tag);
+ jvmtiError ret = jvmti_env->IterateThroughHeap(0, nullptr, &callbacks, &fac);
+ if (JvmtiErrorToException(env, ret)) {
+ return nullptr;
+ }
+ return env->NewStringUTF(fac.data.c_str());
+}
+
} // namespace Test906IterateHeap
} // namespace art
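
Note on ArrayValueCallback above: JVMTI hands the array contents as one packed buffer in native byte order, so the number of bytes to read is element_count times the size implied by the jvmtiPrimitiveType tag ('Z'/'B' = 1, 'C'/'S' = 2, 'I'/'F' = 4, 'J'/'D' = 8). That is why the expected output shows little-endian words on the test targets, e.g. 2xC '41005a00' for {'A', 'Z'}, and why each getTag call afterwards prints 2: the callback bumps *tag_ptr by one. A sketch of the dump logic factored into a helper (HexDumpElements is hypothetical; it relies on the <sstream> include and StringPrintf already used in this file):

    // Sketch: hex-dump the packed element buffer passed to an
    // array_primitive_value_callback.
    static std::string HexDumpElements(jvmtiPrimitiveType type,
                                       jint count,
                                       const void* elements) {
      size_t element_size = 0;
      switch (type) {
        case JVMTI_PRIMITIVE_TYPE_BOOLEAN:
        case JVMTI_PRIMITIVE_TYPE_BYTE:   element_size = 1; break;
        case JVMTI_PRIMITIVE_TYPE_CHAR:
        case JVMTI_PRIMITIVE_TYPE_SHORT:  element_size = 2; break;
        case JVMTI_PRIMITIVE_TYPE_INT:
        case JVMTI_PRIMITIVE_TYPE_FLOAT:  element_size = 4; break;
        case JVMTI_PRIMITIVE_TYPE_LONG:
        case JVMTI_PRIMITIVE_TYPE_DOUBLE: element_size = 8; break;
      }
      std::ostringstream oss;
      const uint8_t* data = static_cast<const uint8_t*>(elements);
      for (size_t i = 0; i != element_size * static_cast<size_t>(count); ++i) {
        oss << android::base::StringPrintf("%02x", data[i]);
      }
      return oss.str();
    }
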
diff --git a/test/906-iterate-heap/src/Main.java b/test/906-iterate-heap/src/Main.java
index 755d23c0d8..d4998865b5 100644
--- a/test/906-iterate-heap/src/Main.java
+++ b/test/906-iterate-heap/src/Main.java
@@ -79,6 +79,46 @@ public class Main {
System.out.println(iterateThroughHeapString(getTag(s)));
System.out.println(getTag(s));
+
+ boolean[] zArray = new boolean[] { false, true };
+ setTag(zArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(zArray)));
+ System.out.println(getTag(zArray));
+
+ byte[] bArray = new byte[] { 1, 2, 3 };
+ setTag(bArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(bArray)));
+ System.out.println(getTag(bArray));
+
+ char[] cArray = new char[] { 'A', 'Z' };
+ setTag(cArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(cArray)));
+ System.out.println(getTag(cArray));
+
+ short[] sArray = new short[] { 1, 2, 3 };
+ setTag(sArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(sArray)));
+ System.out.println(getTag(sArray));
+
+ int[] iArray = new int[] { 1, 2, 3 };
+ setTag(iArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(iArray)));
+ System.out.println(getTag(iArray));
+
+ float[] fArray = new float[] { 0.0f, 1.0f };
+ setTag(fArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(fArray)));
+ System.out.println(getTag(fArray));
+
+ long[] lArray = new long[] { 1, 2, 3 };
+ setTag(lArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(lArray)));
+ System.out.println(getTag(lArray));
+
+ double[] dArray = new double[] { 0.0, 1.0 };
+ setTag(dArray, 1);
+ System.out.println(iterateThroughHeapPrimitiveArray(getTag(dArray)));
+ System.out.println(getTag(dArray));
}
static class A {
@@ -147,4 +187,5 @@ public class Main {
private static native int iterateThroughHeapAdd(int heapFilter,
Class<?> klassFilter);
private static native String iterateThroughHeapString(long tag);
+ private static native String iterateThroughHeapPrimitiveArray(long tag);
}
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
index e932b206c0..6b86ac9e23 100644
--- a/test/912-classes/expected.txt
+++ b/test/912-classes/expected.txt
@@ -15,7 +15,8 @@
int interface=false array=false modifiable=false
$Proxy0 interface=false array=false modifiable=false
java.lang.Runnable interface=true array=false modifiable=false
-java.lang.String interface=false array=false modifiable=true
+java.lang.String interface=false array=false modifiable=false
+java.util.ArrayList interface=false array=false modifiable=true
[I interface=false array=true modifiable=false
[Ljava.lang.Runnable; interface=false array=true modifiable=false
[Ljava.lang.String; interface=false array=true modifiable=false
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index 005074f8c1..5d25d76aac 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -17,6 +17,7 @@
import java.lang.ref.Reference;
import java.lang.reflect.Constructor;
import java.lang.reflect.Proxy;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
@@ -40,6 +41,7 @@ public class Main {
testClassType(getProxyClass());
testClassType(Runnable.class);
testClassType(String.class);
+ testClassType(ArrayList.class);
testClassType(int[].class);
testClassType(Runnable[].class);
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 3125d2bd26..46805d7272 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -79,5 +79,218 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
-[1@0 ( 40, 'HelloWorld')]
+[1@0 (32, 'HelloWorld')]
2
+2@0 (15, 3xB '010203')
+3@0 (16, 2xC '41005a00')
+8@0 (32, 2xD '0000000000000000000000000000f03f')
+6@0 (20, 2xF '000000000000803f')
+5@0 (24, 3xI '010000000200000003000000')
+7@0 (40, 3xJ '010000000000000002000000000000000300000000000000')
+4@0 (18, 3xS '010002000300')
+1@0 (14, 2xZ '0001')
+23456789
+--- klass ---
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
+--- heap_filter ---
+---- tagged objects
+---
+---
+---
+---
+---- untagged objects
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=132, length=-1]
+root@root --(system-class)--> 2@0 [size=32, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(system-class)--> 2@0 [size=32, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+---- tagged classes
+root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=132, length=-1]
+root@root --(system-class)--> 2@0 [size=32, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+root@root --(system-class)--> 2@0 [size=32, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+---- untagged classes
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+---
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 0c2361a3b4..99bc48eeec 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <string.h>
+#include <iostream>
#include <vector>
#include "android-base/stringprintf.h"
@@ -29,6 +30,7 @@
#include "native_stack_dump.h"
#include "openjdkjvmti/jvmti.h"
#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -279,8 +281,14 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferences(JNIEnv* env
jlong size,
jint length,
const jvmtiHeapReferenceInfo* reference_info)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: Elem(referrer, referree, size, length) {
memcpy(&info_, reference_info, sizeof(jvmtiHeapReferenceInfo));
+ // Debug stack trace for failure condition. Remove when done.
+ if (info_.stack_local.depth == 3 && info_.stack_local.slot == 13) {
+ DumpNativeStack(std::cerr, GetTid());
+ Thread::Current()->DumpJavaStack(std::cerr, false, false);
+ }
}
protected:
@@ -521,7 +529,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferencesString(
std::unique_ptr<char[]> mod_utf(new char[utf_byte_count + 1]);
memset(mod_utf.get(), 0, utf_byte_count + 1);
ConvertUtf16ToModifiedUtf8(mod_utf.get(), utf_byte_count, value, value_length);
- p->data.push_back(android::base::StringPrintf("%" PRId64 "@%" PRId64 " (% " PRId64 ", '%s')",
+ p->data.push_back(android::base::StringPrintf("%" PRId64 "@%" PRId64 " (%" PRId64 ", '%s')",
*tag_ptr,
class_tag,
size,
@@ -555,5 +563,96 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferencesString(
return retArray;
}
+
+extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveArray(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject initial_object) {
+ struct FindArrayCallbacks {
+ static jint JNICALL FollowReferencesCallback(
+ jvmtiHeapReferenceKind reference_kind ATTRIBUTE_UNUSED,
+ const jvmtiHeapReferenceInfo* reference_info ATTRIBUTE_UNUSED,
+ jlong class_tag ATTRIBUTE_UNUSED,
+ jlong referrer_class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr ATTRIBUTE_UNUSED,
+ jlong* referrer_tag_ptr ATTRIBUTE_UNUSED,
+ jint length ATTRIBUTE_UNUSED,
+ void* user_data ATTRIBUTE_UNUSED) {
+ return JVMTI_VISIT_OBJECTS; // Continue visiting.
+ }
+
+ static jint JNICALL ArrayValueCallback(jlong class_tag,
+ jlong size,
+ jlong* tag_ptr,
+ jint element_count,
+ jvmtiPrimitiveType element_type,
+ const void* elements,
+ void* user_data) {
+ FindArrayCallbacks* p = reinterpret_cast<FindArrayCallbacks*>(user_data);
+ if (*tag_ptr != 0) {
+ std::ostringstream oss;
+ oss << *tag_ptr
+ << '@'
+ << class_tag
+ << " ("
+ << size
+ << ", "
+ << element_count
+ << "x"
+ << static_cast<char>(element_type)
+ << " '";
+ size_t element_size;
+ switch (element_type) {
+ case JVMTI_PRIMITIVE_TYPE_BOOLEAN:
+ case JVMTI_PRIMITIVE_TYPE_BYTE:
+ element_size = 1;
+ break;
+ case JVMTI_PRIMITIVE_TYPE_CHAR:
+ case JVMTI_PRIMITIVE_TYPE_SHORT:
+ element_size = 2;
+ break;
+ case JVMTI_PRIMITIVE_TYPE_INT:
+ case JVMTI_PRIMITIVE_TYPE_FLOAT:
+ element_size = 4;
+ break;
+ case JVMTI_PRIMITIVE_TYPE_LONG:
+ case JVMTI_PRIMITIVE_TYPE_DOUBLE:
+ element_size = 8;
+ break;
+ default:
+ LOG(FATAL) << "Unknown type " << static_cast<size_t>(element_type);
+ UNREACHABLE();
+ }
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(elements);
+ for (size_t i = 0; i != element_size * element_count; ++i) {
+ oss << android::base::StringPrintf("%02x", data[i]);
+ }
+ oss << "')";
+
+ if (!p->data.empty()) {
+ p->data += "\n";
+ }
+ p->data += oss.str();
+ // Update the tag to test whether that works.
+ *tag_ptr = *tag_ptr + 1;
+ }
+ return 0;
+ }
+
+ std::string data;
+ };
+
+ jvmtiHeapCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
+ callbacks.heap_reference_callback = FindArrayCallbacks::FollowReferencesCallback;
+ callbacks.array_primitive_value_callback = FindArrayCallbacks::ArrayValueCallback;
+
+ FindArrayCallbacks fac;
+ jvmtiError ret = jvmti_env->FollowReferences(0, nullptr, initial_object, &callbacks, &fac);
+ if (JvmtiErrorToException(env, ret)) {
+ return nullptr;
+ }
+ return env->NewStringUTF(fac.data.c_str());
+}
+
} // namespace Test913Heaps
} // namespace art
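
Note on FollowReferencesCallback above: FollowReferences only walks through an object whose heap_reference_callback returns JVMTI_VISIT_OBJECTS; returning 0 still delivers that reference to the callback but prunes the traversal beneath it. The test always continues, since it must reach the tagged arrays behind the anonymous holder object. A sketch of a pruning variant (PruningCallback is hypothetical):

    // Sketch: descend only into untagged objects; tagged ones are still
    // reported via this callback, but their own references are not followed.
    static jint JNICALL PruningCallback(jvmtiHeapReferenceKind kind ATTRIBUTE_UNUSED,
                                        const jvmtiHeapReferenceInfo* info ATTRIBUTE_UNUSED,
                                        jlong class_tag ATTRIBUTE_UNUSED,
                                        jlong referrer_class_tag ATTRIBUTE_UNUSED,
                                        jlong size ATTRIBUTE_UNUSED,
                                        jlong* tag_ptr,
                                        jlong* referrer_tag_ptr ATTRIBUTE_UNUSED,
                                        jint length ATTRIBUTE_UNUSED,
                                        void* user_data ATTRIBUTE_UNUSED) {
      return (*tag_ptr == 0) ? JVMTI_VISIT_OBJECTS : 0;
    }
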
diff --git a/test/913-heaps/run b/test/913-heaps/run
index c6e62ae6cd..dd35526d25 100755
--- a/test/913-heaps/run
+++ b/test/913-heaps/run
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --jvmti
+./default-run "$@" --jvmti -Xcompiler-option -g
diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java
index 4402072649..df89f347e0 100644
--- a/test/913-heaps/src/Main.java
+++ b/test/913-heaps/src/Main.java
@@ -26,6 +26,22 @@ public class Main {
new TestConfig().doFollowReferencesTest();
doStringTest();
+ doPrimitiveArrayTest();
+
+ // Test klass filter.
+ System.out.println("--- klass ---");
+ new TestConfig(A.class, 0).doFollowReferencesTest();
+
+ // Test heap filter.
+ System.out.println("--- heap_filter ---");
+ System.out.println("---- tagged objects");
+ new TestConfig(null, 0x4).doFollowReferencesTest();
+ System.out.println("---- untagged objects");
+ new TestConfig(null, 0x8).doFollowReferencesTest();
+ System.out.println("---- tagged classes");
+ new TestConfig(null, 0x10).doFollowReferencesTest();
+ System.out.println("---- untagged classes");
+ new TestConfig(null, 0x20).doFollowReferencesTest();
}
public static void doTest() throws Exception {
@@ -47,6 +63,53 @@ public class Main {
System.out.println(getTag(str));
}
+ public static void doPrimitiveArrayTest() throws Exception {
+ final boolean[] zArray = new boolean[] { false, true };
+ setTag(zArray, 1);
+
+ final byte[] bArray = new byte[] { 1, 2, 3 };
+ setTag(bArray, 2);
+
+ final char[] cArray = new char[] { 'A', 'Z' };
+ setTag(cArray, 3);
+
+ final short[] sArray = new short[] { 1, 2, 3 };
+ setTag(sArray, 4);
+
+ final int[] iArray = new int[] { 1, 2, 3 };
+ setTag(iArray, 5);
+
+ final float[] fArray = new float[] { 0.0f, 1.0f };
+ setTag(fArray, 6);
+
+ final long[] lArray = new long[] { 1, 2, 3 };
+ setTag(lArray, 7);
+
+ final double[] dArray = new double[] { 0.0, 1.0 };
+ setTag(dArray, 8);
+
+ Object o = new Object() {
+ Object z = zArray;
+ Object b = bArray;
+ Object c = cArray;
+ Object s = sArray;
+ Object i = iArray;
+ Object f = fArray;
+ Object l = lArray;
+ Object d = dArray;
+ };
+
+ System.out.println(followReferencesPrimitiveArray(o));
+ System.out.print(getTag(zArray));
+ System.out.print(getTag(bArray));
+ System.out.print(getTag(cArray));
+ System.out.print(getTag(sArray));
+ System.out.print(getTag(iArray));
+ System.out.print(getTag(fArray));
+ System.out.print(getTag(lArray));
+ System.out.println(getTag(dArray));
+ }
+
private static void run() {
clearStats();
forceGarbageCollection();
@@ -239,6 +302,9 @@ public class Main {
}
public static class Verifier {
+ // Should roots with vreg=-1 be printed?
+ public final static boolean PRINT_ROOTS_WITH_UNKNOWN_VREG = false;
+
public static class Node {
public String referrer;
@@ -323,6 +389,9 @@ public class Main {
continue;
}
lastRoot = l;
+ if (!PRINT_ROOTS_WITH_UNKNOWN_VREG && l.indexOf("vreg=-1") > 0) {
+ continue;
+ }
System.out.println(l);
}
}
@@ -424,4 +493,5 @@ public class Main {
public static native String[] followReferences(int heapFilter, Class<?> klassFilter,
Object initialObject, int stopAfter, int followSet, Object jniRef);
public static native String[] followReferencesString(Object initialObject);
+ public static native String followReferencesPrimitiveArray(Object initialObject);
}
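
Note on the heap_filter values above: the literals 0x4, 0x8, 0x10, and 0x20 passed to TestConfig are the standard JVMTI heap-filter flags, each of which excludes the named group of objects from the callbacks:

    // JVMTI heap filter bits, as defined by the jvmti.h specification.
    enum {
      JVMTI_HEAP_FILTER_TAGGED         = 0x4,   // Exclude tagged objects.
      JVMTI_HEAP_FILTER_UNTAGGED       = 0x8,   // Exclude untagged objects.
      JVMTI_HEAP_FILTER_CLASS_TAGGED   = 0x10,  // Exclude objects with tagged classes.
      JVMTI_HEAP_FILTER_CLASS_UNTAGGED = 0x20,  // Exclude objects with untagged classes.
    };
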
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 8c0b9283b6..c22b0be9f4 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -527,10 +527,10 @@ def print_test_info(test_name, result, failed_test_info=""):
test_name = ('%s...%s') % (
test_name[:(allowed_test_length - 3)/2],
test_name[-(allowed_test_length - 3)/2:])
- info += ('%s %s %s') % (
- progress_info,
- test_name,
- result_text)
+ info += ('%s %s %s') % (
+ progress_info,
+ test_name,
+ result_text)
print_text(info)
except Exception, e:
print_text(('%s\n%s\n') % (test_name, str(e)))