-rw-r--r--  compiler/Android.bp | 1
-rw-r--r--  compiler/optimizing/cloner_test.cc | 185
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 9
-rw-r--r--  compiler/optimizing/nodes.cc | 29
-rw-r--r--  compiler/optimizing/nodes.h | 554
-rw-r--r--  compiler/optimizing/nodes_mips.h | 13
-rw-r--r--  compiler/optimizing/nodes_shared.h | 23
-rw-r--r--  compiler/optimizing/nodes_vector.h | 127
-rw-r--r--  compiler/optimizing/nodes_x86.h | 17
-rw-r--r--  dex2oat/dex2oat_test.cc | 16
-rw-r--r--  dex2oat/linker/oat_writer.cc | 80
-rw-r--r--  dexlayout/compact_dex_writer.cc | 2
-rw-r--r--  dexlayout/compact_dex_writer.h | 9
-rw-r--r--  dexlayout/dex_ir.cc | 196
-rw-r--r--  dexlayout/dex_ir.h | 247
-rw-r--r--  dexlayout/dex_ir_builder.cc | 12
-rw-r--r--  dexlayout/dex_ir_builder.h | 4
-rw-r--r--  dexlayout/dex_writer.cc | 655
-rw-r--r--  dexlayout/dex_writer.h | 121
-rw-r--r--  dexlayout/dexdiag.cc | 3
-rw-r--r--  dexlayout/dexlayout.cc | 389
-rw-r--r--  dexlayout/dexlayout.h | 28
-rw-r--r--  dexlayout/dexlayout_test.cc | 42
-rw-r--r--  openjdkjvmti/art_jvmti.h | 4
-rw-r--r--  openjdkjvmti/events-inl.h | 277
-rw-r--r--  openjdkjvmti/events.cc | 21
-rw-r--r--  openjdkjvmti/events.h | 67
-rw-r--r--  openjdkjvmti/object_tagging.cc | 3
-rw-r--r--  openjdkjvmti/ti_dump.cc | 2
-rw-r--r--  openjdkjvmti/ti_phase.cc | 11
-rw-r--r--  runtime/base/mutex-inl.h | 30
-rw-r--r--  runtime/base/mutex.h | 10
-rw-r--r--  runtime/debugger.cc | 51
-rw-r--r--  runtime/debugger.h | 8
-rw-r--r--  runtime/dex_file_layout.cc | 10
-rw-r--r--  runtime/dex_file_layout.h | 40
-rw-r--r--  runtime/dex_file_verifier.cc | 4
-rw-r--r--  runtime/ti/agent.cc | 3
-rw-r--r--  runtime/trace.cc | 64
-rw-r--r--  test/1940-ddms-ext/expected.txt | 3
-rw-r--r--  test/1940-ddms-ext/src-art/art/Test1940.java | 98
-rw-r--r--  test/1941-dispose-stress/dispose_stress.cc | 59
-rw-r--r--  test/1941-dispose-stress/expected.txt | 1
-rw-r--r--  test/1941-dispose-stress/info.txt | 3
-rwxr-xr-x  test/1941-dispose-stress/run | 18
-rw-r--r--  test/1941-dispose-stress/src/Main.java | 21
-rw-r--r--  test/1941-dispose-stress/src/art/Breakpoint.java | 202
-rw-r--r--  test/1941-dispose-stress/src/art/Test1941.java | 72
-rw-r--r--  test/1941-dispose-stress/src/art/Trace.java | 68
-rw-r--r--  test/Android.bp | 1
-rw-r--r--  test/knownfailures.json | 6
-rw-r--r--  tools/libjdwp_art_failures.txt | 6
-rw-r--r--  tools/libjdwp_oj_art_failures.txt | 6
53 files changed, 2716 insertions(+), 1215 deletions(-)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 859947108e..249aaf5632 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -323,6 +323,7 @@ art_cc_test {
"linker/method_bss_mapping_encoder_test.cc",
"linker/output_stream_test.cc",
"optimizing/bounds_check_elimination_test.cc",
+ "optimizing/cloner_test.cc",
"optimizing/data_type_test.cc",
"optimizing/dominator_test.cc",
"optimizing/find_loops_test.cc",
diff --git a/compiler/optimizing/cloner_test.cc b/compiler/optimizing/cloner_test.cc
new file mode 100644
index 0000000000..d34dd81767
--- /dev/null
+++ b/compiler/optimizing/cloner_test.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph_checker.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+// This class provides methods and helpers for testing various cloning and copying routines:
+// individual instruction cloning and cloning of the more coarse-grained structures.
+class ClonerTest : public OptimizingUnitTest {
+ public:
+ ClonerTest()
+ : graph_(CreateGraph()), entry_block_(nullptr), exit_block_(nullptr), parameter_(nullptr) {}
+
+ void CreateBasicLoopControlFlow(/* out */ HBasicBlock** header_p,
+ /* out */ HBasicBlock** body_p) {
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block_);
+ graph_->SetEntryBlock(entry_block_);
+
+ HBasicBlock* loop_preheader = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_exit = new (GetAllocator()) HBasicBlock(graph_);
+
+ graph_->AddBlock(loop_preheader);
+ graph_->AddBlock(loop_header);
+ graph_->AddBlock(loop_body);
+ graph_->AddBlock(loop_exit);
+
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(exit_block_);
+ graph_->SetExitBlock(exit_block_);
+
+ entry_block_->AddSuccessor(loop_preheader);
+ loop_preheader->AddSuccessor(loop_header);
+ // Loop exit first to have a proper exit condition/target for HIf.
+ loop_header->AddSuccessor(loop_exit);
+ loop_header->AddSuccessor(loop_body);
+ loop_body->AddSuccessor(loop_header);
+ loop_exit->AddSuccessor(exit_block_);
+
+ *header_p = loop_header;
+ *body_p = loop_body;
+
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
+ entry_block_->AddInstruction(parameter_);
+ loop_exit->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
+ }
+
+ void CreateBasicLoopDataFlow(HBasicBlock* loop_header, HBasicBlock* loop_body) {
+ uint32_t dex_pc = 0;
+
+ // Entry block.
+ HIntConstant* const_0 = graph_->GetIntConstant(0);
+ HIntConstant* const_1 = graph_->GetIntConstant(1);
+ HIntConstant* const_128 = graph_->GetIntConstant(128);
+
+ // Header block.
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* suspend_check = new (GetAllocator()) HSuspendCheck();
+
+ loop_header->AddPhi(phi);
+ loop_header->AddInstruction(suspend_check);
+ loop_header->AddInstruction(new (GetAllocator()) HGreaterThanOrEqual(phi, const_128));
+ loop_header->AddInstruction(new (GetAllocator()) HIf(parameter_));
+
+ // Loop body block.
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter_, dex_pc);
+ HInstruction* array_length = new (GetAllocator()) HArrayLength(null_check, dex_pc);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(phi, array_length, dex_pc);
+ HInstruction* array_get =
+ new (GetAllocator()) HArrayGet(null_check, bounds_check, DataType::Type::kInt32, dex_pc);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_get, const_1);
+ HInstruction* array_set =
+ new (GetAllocator()) HArraySet(null_check, bounds_check, add, DataType::Type::kInt32, dex_pc);
+ HInstruction* induction_inc = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, const_1);
+
+ loop_body->AddInstruction(null_check);
+ loop_body->AddInstruction(array_length);
+ loop_body->AddInstruction(bounds_check);
+ loop_body->AddInstruction(array_get);
+ loop_body->AddInstruction(add);
+ loop_body->AddInstruction(array_set);
+ loop_body->AddInstruction(induction_inc);
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
+
+ phi->AddInput(const_0);
+ phi->AddInput(induction_inc);
+
+ graph_->SetHasBoundsChecks(true);
+
+ // Adjust HEnvironment for each instruction that requires it.
+ ArenaVector<HInstruction*> current_locals({phi, const_128, parameter_},
+ GetAllocator()->Adapter(kArenaAllocInstruction));
+
+ HEnvironment* env = ManuallyBuildEnvFor(suspend_check, &current_locals);
+ null_check->CopyEnvironmentFrom(env);
+ bounds_check->CopyEnvironmentFrom(env);
+ }
+
+ HEnvironment* ManuallyBuildEnvFor(HInstruction* instruction,
+ ArenaVector<HInstruction*>* current_locals) {
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ (GetAllocator()),
+ current_locals->size(),
+ graph_->GetArtMethod(),
+ instruction->GetDexPc(),
+ instruction);
+
+ environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals));
+ instruction->SetRawEnvironment(environment);
+ return environment;
+ }
+
+ bool CheckGraph() {
+ GraphChecker checker(graph_);
+ checker.Run();
+ if (!checker.IsValid()) {
+ for (const std::string& error : checker.GetErrors()) {
+ std::cout << error << std::endl;
+ }
+ return false;
+ }
+ return true;
+ }
+
+ HGraph* graph_;
+
+ HBasicBlock* entry_block_;
+ HBasicBlock* exit_block_;
+
+ HInstruction* parameter_;
+};
+
+TEST_F(ClonerTest, IndividualInstrCloner) {
+ HBasicBlock* header = nullptr;
+ HBasicBlock* loop_body = nullptr;
+
+ CreateBasicLoopControlFlow(&header, &loop_body);
+ CreateBasicLoopDataFlow(header, loop_body);
+ graph_->BuildDominatorTree();
+ ASSERT_TRUE(CheckGraph());
+
+ HSuspendCheck* old_suspend_check = header->GetLoopInformation()->GetSuspendCheck();
+ CloneAndReplaceInstructionVisitor visitor(graph_);
+ // Do instruction cloning and replacement twice with different visiting order.
+
+ visitor.VisitInsertionOrder();
+ size_t instr_replaced_by_clones_count = visitor.GetInstrReplacedByClonesCount();
+ EXPECT_EQ(instr_replaced_by_clones_count, 12u);
+ EXPECT_TRUE(CheckGraph());
+
+ visitor.VisitReversePostOrder();
+ instr_replaced_by_clones_count = visitor.GetInstrReplacedByClonesCount();
+ EXPECT_EQ(instr_replaced_by_clones_count, 24u);
+ EXPECT_TRUE(CheckGraph());
+
+ HSuspendCheck* new_suspend_check = header->GetLoopInformation()->GetSuspendCheck();
+ EXPECT_NE(new_suspend_check, old_suspend_check);
+ EXPECT_NE(new_suspend_check, nullptr);
+}
+
+} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index fbfee12be9..4c18e16c48 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -27,6 +27,10 @@
namespace art {
+// Whether to run an exhaustive test of individual HInstruction cloning, in which each instruction
+// is replaced with its copy if it is clonable.
+static constexpr bool kTestInstructionClonerExhaustively = false;
+
class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
public:
InstructionSimplifierVisitor(HGraph* graph,
@@ -130,6 +134,11 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
};
void InstructionSimplifier::Run() {
+ if (kTestInstructionClonerExhaustively) {
+ CloneAndReplaceInstructionVisitor visitor(graph_);
+ visitor.VisitReversePostOrder();
+ }
+
InstructionSimplifierVisitor visitor(graph_, codegen_, compiler_driver_, stats_);
visitor.Run();
}
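The kTestInstructionClonerExhaustively constant above acts as a compile-time switch: it is false by default, and the exhaustive clone-and-replace pass only runs when a developer flips it locally. A minimal local change to enable it (illustrative only, not meant to be committed) would be:

static constexpr bool kTestInstructionClonerExhaustively = true;  // local debugging only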
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fff61f5727..fa580d9bed 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -845,6 +845,13 @@ static void UpdateInputsUsers(HInstruction* instruction) {
DCHECK(!instruction->HasEnvironment());
}
+void HBasicBlock::ReplaceAndRemovePhiWith(HPhi* initial, HPhi* replacement) {
+ DCHECK(initial->GetBlock() == this);
+ InsertPhiAfter(replacement, initial);
+ initial->ReplaceWith(replacement);
+ RemovePhi(initial);
+}
+
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
@@ -2907,6 +2914,28 @@ void HInstruction::RemoveEnvironmentUsers() {
env_uses_.clear();
}
+HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr) {
+ HInstruction* clone = instr->Clone(instr->GetBlock()->GetGraph()->GetAllocator());
+ HBasicBlock* block = instr->GetBlock();
+
+ if (instr->IsPhi()) {
+ HPhi* phi = instr->AsPhi();
+ DCHECK(!phi->HasEnvironment());
+ HPhi* phi_clone = clone->AsPhi();
+ block->ReplaceAndRemovePhiWith(phi, phi_clone);
+ } else {
+ block->ReplaceAndRemoveInstructionWith(instr, clone);
+ if (instr->HasEnvironment()) {
+ clone->CopyEnvironmentFrom(instr->GetEnvironment());
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ if (instr->IsSuspendCheck() && loop_info != nullptr) {
+ loop_info->SetSuspendCheck(clone->AsSuspendCheck());
+ }
+ }
+ }
+ return clone;
+}
+
// Returns an instruction with the opposite Boolean value from 'cond'.
HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) {
ArenaAllocator* allocator = GetAllocator();
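ReplaceInstrOrPhiByClone() is the building block for a clone-and-replace pass. The CloneAndReplaceInstructionVisitor referenced by cloner_test.cc is not shown in this excerpt, so the following is only a rough sketch, assuming a delegate visitor that replaces every clonable instruction it visits and accumulates a replacement count:

class CloneAndReplaceSketchVisitor : public HGraphDelegateVisitor {
 public:
  explicit CloneAndReplaceSketchVisitor(HGraph* graph)
      : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}

  // Called for every instruction by the delegate visitor; replaces it with its clone if possible.
  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    if (instruction->IsClonable()) {
      ReplaceInstrOrPhiByClone(instruction);
      instr_replaced_by_clones_count_++;
    }
  }

  size_t GetInstrReplacedByClonesCount() const { return instr_replaced_by_clones_count_; }

 private:
  size_t instr_replaced_by_clones_count_;
};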
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6672901781..66d5bfea32 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1160,6 +1160,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
// Insert `instruction` before/after an existing instruction `cursor`.
void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
+ // Replace phi `initial` with `replacement` within this block.
+ void ReplaceAndRemovePhiWith(HPhi* initial, HPhi* replacement);
// Replace instruction `initial` with `replacement` within this block.
void ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement);
@@ -1480,18 +1482,31 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
#undef FORWARD_DECLARATION
#define DECLARE_INSTRUCTION(type) \
+ private: \
+ H##type& operator=(const H##type&) = delete; \
+ public: \
InstructionKind GetKindInternal() const OVERRIDE { return k##type; } \
const char* DebugName() const OVERRIDE { return #type; } \
bool InstructionTypeEquals(const HInstruction* other) const OVERRIDE { \
return other->Is##type(); \
} \
+ HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE { \
+ DCHECK(IsClonable()); \
+ return new (arena) H##type(*this->As##type()); \
+ } \
void Accept(HGraphVisitor* visitor) OVERRIDE
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
+ private: \
+ H##type& operator=(const H##type&) = delete; \
+ public: \
bool Is##type() const { return As##type() != nullptr; } \
const H##type* As##type() const { return this; } \
H##type* As##type() { return this; }
+#define DEFAULT_COPY_CONSTRUCTOR(type) \
+ explicit H##type(const H##type& other) = default;
+
template <typename T>
class HUseListNode : public ArenaObject<kArenaAllocUseListNode>,
public IntrusiveForwardListNode<HUseListNode<T>> {
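For a concrete type, the updated DECLARE_INSTRUCTION macro now also deletes the assignment operator and emits a Clone override built on the type's copy constructor. For illustration, DECLARE_INSTRUCTION(Add) expands (slightly simplified) to:

 private:
  HAdd& operator=(const HAdd&) = delete;
 public:
  InstructionKind GetKindInternal() const OVERRIDE { return kAdd; }
  const char* DebugName() const OVERRIDE { return "Add"; }
  bool InstructionTypeEquals(const HInstruction* other) const OVERRIDE {
    return other->IsAdd();
  }
  HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE {
    DCHECK(IsClonable());
    return new (arena) HAdd(*this->AsAdd());
  }
  void Accept(HGraphVisitor* visitor) OVERRIDE;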
@@ -2182,6 +2197,25 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
#undef INSTRUCTION_TYPE_CHECK
+ // Return a clone of the instruction if it is clonable (a shallow copy by default, or a custom
+ // copy if a custom copy constructor is provided for a particular type). If IsClonable() is false
+ // for the instruction, then the behaviour of this function is undefined.
+ //
+ // Note: It is semantically valid to create a clone of the instruction only up to the
+ // prepare_for_register_allocator phase, as lifetime, intervals and codegen info are not
+ // copied.
+ //
+ // Note: HEnvironment and some other fields are not copied and are set to default values, see
+ // 'explicit HInstruction(const HInstruction& other)' for details.
+ virtual HInstruction* Clone(ArenaAllocator* arena ATTRIBUTE_UNUSED) const {
+ LOG(FATAL) << "Cloning is not implemented for the instruction " <<
+ DebugName() << " " << GetId();
+ UNREACHABLE();
+ }
+
+ // Return whether the instruction can be cloned (copied).
+ virtual bool IsClonable() const { return false; }
+
// Returns whether the instruction can be moved within the graph.
// TODO: this method is used by LICM and GVN with possibly different
// meanings? split and rename?
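Because the base-class Clone() aborts with LOG(FATAL), callers are expected to guard it with IsClonable(). A minimal sketch of the intended calling pattern:

HInstruction* CloneIfPossible(HInstruction* instr, ArenaAllocator* arena) {
  // Only clonable instructions may be cloned; anything else would hit the LOG(FATAL) above.
  return instr->IsClonable() ? instr->Clone(arena) : nullptr;
}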
@@ -2298,6 +2332,30 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
packed_fields_ = BitFieldType::Update(value, packed_fields_);
}
+ // Copy construction for the instruction (used by the Clone function).
+ //
+ // Fields (e.g. lifetime, intervals and codegen info) associated with phases starting from
+ // prepare_for_register_allocator are not copied (they are set to default values).
+ //
+ // Copy constructors must be provided for every HInstruction type; the default copy constructor is
+ // fine for most of them. However, for some instructions a custom copy constructor must be
+ // specified (when the instruction has non-trivially copyable fields that require special handling
+ // when they are copied).
+ explicit HInstruction(const HInstruction& other)
+ : previous_(nullptr),
+ next_(nullptr),
+ block_(nullptr),
+ dex_pc_(other.dex_pc_),
+ id_(-1),
+ ssa_index_(-1),
+ packed_fields_(other.packed_fields_),
+ environment_(nullptr),
+ locations_(nullptr),
+ live_interval_(nullptr),
+ lifetime_position_(kNoLifetime),
+ side_effects_(other.side_effects_),
+ reference_type_handle_(other.reference_type_handle_) {}
+
private:
void FixUpUserRecordsAfterUseInsertion(HUseList<HInstruction*>::iterator fixup_end) {
auto before_use_node = uses_.before_begin();
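DEFAULT_COPY_CONSTRUCTOR is enough when all of a type's fields can be copied verbatim; an instruction that caches per-instance codegen state would instead supply its own copy constructor that resets that state in the clone. A hypothetical example (HFooWithCache does not exist in ART and is shown only to illustrate the pattern):

class HFooWithCache FINAL : public HExpression<1> {
 public:
  HFooWithCache(HInstruction* input, uint32_t dex_pc)
      : HExpression(DataType::Type::kInt32, SideEffects::None(), dex_pc),
        cached_slow_path_(nullptr) {
    SetRawInputAt(0, input);
  }

  bool IsClonable() const OVERRIDE { return true; }

  DECLARE_INSTRUCTION(FooWithCache);

 protected:
  // Custom copy constructor: the clone must not share codegen state with the original.
  explicit HFooWithCache(const HFooWithCache& other)
      : HExpression(other), cached_slow_path_(nullptr) {}

 private:
  SlowPathCode* cached_slow_path_;
};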
@@ -2387,8 +2445,6 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
friend class HEnvironment;
friend class HGraph;
friend class HInstructionList;
-
- DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
@@ -2484,10 +2540,9 @@ class HVariableInputSizeInstruction : public HInstruction {
: HInstruction(side_effects, dex_pc),
inputs_(number_of_inputs, allocator->Adapter(kind)) {}
- ArenaVector<HUserRecord<HInstruction*>> inputs_;
+ DEFAULT_COPY_CONSTRUCTOR(VariableInputSizeInstruction);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVariableInputSizeInstruction);
+ ArenaVector<HUserRecord<HInstruction*>> inputs_;
};
template<size_t N>
@@ -2502,6 +2557,9 @@ class HTemplateInstruction: public HInstruction {
return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TemplateInstruction<N>);
+
private:
std::array<HUserRecord<HInstruction*>, N> inputs_;
@@ -2522,6 +2580,9 @@ class HTemplateInstruction<0>: public HInstruction {
return ArrayRef<HUserRecord<HInstruction*>>();
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TemplateInstruction<0>);
+
private:
friend class SsaBuilder;
};
@@ -2547,6 +2608,7 @@ class HExpression : public HTemplateInstruction<N> {
static_assert(kNumberOfExpressionPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>;
+ DEFAULT_COPY_CONSTRUCTOR(Expression<N>);
};
// Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
@@ -2560,8 +2622,8 @@ class HReturnVoid FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(ReturnVoid);
- private:
- DISALLOW_COPY_AND_ASSIGN(HReturnVoid);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ReturnVoid);
};
// Represents dex's RETURN opcodes. A HReturn is a control flow
@@ -2577,8 +2639,8 @@ class HReturn FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(Return);
- private:
- DISALLOW_COPY_AND_ASSIGN(HReturn);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Return);
};
class HPhi FINAL : public HVariableInputSizeInstruction {
@@ -2604,6 +2666,8 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
SetPackedFlag<kFlagCanBeNull>(true);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
static DataType::Type ToPhiType(DataType::Type type) {
return DataType::Kind(type);
@@ -2666,6 +2730,9 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(Phi);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Phi);
+
private:
static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldTypeSize =
@@ -2677,8 +2744,6 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>;
const uint32_t reg_number_;
-
- DISALLOW_COPY_AND_ASSIGN(HPhi);
};
// The exit instruction is the only instruction of the exit block.
@@ -2692,8 +2757,8 @@ class HExit FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(Exit);
- private:
- DISALLOW_COPY_AND_ASSIGN(HExit);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Exit);
};
// Jumps from one block to another.
@@ -2701,6 +2766,7 @@ class HGoto FINAL : public HTemplateInstruction<0> {
public:
explicit HGoto(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {}
+ bool IsClonable() const OVERRIDE { return true; }
bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* GetSuccessor() const {
@@ -2709,8 +2775,8 @@ class HGoto FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(Goto);
- private:
- DISALLOW_COPY_AND_ASSIGN(HGoto);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Goto);
};
class HConstant : public HExpression<0> {
@@ -2733,8 +2799,8 @@ class HConstant : public HExpression<0> {
DECLARE_ABSTRACT_INSTRUCTION(Constant);
- private:
- DISALLOW_COPY_AND_ASSIGN(HConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Constant);
};
class HNullConstant FINAL : public HConstant {
@@ -2752,12 +2818,14 @@ class HNullConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(NullConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NullConstant);
+
private:
explicit HNullConstant(uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kReference, dex_pc) {}
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HNullConstant);
};
// Constants of the type int. Those can be from Dex instructions, or
@@ -2789,6 +2857,9 @@ class HIntConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(IntConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntConstant);
+
private:
explicit HIntConstant(int32_t value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kInt32, dex_pc), value_(value) {}
@@ -2800,7 +2871,6 @@ class HIntConstant FINAL : public HConstant {
friend class HGraph;
ART_FRIEND_TEST(GraphTest, InsertInstructionBefore);
ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
- DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
class HLongConstant FINAL : public HConstant {
@@ -2823,6 +2893,9 @@ class HLongConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(LongConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LongConstant);
+
private:
explicit HLongConstant(int64_t value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kInt64, dex_pc), value_(value) {}
@@ -2830,7 +2903,6 @@ class HLongConstant FINAL : public HConstant {
const int64_t value_;
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HLongConstant);
};
class HFloatConstant FINAL : public HConstant {
@@ -2872,6 +2944,9 @@ class HFloatConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(FloatConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(FloatConstant);
+
private:
explicit HFloatConstant(float value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kFloat32, dex_pc), value_(value) {}
@@ -2883,7 +2958,6 @@ class HFloatConstant FINAL : public HConstant {
// Only the SsaBuilder and HGraph can create floating-point constants.
friend class SsaBuilder;
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
};
class HDoubleConstant FINAL : public HConstant {
@@ -2923,6 +2997,9 @@ class HDoubleConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(DoubleConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(DoubleConstant);
+
private:
explicit HDoubleConstant(double value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kFloat64, dex_pc), value_(value) {}
@@ -2934,7 +3011,6 @@ class HDoubleConstant FINAL : public HConstant {
// Only the SsaBuilder and HGraph can create floating-point constants.
friend class SsaBuilder;
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
};
// Conditional branch. A block ending with an HIf instruction must have
@@ -2946,6 +3022,7 @@ class HIf FINAL : public HTemplateInstruction<1> {
SetRawInputAt(0, input);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* IfTrueSuccessor() const {
@@ -2958,8 +3035,8 @@ class HIf FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(If);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIf);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(If);
};
@@ -3012,6 +3089,9 @@ class HTryBoundary FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(TryBoundary);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TryBoundary);
+
private:
static constexpr size_t kFieldBoundaryKind = kNumberOfGenericPackedBits;
static constexpr size_t kFieldBoundaryKindSize =
@@ -3021,8 +3101,6 @@ class HTryBoundary FINAL : public HTemplateInstruction<0> {
static_assert(kNumberOfTryBoundaryPackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
using BoundaryKindField = BitField<BoundaryKind, kFieldBoundaryKind, kFieldBoundaryKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HTryBoundary);
};
// Deoptimize to interpreter, upon checking a condition.
@@ -3045,6 +3123,8 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
SetRawInputAt(0, cond);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// Use this constructor when the `HDeoptimize` guards an instruction, and any user
// that relies on the deoptimization to pass should have its input be the `HDeoptimize`
// instead of `guard`.
@@ -3098,6 +3178,9 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(Deoptimize);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Deoptimize);
+
private:
static constexpr size_t kFieldCanBeMoved = kNumberOfGenericPackedBits;
static constexpr size_t kFieldDeoptimizeKind = kNumberOfGenericPackedBits + 1;
@@ -3109,8 +3192,6 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
"Too many packed fields.");
using DeoptimizeKindField =
BitField<DeoptimizationKind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
@@ -3136,8 +3217,8 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
- private:
- DISALLOW_COPY_AND_ASSIGN(HShouldDeoptimizeFlag);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ShouldDeoptimizeFlag);
};
// Represents the ArtMethod that was passed as a first argument to
@@ -3150,8 +3231,8 @@ class HCurrentMethod FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(CurrentMethod);
- private:
- DISALLOW_COPY_AND_ASSIGN(HCurrentMethod);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(CurrentMethod);
};
// Fetches an ArtMethod from the virtual table or the interface method table
@@ -3174,6 +3255,7 @@ class HClassTableGet FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
return other->AsClassTableGet()->GetIndex() == index_ &&
@@ -3185,6 +3267,9 @@ class HClassTableGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(ClassTableGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ClassTableGet);
+
private:
static constexpr size_t kFieldTableKind = kNumberOfExpressionPackedBits;
static constexpr size_t kFieldTableKindSize =
@@ -3196,8 +3281,6 @@ class HClassTableGet FINAL : public HExpression<1> {
// The index of the ArtMethod in the table.
const size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HClassTableGet);
};
// PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
@@ -3215,6 +3298,8 @@ class HPackedSwitch FINAL : public HTemplateInstruction<1> {
SetRawInputAt(0, input);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool IsControlFlow() const OVERRIDE { return true; }
int32_t GetStartValue() const { return start_value_; }
@@ -3227,11 +3312,12 @@ class HPackedSwitch FINAL : public HTemplateInstruction<1> {
}
DECLARE_INSTRUCTION(PackedSwitch);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(PackedSwitch);
+
private:
const int32_t start_value_;
const uint32_t num_entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HPackedSwitch);
};
class HUnaryOperation : public HExpression<1> {
@@ -3241,6 +3327,9 @@ class HUnaryOperation : public HExpression<1> {
SetRawInputAt(0, input);
}
+ // All of the UnaryOperation instructions are clonable.
+ bool IsClonable() const OVERRIDE { return true; }
+
HInstruction* GetInput() const { return InputAt(0); }
DataType::Type GetResultType() const { return GetType(); }
@@ -3262,8 +3351,8 @@ class HUnaryOperation : public HExpression<1> {
DECLARE_ABSTRACT_INSTRUCTION(UnaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HUnaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnaryOperation);
};
class HBinaryOperation : public HExpression<2> {
@@ -3278,6 +3367,9 @@ class HBinaryOperation : public HExpression<2> {
SetRawInputAt(1, right);
}
+ // All of the BinaryOperation instructions are clonable.
+ bool IsClonable() const OVERRIDE { return true; }
+
HInstruction* GetLeft() const { return InputAt(0); }
HInstruction* GetRight() const { return InputAt(1); }
DataType::Type GetResultType() const { return GetType(); }
@@ -3352,8 +3444,8 @@ class HBinaryOperation : public HExpression<2> {
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HBinaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BinaryOperation);
};
// The comparison bias applies for floating point operations and indicates how NaN
@@ -3443,8 +3535,7 @@ class HCondition : public HBinaryOperation {
return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc);
}
- private:
- DISALLOW_COPY_AND_ASSIGN(HCondition);
+ DEFAULT_COPY_CONSTRUCTOR(Condition);
};
// Instruction to check if two inputs are equal to each other.
@@ -3486,10 +3577,11 @@ class HEqual FINAL : public HCondition {
return kCondNE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Equal);
+
private:
template <typename T> static bool Compute(T x, T y) { return x == y; }
-
- DISALLOW_COPY_AND_ASSIGN(HEqual);
};
class HNotEqual FINAL : public HCondition {
@@ -3529,10 +3621,11 @@ class HNotEqual FINAL : public HCondition {
return kCondEQ;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NotEqual);
+
private:
template <typename T> static bool Compute(T x, T y) { return x != y; }
-
- DISALLOW_COPY_AND_ASSIGN(HNotEqual);
};
class HLessThan FINAL : public HCondition {
@@ -3566,10 +3659,11 @@ class HLessThan FINAL : public HCondition {
return kCondGE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LessThan);
+
private:
template <typename T> static bool Compute(T x, T y) { return x < y; }
-
- DISALLOW_COPY_AND_ASSIGN(HLessThan);
};
class HLessThanOrEqual FINAL : public HCondition {
@@ -3603,10 +3697,11 @@ class HLessThanOrEqual FINAL : public HCondition {
return kCondGT;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LessThanOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) { return x <= y; }
-
- DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual);
};
class HGreaterThan FINAL : public HCondition {
@@ -3640,10 +3735,11 @@ class HGreaterThan FINAL : public HCondition {
return kCondLE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(GreaterThan);
+
private:
template <typename T> static bool Compute(T x, T y) { return x > y; }
-
- DISALLOW_COPY_AND_ASSIGN(HGreaterThan);
};
class HGreaterThanOrEqual FINAL : public HCondition {
@@ -3677,10 +3773,11 @@ class HGreaterThanOrEqual FINAL : public HCondition {
return kCondLT;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(GreaterThanOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) { return x >= y; }
-
- DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual);
};
class HBelow FINAL : public HCondition {
@@ -3715,12 +3812,13 @@ class HBelow FINAL : public HCondition {
return kCondAE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Below);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) < MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HBelow);
};
class HBelowOrEqual FINAL : public HCondition {
@@ -3755,12 +3853,13 @@ class HBelowOrEqual FINAL : public HCondition {
return kCondA;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BelowOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) <= MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual);
};
class HAbove FINAL : public HCondition {
@@ -3795,12 +3894,13 @@ class HAbove FINAL : public HCondition {
return kCondBE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Above);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) > MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HAbove);
};
class HAboveOrEqual FINAL : public HCondition {
@@ -3835,12 +3935,13 @@ class HAboveOrEqual FINAL : public HCondition {
return kCondB;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(AboveOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) >= MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HAboveOrEqual);
};
// Instruction to check how two inputs compare to each other.
@@ -3930,8 +4031,7 @@ class HCompare FINAL : public HBinaryOperation {
return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc);
}
- private:
- DISALLOW_COPY_AND_ASSIGN(HCompare);
+ DEFAULT_COPY_CONSTRUCTOR(Compare);
};
class HNewInstance FINAL : public HExpression<1> {
@@ -3950,6 +4050,8 @@ class HNewInstance FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
@@ -3986,6 +4088,9 @@ class HNewInstance FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(NewInstance);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NewInstance);
+
private:
static constexpr size_t kFlagFinalizable = kNumberOfExpressionPackedBits;
static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
@@ -3995,8 +4100,6 @@ class HNewInstance FINAL : public HExpression<1> {
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
QuickEntrypointEnum entrypoint_;
-
- DISALLOW_COPY_AND_ASSIGN(HNewInstance);
};
enum IntrinsicNeedsEnvironmentOrCache {
@@ -4114,6 +4217,8 @@ class HInvoke : public HVariableInputSizeInstruction {
SetPackedFlag<kFlagCanThrow>(true);
}
+ DEFAULT_COPY_CONSTRUCTOR(Invoke);
+
uint32_t number_of_arguments_;
ArtMethod* resolved_method_;
const uint32_t dex_method_index_;
@@ -4121,9 +4226,6 @@ class HInvoke : public HVariableInputSizeInstruction {
// A magic word holding optimizations for intrinsics. See intrinsics.h.
uint32_t intrinsic_optimizations_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HInvoke);
};
class HInvokeUnresolved FINAL : public HInvoke {
@@ -4144,10 +4246,12 @@ class HInvokeUnresolved FINAL : public HInvoke {
invoke_type) {
}
+ bool IsClonable() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(InvokeUnresolved);
- private:
- DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved);
};
class HInvokePolymorphic FINAL : public HInvoke {
@@ -4166,10 +4270,12 @@ class HInvokePolymorphic FINAL : public HInvoke {
nullptr,
kVirtual) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(InvokePolymorphic);
- private:
- DISALLOW_COPY_AND_ASSIGN(HInvokePolymorphic);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
};
class HInvokeStaticOrDirect FINAL : public HInvoke {
@@ -4256,6 +4362,8 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
void SetDispatchInfo(const DispatchInfo& dispatch_info) {
bool had_current_method_input = HasCurrentMethodInput();
bool needs_current_method_input = NeedsCurrentMethodInput(dispatch_info.method_load_kind);
@@ -4401,6 +4509,9 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeStaticOrDirect);
+
private:
static constexpr size_t kFieldClinitCheckRequirement = kNumberOfInvokePackedBits;
static constexpr size_t kFieldClinitCheckRequirementSize =
@@ -4416,8 +4527,6 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
// Cached values of the resolved method, to avoid needing the mutator lock.
MethodReference target_method_;
DispatchInfo dispatch_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect);
};
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
@@ -4441,6 +4550,8 @@ class HInvokeVirtual FINAL : public HInvoke {
kVirtual),
vtable_index_(vtable_index) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool CanBeNull() const OVERRIDE {
switch (GetIntrinsic()) {
case Intrinsics::kThreadCurrentThread:
@@ -4463,11 +4574,12 @@ class HInvokeVirtual FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeVirtual);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeVirtual);
+
private:
// Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t vtable_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
};
class HInvokeInterface FINAL : public HInvoke {
@@ -4489,6 +4601,8 @@ class HInvokeInterface FINAL : public HInvoke {
kInterface),
imt_index_(imt_index) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
// TODO: Add implicit null checks in intrinsics.
return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
@@ -4504,11 +4618,12 @@ class HInvokeInterface FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeInterface);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeInterface);
+
private:
// Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t imt_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
};
class HNeg FINAL : public HUnaryOperation {
@@ -4535,8 +4650,8 @@ class HNeg FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION(Neg);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNeg);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Neg);
};
class HNewArray FINAL : public HExpression<2> {
@@ -4547,6 +4662,8 @@ class HNewArray FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// Calls runtime so needs an environment.
bool NeedsEnvironment() const OVERRIDE { return true; }
@@ -4566,8 +4683,8 @@ class HNewArray FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(NewArray);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNewArray);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NewArray);
};
class HAdd FINAL : public HBinaryOperation {
@@ -4601,8 +4718,8 @@ class HAdd FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Add);
- private:
- DISALLOW_COPY_AND_ASSIGN(HAdd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Add);
};
class HSub FINAL : public HBinaryOperation {
@@ -4634,8 +4751,8 @@ class HSub FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Sub);
- private:
- DISALLOW_COPY_AND_ASSIGN(HSub);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Sub);
};
class HMul FINAL : public HBinaryOperation {
@@ -4669,8 +4786,8 @@ class HMul FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Mul);
- private:
- DISALLOW_COPY_AND_ASSIGN(HMul);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Mul);
};
class HDiv FINAL : public HBinaryOperation {
@@ -4716,8 +4833,8 @@ class HDiv FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Div);
- private:
- DISALLOW_COPY_AND_ASSIGN(HDiv);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Div);
};
class HRem FINAL : public HBinaryOperation {
@@ -4763,8 +4880,8 @@ class HRem FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Rem);
- private:
- DISALLOW_COPY_AND_ASSIGN(HRem);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Rem);
};
class HDivZeroCheck FINAL : public HExpression<1> {
@@ -4789,8 +4906,8 @@ class HDivZeroCheck FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(DivZeroCheck);
- private:
- DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck);
};
class HShl FINAL : public HBinaryOperation {
@@ -4835,8 +4952,8 @@ class HShl FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Shl);
- private:
- DISALLOW_COPY_AND_ASSIGN(HShl);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Shl);
};
class HShr FINAL : public HBinaryOperation {
@@ -4881,8 +4998,8 @@ class HShr FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Shr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Shr);
};
class HUShr FINAL : public HBinaryOperation {
@@ -4929,8 +5046,8 @@ class HUShr FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(UShr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HUShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UShr);
};
class HAnd FINAL : public HBinaryOperation {
@@ -4966,8 +5083,8 @@ class HAnd FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(And);
- private:
- DISALLOW_COPY_AND_ASSIGN(HAnd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(And);
};
class HOr FINAL : public HBinaryOperation {
@@ -5003,8 +5120,8 @@ class HOr FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Or);
- private:
- DISALLOW_COPY_AND_ASSIGN(HOr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Or);
};
class HXor FINAL : public HBinaryOperation {
@@ -5040,8 +5157,8 @@ class HXor FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Xor);
- private:
- DISALLOW_COPY_AND_ASSIGN(HXor);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Xor);
};
class HRor FINAL : public HBinaryOperation {
@@ -5091,8 +5208,8 @@ class HRor FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Ror);
- private:
- DISALLOW_COPY_AND_ASSIGN(HRor);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Ror);
};
// The value of a parameter in this method. Its location depends on
@@ -5122,6 +5239,9 @@ class HParameterValue FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(ParameterValue);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ParameterValue);
+
private:
// Whether or not the parameter value corresponds to 'this' argument.
static constexpr size_t kFlagIsThis = kNumberOfExpressionPackedBits;
@@ -5135,8 +5255,6 @@ class HParameterValue FINAL : public HExpression<0> {
// The index of this parameter in the parameters list. Must be less
// than HGraph::number_of_in_vregs_.
const uint8_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HParameterValue);
};
class HNot FINAL : public HUnaryOperation {
@@ -5168,8 +5286,8 @@ class HNot FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION(Not);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Not);
};
class HBooleanNot FINAL : public HUnaryOperation {
@@ -5205,8 +5323,8 @@ class HBooleanNot FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION(BooleanNot);
- private:
- DISALLOW_COPY_AND_ASSIGN(HBooleanNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BooleanNot);
};
class HTypeConversion FINAL : public HExpression<1> {
@@ -5234,8 +5352,8 @@ class HTypeConversion FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(TypeConversion);
- private:
- DISALLOW_COPY_AND_ASSIGN(HTypeConversion);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TypeConversion);
};
static constexpr uint32_t kNoRegNumber = -1;
@@ -5249,6 +5367,7 @@ class HNullCheck FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5260,11 +5379,10 @@ class HNullCheck FINAL : public HExpression<1> {
bool CanBeNull() const OVERRIDE { return false; }
-
DECLARE_INSTRUCTION(NullCheck);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNullCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NullCheck);
};
// Embeds an ArtField and all the information required by the compiler. We cache
@@ -5326,6 +5444,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
@@ -5355,10 +5474,11 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(InstanceFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InstanceFieldGet);
+
private:
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HInstanceFieldGet);
};
class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
@@ -5386,6 +5506,8 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
@@ -5400,6 +5522,9 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(InstanceFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InstanceFieldSet);
+
private:
static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagValueCanBeNull + 1;
@@ -5407,8 +5532,6 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
"Too many packed fields.");
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet);
};
class HArrayGet FINAL : public HExpression<2> {
@@ -5436,6 +5559,7 @@ class HArrayGet FINAL : public HExpression<2> {
SetRawInputAt(1, index);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5485,6 +5609,9 @@ class HArrayGet FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(ArrayGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ArrayGet);
+
private:
// We treat a String as an array, creating the HArrayGet from String.charAt()
// intrinsic in the instruction simplifier. We can always determine whether
@@ -5495,8 +5622,6 @@ class HArrayGet FINAL : public HExpression<2> {
static constexpr size_t kNumberOfArrayGetPackedBits = kFlagIsStringCharAt + 1;
static_assert(kNumberOfArrayGetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HArrayGet);
};
class HArraySet FINAL : public HTemplateInstruction<3> {
@@ -5530,6 +5655,8 @@ class HArraySet FINAL : public HTemplateInstruction<3> {
SetRawInputAt(2, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool NeedsEnvironment() const OVERRIDE {
// We call a runtime method to throw ArrayStoreException.
return NeedsTypeCheck();
@@ -5595,6 +5722,9 @@ class HArraySet FINAL : public HTemplateInstruction<3> {
DECLARE_INSTRUCTION(ArraySet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ArraySet);
+
private:
static constexpr size_t kFieldExpectedComponentType = kNumberOfGenericPackedBits;
static constexpr size_t kFieldExpectedComponentTypeSize =
@@ -5610,8 +5740,6 @@ class HArraySet FINAL : public HTemplateInstruction<3> {
static_assert(kNumberOfArraySetPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using ExpectedComponentTypeField =
BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HArraySet);
};
class HArrayLength FINAL : public HExpression<1> {
@@ -5624,6 +5752,7 @@ class HArrayLength FINAL : public HExpression<1> {
SetRawInputAt(0, array);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5636,6 +5765,9 @@ class HArrayLength FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(ArrayLength);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ArrayLength);
+
private:
// We treat a String as an array, creating the HArrayLength from String.length()
// or String.isEmpty() intrinsic in the instruction simplifier. We can always
@@ -5646,8 +5778,6 @@ class HArrayLength FINAL : public HExpression<1> {
static constexpr size_t kNumberOfArrayLengthPackedBits = kFlagIsStringLength + 1;
static_assert(kNumberOfArrayLengthPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HArrayLength);
};
class HBoundsCheck FINAL : public HExpression<2> {
@@ -5665,6 +5795,7 @@ class HBoundsCheck FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5680,10 +5811,11 @@ class HBoundsCheck FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(BoundsCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BoundsCheck);
+
private:
static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits;
-
- DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
class HSuspendCheck FINAL : public HTemplateInstruction<0> {
@@ -5691,6 +5823,8 @@ class HSuspendCheck FINAL : public HTemplateInstruction<0> {
explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc), slow_path_(nullptr) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool NeedsEnvironment() const OVERRIDE {
return true;
}
@@ -5700,12 +5834,13 @@ class HSuspendCheck FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(SuspendCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(SuspendCheck);
+
private:
// Only used for code generation, in order to share the same slow path between back edges
// of a same loop.
SlowPathCode* slow_path_;
-
- DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
// Pseudo-instruction which provides the native debugger with mapping information.
@@ -5721,8 +5856,8 @@ class HNativeDebugInfo : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(NativeDebugInfo);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNativeDebugInfo);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NativeDebugInfo);
};
/**
@@ -5788,6 +5923,8 @@ class HLoadClass FINAL : public HInstruction {
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
@@ -5879,6 +6016,9 @@ class HLoadClass FINAL : public HInstruction {
DECLARE_INSTRUCTION(LoadClass);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LoadClass);
+
private:
static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
static constexpr size_t kFlagIsInBootImage = kFlagNeedsAccessCheck + 1;
@@ -5918,8 +6058,6 @@ class HLoadClass FINAL : public HInstruction {
Handle<mirror::Class> klass_;
ReferenceTypeInfo loaded_class_rti_;
-
- DISALLOW_COPY_AND_ASSIGN(HLoadClass);
};
std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs);
@@ -5977,6 +6115,8 @@ class HLoadString FINAL : public HInstruction {
SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
@@ -6043,6 +6183,9 @@ class HLoadString FINAL : public HInstruction {
DECLARE_INSTRUCTION(LoadString);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LoadString);
+
private:
static constexpr size_t kFieldLoadKind = kNumberOfGenericPackedBits;
static constexpr size_t kFieldLoadKindSize =
@@ -6062,8 +6205,6 @@ class HLoadString FINAL : public HInstruction {
const DexFile& dex_file_;
Handle<mirror::String> string_;
-
- DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs);
@@ -6095,6 +6236,7 @@ class HClinitCheck FINAL : public HExpression<1> {
SetRawInputAt(0, constant);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -6114,8 +6256,9 @@ class HClinitCheck FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(ClinitCheck);
- private:
- DISALLOW_COPY_AND_ASSIGN(HClinitCheck);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ClinitCheck);
};
class HStaticFieldGet FINAL : public HExpression<1> {
@@ -6141,6 +6284,7 @@ class HStaticFieldGet FINAL : public HExpression<1> {
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
@@ -6166,10 +6310,11 @@ class HStaticFieldGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(StaticFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(StaticFieldGet);
+
private:
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HStaticFieldGet);
};
class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
@@ -6197,6 +6342,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
const FieldInfo& GetFieldInfo() const { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
@@ -6208,6 +6354,9 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(StaticFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(StaticFieldSet);
+
private:
static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
static constexpr size_t kNumberOfStaticFieldSetPackedBits = kFlagValueCanBeNull + 1;
@@ -6215,8 +6364,6 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
"Too many packed fields.");
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
@@ -6230,6 +6377,7 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, obj);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6238,10 +6386,11 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldGet);
+
private:
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet);
};
class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
@@ -6259,6 +6408,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6267,6 +6417,9 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldSet);
+
private:
static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldFieldTypeSize =
@@ -6278,8 +6431,6 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>;
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
};
class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
@@ -6291,6 +6442,7 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
field_index_(field_index) {
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6299,10 +6451,11 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(UnresolvedStaticFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldGet);
+
private:
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet);
};
class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
@@ -6318,6 +6471,7 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
SetRawInputAt(0, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6326,6 +6480,9 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldSet);
+
private:
static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldFieldTypeSize =
@@ -6337,8 +6494,6 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>;
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
};
// Implement the move-exception DEX instruction.
@@ -6351,8 +6506,8 @@ class HLoadException FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(LoadException);
- private:
- DISALLOW_COPY_AND_ASSIGN(HLoadException);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LoadException);
};
// Implicit part of move-exception which clears thread-local exception storage.
@@ -6364,8 +6519,8 @@ class HClearException FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(ClearException);
- private:
- DISALLOW_COPY_AND_ASSIGN(HClearException);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ClearException);
};
class HThrow FINAL : public HTemplateInstruction<1> {
@@ -6381,11 +6536,10 @@ class HThrow FINAL : public HTemplateInstruction<1> {
bool CanThrow() const OVERRIDE { return true; }
-
DECLARE_INSTRUCTION(Throw);
- private:
- DISALLOW_COPY_AND_ASSIGN(HThrow);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Throw);
};
/**
@@ -6420,6 +6574,7 @@ class HInstanceOf FINAL : public HExpression<2> {
SetRawInputAt(1, constant);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
@@ -6447,6 +6602,9 @@ class HInstanceOf FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(InstanceOf);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InstanceOf);
+
private:
static constexpr size_t kFieldTypeCheckKind = kNumberOfExpressionPackedBits;
static constexpr size_t kFieldTypeCheckKindSize =
@@ -6455,8 +6613,6 @@ class HInstanceOf FINAL : public HExpression<2> {
static constexpr size_t kNumberOfInstanceOfPackedBits = kFlagMustDoNullCheck + 1;
static_assert(kNumberOfInstanceOfPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
};
class HBoundType FINAL : public HExpression<1> {
@@ -6470,6 +6626,8 @@ class HBoundType FINAL : public HExpression<1> {
SetRawInputAt(0, input);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// {Get,Set}Upper* should only be used in reference type propagation.
const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
bool GetUpperCanBeNull() const { return GetPackedFlag<kFlagUpperCanBeNull>(); }
@@ -6484,6 +6642,9 @@ class HBoundType FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(BoundType);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BoundType);
+
private:
// Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
// is false then CanBeNull() cannot be true).
@@ -6499,8 +6660,6 @@ class HBoundType FINAL : public HExpression<1> {
// // upper_bound_ will be ClassX
// }
ReferenceTypeInfo upper_bound_;
-
- DISALLOW_COPY_AND_ASSIGN(HBoundType);
};
class HCheckCast FINAL : public HTemplateInstruction<2> {
@@ -6516,6 +6675,7 @@ class HCheckCast FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, constant);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
@@ -6536,6 +6696,9 @@ class HCheckCast FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(CheckCast);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(CheckCast);
+
private:
static constexpr size_t kFieldTypeCheckKind = kNumberOfGenericPackedBits;
static constexpr size_t kFieldTypeCheckKindSize =
@@ -6544,8 +6707,6 @@ class HCheckCast FINAL : public HTemplateInstruction<2> {
static constexpr size_t kNumberOfCheckCastPackedBits = kFlagMustDoNullCheck + 1;
static_assert(kNumberOfCheckCastPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HCheckCast);
};
/**
@@ -6582,10 +6743,15 @@ class HMemoryBarrier FINAL : public HTemplateInstruction<0> {
SetPackedField<BarrierKindField>(barrier_kind);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
DECLARE_INSTRUCTION(MemoryBarrier);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MemoryBarrier);
+
private:
static constexpr size_t kFieldBarrierKind = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldBarrierKindSize =
@@ -6595,8 +6761,6 @@ class HMemoryBarrier FINAL : public HTemplateInstruction<0> {
static_assert(kNumberOfMemoryBarrierPackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
using BarrierKindField = BitField<MemBarrierKind, kFieldBarrierKind, kFieldBarrierKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HMemoryBarrier);
};
// A constructor fence orders all prior stores to fields that could be accessed via a final field of
@@ -6747,8 +6911,8 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(ConstructorFence);
- private:
- DISALLOW_COPY_AND_ASSIGN(HConstructorFence);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ConstructorFence);
};
class HMonitorOperation FINAL : public HTemplateInstruction<1> {
@@ -6782,6 +6946,9 @@ class HMonitorOperation FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(MonitorOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MonitorOperation);
+
private:
static constexpr size_t kFieldOperationKind = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldOperationKindSize =
@@ -6791,9 +6958,6 @@ class HMonitorOperation FINAL : public HTemplateInstruction<1> {
static_assert(kNumberOfMonitorOperationPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
class HSelect FINAL : public HExpression<3> {
@@ -6814,6 +6978,7 @@ class HSelect FINAL : public HExpression<3> {
SetRawInputAt(2, condition);
}
+ bool IsClonable() const OVERRIDE { return true; }
HInstruction* GetFalseValue() const { return InputAt(0); }
HInstruction* GetTrueValue() const { return InputAt(1); }
HInstruction* GetCondition() const { return InputAt(2); }
@@ -6829,8 +6994,8 @@ class HSelect FINAL : public HExpression<3> {
DECLARE_INSTRUCTION(Select);
- private:
- DISALLOW_COPY_AND_ASSIGN(HSelect);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Select);
};
class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
@@ -6961,10 +7126,11 @@ class HParallelMove FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(ParallelMove);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ParallelMove);
+
private:
ArenaVector<MoveOperands> moves_;
-
- DISALLOW_COPY_AND_ASSIGN(HParallelMove);
};
// This instruction computes an intermediate address pointing in the 'middle' of an object. The
@@ -6983,6 +7149,7 @@ class HIntermediateAddress FINAL : public HExpression<2> {
SetRawInputAt(1, offset);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -6994,8 +7161,8 @@ class HIntermediateAddress FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(IntermediateAddress);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateAddress);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntermediateAddress);
};
@@ -7070,6 +7237,33 @@ class HGraphDelegateVisitor : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor);
};
+// Create a clone of the instruction, insert the clone into the graph in place of the
+// original instruction, and remove the original.
+HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr);
+
+// Create a clone for each clonable instruction/phi and replace the original with the clone.
+//
+// Used for testing the individual instruction cloner.
+class CloneAndReplaceInstructionVisitor : public HGraphDelegateVisitor {
+ public:
+ explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
+ : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ if (instruction->IsClonable()) {
+ ReplaceInstrOrPhiByClone(instruction);
+ instr_replaced_by_clones_count_++;
+ }
+ }
+
+ size_t GetInstrReplacedByClonesCount() const { return instr_replaced_by_clones_count_; }
+
+ private:
+ size_t instr_replaced_by_clones_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(CloneAndReplaceInstructionVisitor);
+};
+
// Iterator over the blocks that are part of the loop. Includes blocks part
// of an inner loop. The order in which the blocks are iterated is on their
// block id.
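(Editor's aside, not part of the patch: a plausible way the new visitor gets exercised by the accompanying cloner test. This is a sketch only; it assumes HGraphVisitor's usual VisitReversePostOrder() traversal and is not a verbatim excerpt from cloner_test.cc.)

    // Clone-and-replace every clonable instruction, then report how many were covered.
    size_t CloneAllClonableInstructions(HGraph* graph) {
      CloneAndReplaceInstructionVisitor visitor(graph);
      visitor.VisitReversePostOrder();  // Clonable instructions are replaced as they are visited.
      return visitor.GetInstrReplacedByClonesCount();
    }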
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index ef388c30d5..2c0595e3d8 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -30,8 +30,8 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> {
DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress);
- private:
- DISALLOW_COPY_AND_ASSIGN(HMipsComputeBaseMethodAddress);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MipsComputeBaseMethodAddress);
};
// Mips version of HPackedSwitch that holds a pointer to the base method address.
@@ -62,11 +62,12 @@ class HMipsPackedSwitch FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(MipsPackedSwitch);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MipsPackedSwitch);
+
private:
const int32_t start_value_;
const int32_t num_entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HMipsPackedSwitch);
};
// This instruction computes part of the array access offset (index offset).
@@ -105,8 +106,8 @@ class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(IntermediateArrayAddressIndex);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateArrayAddressIndex);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntermediateArrayAddressIndex);
};
} // namespace art
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 7b4f5f7cbb..e837f1e7e0 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -38,6 +38,8 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
SetRawInputAt(kInputMulRightIndex, mul_right);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
static constexpr int kInputAccumulatorIndex = 0;
static constexpr int kInputMulLeftIndex = 1;
static constexpr int kInputMulRightIndex = 2;
@@ -51,11 +53,12 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
DECLARE_INSTRUCTION(MultiplyAccumulate);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MultiplyAccumulate);
+
private:
// Indicates if this is a MADD or MSUB.
const InstructionKind op_kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate);
};
class HBitwiseNegatedRight FINAL : public HBinaryOperation {
@@ -111,11 +114,12 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(BitwiseNegatedRight);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BitwiseNegatedRight);
+
private:
// Specifies the bitwise operation, which will be then negated.
const InstructionKind op_kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HBitwiseNegatedRight);
};
// This instruction computes part of the array access offset (data and index offset).
@@ -145,6 +149,7 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
SetRawInputAt(2, shift);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -157,8 +162,8 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
DECLARE_INSTRUCTION(IntermediateAddressIndex);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateAddressIndex);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex);
};
class HDataProcWithShifterOp FINAL : public HExpression<2> {
@@ -198,6 +203,7 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> {
SetRawInputAt(1, right);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE {
const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp();
@@ -225,14 +231,15 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(DataProcWithShifterOp);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(DataProcWithShifterOp);
+
private:
InstructionKind instr_kind_;
OpKind op_kind_;
int shift_amount_;
friend std::ostream& operator<<(std::ostream& os, OpKind op);
-
- DISALLOW_COPY_AND_ASSIGN(HDataProcWithShifterOp);
};
std::ostream& operator<<(std::ostream& os, const HDataProcWithShifterOp::OpKind op);
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 17540b9770..59d5b9f847 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -161,10 +161,10 @@ class HVecOperation : public HVariableInputSizeInstruction {
static_assert(kNumberOfVectorOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>;
+ DEFAULT_COPY_CONSTRUCTOR(VecOperation);
+
private:
const size_t vector_length_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecOperation);
};
// Abstraction of a unary vector operation.
@@ -188,8 +188,8 @@ class HVecUnaryOperation : public HVecOperation {
DECLARE_ABSTRACT_INSTRUCTION(VecUnaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecUnaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecUnaryOperation);
};
// Abstraction of a binary vector operation.
@@ -216,8 +216,8 @@ class HVecBinaryOperation : public HVecOperation {
DECLARE_ABSTRACT_INSTRUCTION(VecBinaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecBinaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecBinaryOperation);
};
// Abstraction of a vector operation that references memory, with an alignment.
@@ -255,10 +255,11 @@ class HVecMemoryOperation : public HVecOperation {
DECLARE_ABSTRACT_INSTRUCTION(VecMemoryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMemoryOperation);
+
private:
Alignment alignment_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecMemoryOperation);
};
// Packed type consistency checker ("same vector length" integral types may mix freely).
@@ -296,8 +297,8 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecReplicateScalar);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecReplicateScalar);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecReplicateScalar);
};
// Extracts a particular scalar from the given vector,
@@ -329,8 +330,8 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecExtractScalar);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecExtractScalar);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecExtractScalar);
};
// Reduces the given vector into the first element as sum/min/max,
@@ -367,10 +368,11 @@ class HVecReduce FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecReduce);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecReduce);
+
private:
const ReductionKind kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecReduce);
};
// Converts every component in the vector,
@@ -394,8 +396,8 @@ class HVecCnv FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecCnv);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecCnv);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecCnv);
};
// Negates every component in the vector,
@@ -415,8 +417,8 @@ class HVecNeg FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecNeg);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecNeg);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecNeg);
};
// Takes absolute value of every component in the vector,
@@ -437,8 +439,8 @@ class HVecAbs FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecAbs);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAbs);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAbs);
};
// Bitwise- or boolean-nots every component in the vector,
@@ -459,8 +461,8 @@ class HVecNot FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecNot);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecNot);
};
//
@@ -486,8 +488,8 @@ class HVecAdd FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecAdd);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAdd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAdd);
};
// Performs halving add on every component in the two vectors, viz.
@@ -531,14 +533,15 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecHalvingAdd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecHalvingAdd);
+
private:
// Additional packed bits.
static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecHalvingAdd);
};
// Subtracts every component in the two vectors,
@@ -560,8 +563,8 @@ class HVecSub FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecSub);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecSub);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecSub);
};
// Multiplies every component in the two vectors,
@@ -583,8 +586,8 @@ class HVecMul FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecMul);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecMul);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMul);
};
// Divides every component in the two vectors,
@@ -606,8 +609,8 @@ class HVecDiv FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecDiv);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecDiv);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecDiv);
};
// Takes minimum of every component in the two vectors,
@@ -645,13 +648,14 @@ class HVecMin FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecMin);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMin);
+
private:
// Additional packed bits.
static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecMin);
};
// Takes maximum of every component in the two vectors,
@@ -689,13 +693,14 @@ class HVecMax FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecMax);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMax);
+
private:
// Additional packed bits.
static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecMax);
};
// Bitwise-ands every component in the two vectors,
@@ -716,8 +721,8 @@ class HVecAnd FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecAnd);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAnd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAnd);
};
// Bitwise-and-nots every component in the two vectors,
@@ -738,8 +743,8 @@ class HVecAndNot FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecAndNot);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAndNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAndNot);
};
// Bitwise-ors every component in the two vectors,
@@ -760,8 +765,8 @@ class HVecOr FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecOr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecOr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecOr);
};
// Bitwise-xors every component in the two vectors,
@@ -782,8 +787,8 @@ class HVecXor FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecXor);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecXor);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecXor);
};
// Logically shifts every component in the vector left by the given distance,
@@ -804,8 +809,8 @@ class HVecShl FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecShl);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecShl);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecShl);
};
// Arithmetically shifts every component in the vector right by the given distance,
@@ -826,8 +831,8 @@ class HVecShr FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecShr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecShr);
};
// Logically shifts every component in the vector right by the given distance,
@@ -848,8 +853,8 @@ class HVecUShr FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecUShr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecUShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecUShr);
};
//
@@ -885,8 +890,8 @@ class HVecSetScalars FINAL : public HVecOperation {
DECLARE_INSTRUCTION(VecSetScalars);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecSetScalars);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecSetScalars);
};
// Multiplies every component in the two vectors, adds the result vector to the accumulator vector,
@@ -929,11 +934,12 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
DECLARE_INSTRUCTION(VecMultiplyAccumulate);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMultiplyAccumulate);
+
private:
// Indicates if this is a MADD or MSUB.
const InstructionKind op_kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecMultiplyAccumulate);
};
// Takes the absolute difference of two vectors, and adds the results to
@@ -968,8 +974,8 @@ class HVecSADAccumulate FINAL : public HVecOperation {
DECLARE_INSTRUCTION(VecSADAccumulate);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecSADAccumulate);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate);
};
// Loads a vector from memory, viz. load(mem, 1)
@@ -1007,13 +1013,14 @@ class HVecLoad FINAL : public HVecMemoryOperation {
DECLARE_INSTRUCTION(VecLoad);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecLoad);
+
private:
// Additional packed bits.
static constexpr size_t kFieldIsStringCharAt = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfVecLoadPackedBits = kFieldIsStringCharAt + 1;
static_assert(kNumberOfVecLoadPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecLoad);
};
// Stores a vector to memory, viz. store(m, 1, [x1, .. , xn] )
@@ -1045,8 +1052,8 @@ class HVecStore FINAL : public HVecMemoryOperation {
DECLARE_INSTRUCTION(VecStore);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecStore);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecStore);
};
} // namespace art
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index 22e92eab31..6326065fe2 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -30,8 +30,8 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
- private:
- DISALLOW_COPY_AND_ASSIGN(HX86ComputeBaseMethodAddress);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86ComputeBaseMethodAddress);
};
// Load a constant value from the constant table.
@@ -54,8 +54,8 @@ class HX86LoadFromConstantTable FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(X86LoadFromConstantTable);
- private:
- DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86LoadFromConstantTable);
};
// Version of HNeg with access to the constant table for FP types.
@@ -77,8 +77,8 @@ class HX86FPNeg FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(X86FPNeg);
- private:
- DISALLOW_COPY_AND_ASSIGN(HX86FPNeg);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86FPNeg);
};
// X86 version of HPackedSwitch that holds a pointer to the base method address.
@@ -113,11 +113,12 @@ class HX86PackedSwitch FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(X86PackedSwitch);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86PackedSwitch);
+
private:
const int32_t start_value_;
const int32_t num_entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch);
};
} // namespace art
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index bd8583bd41..ad287b0745 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -1332,10 +1332,10 @@ TEST_F(Dex2oatTest, LayoutSections) {
code_section.parts_[static_cast<size_t>(LayoutType::kLayoutTypeUnused)];
// All the sections should be non-empty.
- EXPECT_GT(section_hot_code.size_, 0u);
- EXPECT_GT(section_sometimes_used.size_, 0u);
- EXPECT_GT(section_startup_only.size_, 0u);
- EXPECT_GT(section_unused.size_, 0u);
+ EXPECT_GT(section_hot_code.Size(), 0u);
+ EXPECT_GT(section_sometimes_used.Size(), 0u);
+ EXPECT_GT(section_startup_only.Size(), 0u);
+ EXPECT_GT(section_unused.Size(), 0u);
// Open the dex file since we need to peek at the code items to verify the layout matches what
// we expect.
@@ -1364,18 +1364,18 @@ TEST_F(Dex2oatTest, LayoutSections) {
const bool is_post_startup = ContainsElement(post_methods, method_idx);
if (is_hot) {
// Hot is highest precedence, check that the hot methods are in the hot section.
- EXPECT_LT(code_item_offset - section_hot_code.offset_, section_hot_code.size_);
+ EXPECT_TRUE(section_hot_code.Contains(code_item_offset));
++hot_count;
} else if (is_post_startup) {
// Post startup is sometimes used section.
- EXPECT_LT(code_item_offset - section_sometimes_used.offset_, section_sometimes_used.size_);
+ EXPECT_TRUE(section_sometimes_used.Contains(code_item_offset));
++post_startup_count;
} else if (is_startup) {
// Startup at this point means not hot or post startup, these must be startup only then.
- EXPECT_LT(code_item_offset - section_startup_only.offset_, section_startup_only.size_);
+ EXPECT_TRUE(section_startup_only.Contains(code_item_offset));
++startup_count;
} else {
- if (code_item_offset - section_unused.offset_ < section_unused.size_) {
+ if (section_unused.Contains(code_item_offset)) {
// If no flags are set, the method should be unused ...
++unused_count;
} else {
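(Editor's aside, not part of the patch: the hand-written range arithmetic in this test was replaced by a Contains() helper on the layout subsection, declared in runtime/dex_file_layout.h outside this excerpt. A minimal sketch of the assumed semantics, matching the old check exactly:)

    struct Subsection {
      uint32_t offset_ = 0u;
      uint32_t size_ = 0u;

      uint32_t Size() const { return size_; }

      bool Contains(uint32_t offset) const {
        // Unsigned wrap-around makes offsets below offset_ fail the comparison,
        // so this is equivalent to the old "offset - offset_ < size_" pattern.
        return offset - offset_ < size_;
      }
    };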
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 6e8dbaf4e7..663a8896ff 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -295,46 +295,29 @@ class OatWriter::OatDexFile {
// Whether to create the type lookup table.
CreateTypeLookupTable create_type_lookup_table_;
- // Dex file size. Initialized when copying the dex file in the
- // WriteDexFile methods.
+ // Dex file size. Initialized when writing the dex file.
size_t dex_file_size_;
// Offset of start of OatDexFile from beginning of OatHeader. It is
// used to validate file position when writing.
size_t offset_;
- ///// Start of data to write to vdex/oat file.
-
- const uint32_t dex_file_location_size_;
- const char* const dex_file_location_data_;
-
- // The checksum of the dex file. Initialized when adding a DexFile
- // source.
+ // Data to write.
+ uint32_t dex_file_location_size_;
+ const char* dex_file_location_data_;
uint32_t dex_file_location_checksum_;
-
- // Offset of the dex file in the vdex file. Set when writing dex files in
- // SeekToDexFile.
uint32_t dex_file_offset_;
-
- // The lookup table offset in the oat file. Set in WriteTypeLookupTables.
- uint32_t lookup_table_offset_;
-
- // Offset of dex sections that will have different runtime madvise states.
- // Set in WriteDexLayoutSections.
- uint32_t dex_sections_layout_offset_;
-
- // Class and BSS offsets set in PrepareLayout.
uint32_t class_offsets_offset_;
+ uint32_t lookup_table_offset_;
uint32_t method_bss_mapping_offset_;
+ uint32_t dex_sections_layout_offset_;
- // Data to write to a separate section. We set the length
- // of the vector in OpenDexFiles.
+ // Data to write to a separate section.
dchecked_vector<uint32_t> class_offsets_;
// Dex section layout info to serialize.
DexLayoutSections dex_sections_layout_;
- ///// End of data to write to vdex/oat file.
private:
DISALLOW_COPY_AND_ASSIGN(OatDexFile);
};
@@ -481,8 +464,6 @@ bool OatWriter::AddZippedDexFilesSource(File&& zip_fd,
oat_dex_files_.emplace_back(full_location,
DexFileSource(zipped_dex_files_.back().get()),
create_type_lookup_table);
- // Override the checksum from header with the CRC from ZIP entry.
- oat_dex_files_.back().dex_file_location_checksum_ = zipped_dex_files_.back()->GetCrc32();
}
if (zipped_dex_file_locations_.empty()) {
LOG(ERROR) << "No dex files in zip file '" << location << "': " << error_msg;
@@ -3080,6 +3061,8 @@ bool OatWriter::ReadDexFileHeader(File* file, OatDexFile* oat_dex_file) {
const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_header);
oat_dex_file->dex_file_size_ = header->file_size_;
+ oat_dex_file->dex_file_location_checksum_ = header->checksum_;
+ oat_dex_file->class_offsets_.resize(header->class_defs_size_);
return true;
}
@@ -3260,6 +3243,8 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
return false;
}
oat_dex_file->dex_sections_layout_ = dex_layout.GetSections();
+ // Set the checksum of the new oat dex file to be the original file's checksum.
+ oat_dex_file->dex_file_location_checksum_ = dex_file->GetLocationChecksum();
return true;
}
@@ -3309,7 +3294,6 @@ bool OatWriter::WriteDexFile(OutputStream* out,
<< " File: " << oat_dex_file->GetLocation() << " Output: " << file->GetPath();
return false;
}
- // Read the dex file header to get the dex file size.
if (!ReadDexFileHeader(file, oat_dex_file)) {
return false;
}
@@ -3320,6 +3304,9 @@ bool OatWriter::WriteDexFile(OutputStream* out,
return false;
}
+ // Override the checksum from header with the CRC from ZIP entry.
+ oat_dex_file->dex_file_location_checksum_ = dex_file->GetCrc32();
+
// Seek both file and stream to the end offset.
size_t end_offset = start_offset + oat_dex_file->dex_file_size_;
actual_offset = lseek(file->Fd(), end_offset, SEEK_SET);
@@ -3368,7 +3355,6 @@ bool OatWriter::WriteDexFile(OutputStream* out,
<< " File: " << oat_dex_file->GetLocation() << " Output: " << file->GetPath();
return false;
}
- // Read the dex file header to get the dex file size.
if (!ReadDexFileHeader(dex_file, oat_dex_file)) {
return false;
}
@@ -3435,7 +3421,10 @@ bool OatWriter::WriteDexFile(OutputStream* out,
}
// Update dex file size and resize class offsets in the OatDexFile.
+ // Note: For raw data, the checksum is passed directly to AddRawDexFileSource().
+ // Note: For vdex, the checksum is copied from the existing vdex file.
oat_dex_file->dex_file_size_ = header->file_size_;
+ oat_dex_file->class_offsets_.resize(header->class_defs_size_);
return true;
}
@@ -3471,22 +3460,29 @@ bool OatWriter::OpenDexFiles(
}
std::vector<std::unique_ptr<const DexFile>> dex_files;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
+ // Make sure no one messed with input files while we were copying data.
+ // At the very least we need consistent file size and number of class definitions.
const uint8_t* raw_dex_file =
dex_files_map->Begin() + oat_dex_file.dex_file_offset_ - map_offset;
-
- if (kIsDebugBuild) {
- // Sanity check our input files.
- // Note that ValidateDexFileHeader() logs error messages.
- CHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file.GetLocation()))
- << "Failed to verify written dex file header!"
+ if (!ValidateDexFileHeader(raw_dex_file, oat_dex_file.GetLocation())) {
+ // Note: ValidateDexFileHeader() already logged an error message.
+ LOG(ERROR) << "Failed to verify written dex file header!"
<< " Output: " << file->GetPath() << " ~ " << std::hex << map_offset
<< " ~ " << static_cast<const void*>(raw_dex_file);
-
- const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file);
- CHECK_EQ(header->file_size_, oat_dex_file.dex_file_size_)
- << "File size mismatch in written dex file header! Expected: "
+ return false;
+ }
+ const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file);
+ if (header->file_size_ != oat_dex_file.dex_file_size_) {
+ LOG(ERROR) << "File size mismatch in written dex file header! Expected: "
<< oat_dex_file.dex_file_size_ << " Actual: " << header->file_size_
<< " Output: " << file->GetPath();
+ return false;
+ }
+ if (header->class_defs_size_ != oat_dex_file.class_offsets_.size()) {
+ LOG(ERROR) << "Class defs size mismatch in written dex file header! Expected: "
+ << oat_dex_file.class_offsets_.size() << " Actual: " << header->class_defs_size_
+ << " Output: " << file->GetPath();
+ return false;
}
// Now, open the dex file.
@@ -3503,10 +3499,6 @@ bool OatWriter::OpenDexFiles(
<< " Error: " << error_msg;
return false;
}
-
- // Set the class_offsets size now that we have easy access to the DexFile and
- // it has been verified in DexFileLoader::Open.
- oat_dex_file.class_offsets_.resize(dex_files.back()->GetHeader().class_defs_size_);
}
*opened_dex_files_map = std::move(dex_files_map);
@@ -3753,10 +3745,10 @@ OatWriter::OatDexFile::OatDexFile(const char* dex_file_location,
dex_file_location_data_(dex_file_location),
dex_file_location_checksum_(0u),
dex_file_offset_(0u),
- lookup_table_offset_(0u),
- dex_sections_layout_offset_(0u),
class_offsets_offset_(0u),
+ lookup_table_offset_(0u),
method_bss_mapping_offset_(0u),
+ dex_sections_layout_offset_(0u),
class_offsets_() {
}
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
index b089c1d4b3..f2d46199a2 100644
--- a/dexlayout/compact_dex_writer.cc
+++ b/dexlayout/compact_dex_writer.cc
@@ -47,7 +47,7 @@ void CompactDexWriter::WriteHeader() {
header.class_defs_off_ = collections.ClassDefsOffset();
header.data_size_ = header_->DataSize();
header.data_off_ = header_->DataOffset();
- Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u);
+ UNUSED(Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u));
}
} // namespace art
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index 1c77202c9a..e28efab5c1 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -25,9 +25,12 @@ namespace art {
class CompactDexWriter : public DexWriter {
public:
- CompactDexWriter(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level)
- : DexWriter(header, mem_map),
- compact_dex_level_(compact_dex_level) { }
+ CompactDexWriter(dex_ir::Header* header,
+ MemMap* mem_map,
+ DexLayout* dex_layout,
+ CompactDexLevel compact_dex_level)
+ : DexWriter(header, mem_map, dex_layout, /*compute_offsets*/ true),
+ compact_dex_level_(compact_dex_level) {}
protected:
void WriteHeader() OVERRIDE;
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 3edb0a44f2..a8ba950c28 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -186,22 +186,28 @@ static bool GetIdsFromByteCode(Collections& collections,
return has_id;
}
-EncodedValue* Collections::ReadEncodedValue(const uint8_t** data) {
+EncodedValue* Collections::ReadEncodedValue(const DexFile& dex_file, const uint8_t** data) {
const uint8_t encoded_value = *(*data)++;
const uint8_t type = encoded_value & 0x1f;
EncodedValue* item = new EncodedValue(type);
- ReadEncodedValue(data, type, encoded_value >> 5, item);
+ ReadEncodedValue(dex_file, data, type, encoded_value >> 5, item);
return item;
}
-EncodedValue* Collections::ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length) {
+EncodedValue* Collections::ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length) {
EncodedValue* item = new EncodedValue(type);
- ReadEncodedValue(data, type, length, item);
+ ReadEncodedValue(dex_file, data, type, length, item);
return item;
}
-void Collections::ReadEncodedValue(
- const uint8_t** data, uint8_t type, uint8_t length, EncodedValue* item) {
+void Collections::ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length,
+ EncodedValue* item) {
switch (type) {
case DexFile::kDexAnnotationByte:
item->SetByte(static_cast<int8_t>(ReadVarWidth(data, length, false)));
@@ -271,12 +277,17 @@ void Collections::ReadEncodedValue(
}
case DexFile::kDexAnnotationArray: {
EncodedValueVector* values = new EncodedValueVector();
+ const uint32_t offset = *data - dex_file.Begin();
const uint32_t size = DecodeUnsignedLeb128(data);
// Decode all elements.
for (uint32_t i = 0; i < size; i++) {
- values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(data)));
+ values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(dex_file, data)));
}
- item->SetEncodedArray(new EncodedArrayItem(values));
+ EncodedArrayItem* array_item = new EncodedArrayItem(values);
+ if (eagerly_assign_offsets_) {
+ array_item->SetOffset(offset);
+ }
+ item->SetEncodedArray(array_item);
break;
}
case DexFile::kDexAnnotationAnnotation: {
@@ -287,7 +298,7 @@ void Collections::ReadEncodedValue(
for (uint32_t i = 0; i < size; i++) {
const uint32_t name_index = DecodeUnsignedLeb128(data);
elements->push_back(std::unique_ptr<AnnotationElement>(
- new AnnotationElement(GetStringId(name_index), ReadEncodedValue(data))));
+ new AnnotationElement(GetStringId(name_index), ReadEncodedValue(dex_file, data))));
}
item->SetEncodedAnnotation(new EncodedAnnotation(GetTypeId(type_idx), elements));
break;
@@ -305,16 +316,16 @@ void Collections::ReadEncodedValue(
void Collections::CreateStringId(const DexFile& dex_file, uint32_t i) {
const DexFile::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
StringData* string_data = new StringData(dex_file.GetStringData(disk_string_id));
- string_datas_.AddItem(string_data, disk_string_id.string_data_off_);
+ AddItem(string_datas_map_, string_datas_, string_data, disk_string_id.string_data_off_);
StringId* string_id = new StringId(string_data);
- string_ids_.AddIndexedItem(string_id, StringIdsOffset() + i * StringId::ItemSize(), i);
+ AddIndexedItem(string_ids_, string_id, StringIdsOffset() + i * StringId::ItemSize(), i);
}
void Collections::CreateTypeId(const DexFile& dex_file, uint32_t i) {
const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
TypeId* type_id = new TypeId(GetStringId(disk_type_id.descriptor_idx_.index_));
- type_ids_.AddIndexedItem(type_id, TypeIdsOffset() + i * TypeId::ItemSize(), i);
+ AddIndexedItem(type_ids_, type_id, TypeIdsOffset() + i * TypeId::ItemSize(), i);
}
void Collections::CreateProtoId(const DexFile& dex_file, uint32_t i) {
@@ -325,7 +336,7 @@ void Collections::CreateProtoId(const DexFile& dex_file, uint32_t i) {
ProtoId* proto_id = new ProtoId(GetStringId(disk_proto_id.shorty_idx_.index_),
GetTypeId(disk_proto_id.return_type_idx_.index_),
parameter_type_list);
- proto_ids_.AddIndexedItem(proto_id, ProtoIdsOffset() + i * ProtoId::ItemSize(), i);
+ AddIndexedItem(proto_ids_, proto_id, ProtoIdsOffset() + i * ProtoId::ItemSize(), i);
}
void Collections::CreateFieldId(const DexFile& dex_file, uint32_t i) {
@@ -333,7 +344,7 @@ void Collections::CreateFieldId(const DexFile& dex_file, uint32_t i) {
FieldId* field_id = new FieldId(GetTypeId(disk_field_id.class_idx_.index_),
GetTypeId(disk_field_id.type_idx_.index_),
GetStringId(disk_field_id.name_idx_.index_));
- field_ids_.AddIndexedItem(field_id, FieldIdsOffset() + i * FieldId::ItemSize(), i);
+ AddIndexedItem(field_ids_, field_id, FieldIdsOffset() + i * FieldId::ItemSize(), i);
}
void Collections::CreateMethodId(const DexFile& dex_file, uint32_t i) {
@@ -341,7 +352,7 @@ void Collections::CreateMethodId(const DexFile& dex_file, uint32_t i) {
MethodId* method_id = new MethodId(GetTypeId(disk_method_id.class_idx_.index_),
GetProtoId(disk_method_id.proto_idx_),
GetStringId(disk_method_id.name_idx_.index_));
- method_ids_.AddIndexedItem(method_id, MethodIdsOffset() + i * MethodId::ItemSize(), i);
+ AddIndexedItem(method_ids_, method_id, MethodIdsOffset() + i * MethodId::ItemSize(), i);
}
void Collections::CreateClassDef(const DexFile& dex_file, uint32_t i) {
@@ -365,48 +376,48 @@ void Collections::CreateClassDef(const DexFile& dex_file, uint32_t i) {
// Static field initializers.
const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
EncodedArrayItem* static_values =
- CreateEncodedArrayItem(static_data, disk_class_def.static_values_off_);
+ CreateEncodedArrayItem(dex_file, static_data, disk_class_def.static_values_off_);
ClassData* class_data = CreateClassData(
dex_file, dex_file.GetClassData(disk_class_def), disk_class_def.class_data_off_);
ClassDef* class_def = new ClassDef(class_type, access_flags, superclass, interfaces_type_list,
source_file, annotations, static_values, class_data);
- class_defs_.AddIndexedItem(class_def, ClassDefsOffset() + i * ClassDef::ItemSize(), i);
+ AddIndexedItem(class_defs_, class_def, ClassDefsOffset() + i * ClassDef::ItemSize(), i);
}
TypeList* Collections::CreateTypeList(const DexFile::TypeList* dex_type_list, uint32_t offset) {
if (dex_type_list == nullptr) {
return nullptr;
}
- auto found_type_list = TypeLists().find(offset);
- if (found_type_list != TypeLists().end()) {
- return found_type_list->second.get();
- }
- TypeIdVector* type_vector = new TypeIdVector();
- uint32_t size = dex_type_list->Size();
- for (uint32_t index = 0; index < size; ++index) {
- type_vector->push_back(GetTypeId(dex_type_list->GetTypeItem(index).type_idx_.index_));
+ TypeList* type_list = type_lists_map_.GetExistingObject(offset);
+ if (type_list == nullptr) {
+ TypeIdVector* type_vector = new TypeIdVector();
+ uint32_t size = dex_type_list->Size();
+ for (uint32_t index = 0; index < size; ++index) {
+ type_vector->push_back(GetTypeId(dex_type_list->GetTypeItem(index).type_idx_.index_));
+ }
+ type_list = new TypeList(type_vector);
+ AddItem(type_lists_map_, type_lists_, type_list, offset);
}
- TypeList* new_type_list = new TypeList(type_vector);
- type_lists_.AddItem(new_type_list, offset);
- return new_type_list;
+ return type_list;
}
-EncodedArrayItem* Collections::CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset) {
+EncodedArrayItem* Collections::CreateEncodedArrayItem(const DexFile& dex_file,
+ const uint8_t* static_data,
+ uint32_t offset) {
if (static_data == nullptr) {
return nullptr;
}
- auto found_encoded_array_item = EncodedArrayItems().find(offset);
- if (found_encoded_array_item != EncodedArrayItems().end()) {
- return found_encoded_array_item->second.get();
- }
- uint32_t size = DecodeUnsignedLeb128(&static_data);
- EncodedValueVector* values = new EncodedValueVector();
- for (uint32_t i = 0; i < size; ++i) {
- values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(&static_data)));
+ EncodedArrayItem* encoded_array_item = encoded_array_items_map_.GetExistingObject(offset);
+ if (encoded_array_item == nullptr) {
+ uint32_t size = DecodeUnsignedLeb128(&static_data);
+ EncodedValueVector* values = new EncodedValueVector();
+ for (uint32_t i = 0; i < size; ++i) {
+ values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(dex_file, &static_data)));
+ }
+ // TODO: Calculate the size of the encoded array.
+ encoded_array_item = new EncodedArrayItem(values);
+ AddItem(encoded_array_items_map_, encoded_array_items_, encoded_array_item, offset);
}
- // TODO: Calculate the size of the encoded array.
- EncodedArrayItem* encoded_array_item = new EncodedArrayItem(values);
- encoded_array_items_.AddItem(encoded_array_item, offset);
return encoded_array_item;
}
@@ -427,19 +438,16 @@ AnnotationItem* Collections::CreateAnnotationItem(const DexFile& dex_file,
const DexFile::AnnotationItem* annotation) {
const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
const uint32_t offset = start_data - dex_file.Begin();
- auto found_annotation_item = AnnotationItems().find(offset);
- if (found_annotation_item != AnnotationItems().end()) {
- return found_annotation_item->second.get();
+ AnnotationItem* annotation_item = annotation_items_map_.GetExistingObject(offset);
+ if (annotation_item == nullptr) {
+ uint8_t visibility = annotation->visibility_;
+ const uint8_t* annotation_data = annotation->annotation_;
+ std::unique_ptr<EncodedValue> encoded_value(
+ ReadEncodedValue(dex_file, &annotation_data, DexFile::kDexAnnotationAnnotation, 0));
+ annotation_item = new AnnotationItem(visibility, encoded_value->ReleaseEncodedAnnotation());
+ annotation_item->SetSize(annotation_data - start_data);
+ AddItem(annotation_items_map_, annotation_items_, annotation_item, offset);
}
- uint8_t visibility = annotation->visibility_;
- const uint8_t* annotation_data = annotation->annotation_;
- std::unique_ptr<EncodedValue> encoded_value(
- ReadEncodedValue(&annotation_data, DexFile::kDexAnnotationAnnotation, 0));
- AnnotationItem* annotation_item =
- new AnnotationItem(visibility, encoded_value->ReleaseEncodedAnnotation());
- annotation_item->SetOffset(offset);
- annotation_item->SetSize(annotation_data - start_data);
- annotation_items_.AddItem(annotation_item, annotation_item->GetOffset());
return annotation_item;
}
@@ -449,30 +457,30 @@ AnnotationSetItem* Collections::CreateAnnotationSetItem(const DexFile& dex_file,
if (disk_annotations_item == nullptr || (disk_annotations_item->size_ == 0 && offset == 0)) {
return nullptr;
}
- auto found_anno_set_item = AnnotationSetItems().find(offset);
- if (found_anno_set_item != AnnotationSetItems().end()) {
- return found_anno_set_item->second.get();
- }
- std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>();
- for (uint32_t i = 0; i < disk_annotations_item->size_; ++i) {
- const DexFile::AnnotationItem* annotation =
- dex_file.GetAnnotationItem(disk_annotations_item, i);
- if (annotation == nullptr) {
- continue;
+ AnnotationSetItem* annotation_set_item = annotation_set_items_map_.GetExistingObject(offset);
+ if (annotation_set_item == nullptr) {
+ std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>();
+ for (uint32_t i = 0; i < disk_annotations_item->size_; ++i) {
+ const DexFile::AnnotationItem* annotation =
+ dex_file.GetAnnotationItem(disk_annotations_item, i);
+ if (annotation == nullptr) {
+ continue;
+ }
+ AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
+ items->push_back(annotation_item);
}
- AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
- items->push_back(annotation_item);
+ annotation_set_item = new AnnotationSetItem(items);
+ AddItem(annotation_set_items_map_, annotation_set_items_, annotation_set_item, offset);
}
- AnnotationSetItem* annotation_set_item = new AnnotationSetItem(items);
- annotation_set_items_.AddItem(annotation_set_item, offset);
return annotation_set_item;
}
AnnotationsDirectoryItem* Collections::CreateAnnotationsDirectoryItem(const DexFile& dex_file,
const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
- auto found_anno_dir_item = AnnotationsDirectoryItems().find(offset);
- if (found_anno_dir_item != AnnotationsDirectoryItems().end()) {
- return found_anno_dir_item->second.get();
+ AnnotationsDirectoryItem* annotations_directory_item =
+ annotations_directory_items_map_.GetExistingObject(offset);
+ if (annotations_directory_item != nullptr) {
+ return annotations_directory_item;
}
const DexFile::AnnotationSetItem* class_set_item =
dex_file.GetClassAnnotationSet(disk_annotations_item);
@@ -527,20 +535,19 @@ AnnotationsDirectoryItem* Collections::CreateAnnotationsDirectoryItem(const DexF
}
}
// TODO: Calculate the size of the annotations directory.
- AnnotationsDirectoryItem* annotations_directory_item = new AnnotationsDirectoryItem(
+ annotations_directory_item = new AnnotationsDirectoryItem(
class_annotation, field_annotations, method_annotations, parameter_annotations);
- annotations_directory_items_.AddItem(annotations_directory_item, offset);
+ AddItem(annotations_directory_items_map_,
+ annotations_directory_items_,
+ annotations_directory_item,
+ offset);
return annotations_directory_item;
}
ParameterAnnotation* Collections::GenerateParameterAnnotation(
const DexFile& dex_file, MethodId* method_id,
const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset) {
- AnnotationSetRefList* set_ref_list = nullptr;
- auto found_set_ref_list = AnnotationSetRefLists().find(offset);
- if (found_set_ref_list != AnnotationSetRefLists().end()) {
- set_ref_list = found_set_ref_list->second.get();
- }
+ AnnotationSetRefList* set_ref_list = annotation_set_ref_lists_map_.GetExistingObject(offset);
if (set_ref_list == nullptr) {
std::vector<AnnotationSetItem*>* annotations = new std::vector<AnnotationSetItem*>();
for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
@@ -550,7 +557,7 @@ ParameterAnnotation* Collections::GenerateParameterAnnotation(
annotations->push_back(CreateAnnotationSetItem(dex_file, annotation_set_item, set_offset));
}
set_ref_list = new AnnotationSetRefList(annotations);
- annotation_set_ref_lists_.AddItem(set_ref_list, offset);
+ AddItem(annotation_set_ref_lists_map_, annotation_set_ref_lists_, set_ref_list, offset);
}
return new ParameterAnnotation(method_id, set_ref_list);
}
@@ -566,13 +573,13 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(&disk_code_item);
DebugInfoItem* debug_info = nullptr;
if (debug_info_stream != nullptr) {
- debug_info = debug_info_items_.GetExistingObject(disk_code_item.debug_info_off_);
+ debug_info = debug_info_items_map_.GetExistingObject(disk_code_item.debug_info_off_);
if (debug_info == nullptr) {
uint32_t debug_info_size = GetDebugInfoStreamSize(debug_info_stream);
uint8_t* debug_info_buffer = new uint8_t[debug_info_size];
memcpy(debug_info_buffer, debug_info_stream, debug_info_size);
debug_info = new DebugInfoItem(debug_info_size, debug_info_buffer);
- debug_info_items_.AddItem(debug_info, disk_code_item.debug_info_off_);
+ AddItem(debug_info_items_map_, debug_info_items_, debug_info, disk_code_item.debug_info_off_);
}
}
@@ -662,7 +669,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
CodeItem* code_item = new CodeItem(
registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries, handler_list);
code_item->SetSize(size);
- code_items_.AddItem(code_item, offset);
+ AddItem(code_items_map_, code_items_, code_item, offset);
// Add "fixup" references to types, strings, methods, and fields.
// This is temporary, as we will probably want more detailed parsing of the
// instructions here.
@@ -690,7 +697,7 @@ MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataIt
MethodId* method_id = GetMethodId(cdii.GetMemberIndex());
uint32_t access_flags = cdii.GetRawMemberAccessFlags();
const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
- CodeItem* code_item = code_items_.GetExistingObject(cdii.GetMethodCodeItemOffset());
+ CodeItem* code_item = code_items_map_.GetExistingObject(cdii.GetMethodCodeItemOffset());
DebugInfoItem* debug_info = nullptr;
if (disk_code_item != nullptr) {
if (code_item == nullptr) {
@@ -705,7 +712,7 @@ ClassData* Collections::CreateClassData(
const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset) {
// Read the fields and methods defined by the class, resolving the circular reference from those
// to classes by setting class at the same time.
- ClassData* class_data = class_datas_.GetExistingObject(offset);
+ ClassData* class_data = class_datas_map_.GetExistingObject(offset);
if (class_data == nullptr && encoded_data != nullptr) {
ClassDataItemIterator cdii(dex_file, encoded_data);
// Static fields.
@@ -735,7 +742,7 @@ ClassData* Collections::CreateClassData(
}
class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods);
class_data->SetSize(cdii.EndDataPointer() - encoded_data);
- class_datas_.AddItem(class_data, offset);
+ AddItem(class_datas_map_, class_datas_, class_data, offset);
}
return class_data;
}
@@ -771,10 +778,10 @@ void Collections::CreateCallSiteId(const DexFile& dex_file, uint32_t i) {
const DexFile::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
const uint8_t* disk_call_item_ptr = dex_file.Begin() + disk_call_site_id.data_off_;
EncodedArrayItem* call_site_item =
- CreateEncodedArrayItem(disk_call_item_ptr, disk_call_site_id.data_off_);
+ CreateEncodedArrayItem(dex_file, disk_call_item_ptr, disk_call_site_id.data_off_);
CallSiteId* call_site_id = new CallSiteId(call_site_item);
- call_site_ids_.AddIndexedItem(call_site_id, CallSiteIdsOffset() + i * CallSiteId::ItemSize(), i);
+ AddIndexedItem(call_site_ids_, call_site_id, CallSiteIdsOffset() + i * CallSiteId::ItemSize(), i);
}
void Collections::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
@@ -796,8 +803,23 @@ void Collections::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
field_or_method_id = GetFieldId(index);
}
MethodHandleItem* method_handle = new MethodHandleItem(type, field_or_method_id);
- method_handle_items_.AddIndexedItem(
- method_handle, MethodHandleItemsOffset() + i * MethodHandleItem::ItemSize(), i);
+ AddIndexedItem(method_handle_items_,
+ method_handle,
+ MethodHandleItemsOffset() + i * MethodHandleItem::ItemSize(),
+ i);
+}
+
+void Collections::SortVectorsByMapOrder() {
+ string_datas_map_.SortVectorByMapOrder(string_datas_);
+ type_lists_map_.SortVectorByMapOrder(type_lists_);
+ encoded_array_items_map_.SortVectorByMapOrder(encoded_array_items_);
+ annotation_items_map_.SortVectorByMapOrder(annotation_items_);
+ annotation_set_items_map_.SortVectorByMapOrder(annotation_set_items_);
+ annotation_set_ref_lists_map_.SortVectorByMapOrder(annotation_set_ref_lists_);
+ annotations_directory_items_map_.SortVectorByMapOrder(annotations_directory_items_);
+ debug_info_items_map_.SortVectorByMapOrder(debug_info_items_);
+ code_items_map_.SortVectorByMapOrder(code_items_);
+ class_datas_map_.SortVectorByMapOrder(class_datas_);
}
static uint32_t HeaderOffset(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
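(Editor's aside, not part of the patch: the many AddItem()/AddIndexedItem() call sites above funnel through helpers on Collections, declared in dex_ir.h. Their bodies are not shown in this diff; the sketch below is an assumption about what they do, given the eagerly_assign_offsets_ flag used earlier in ReadEncodedValue.)

    template <class T>
    void Collections::AddItem(CollectionMap<T>& map,
                              CollectionVector<T>& vector,
                              T* item,
                              uint32_t offset) {
      if (eagerly_assign_offsets_) {
        item->SetOffset(offset);  // Offsets may instead be assigned later, during layout.
      }
      map.AddItem(item, offset);  // Keyed by original file offset for deduplication.
      vector.AddItem(item);       // The vector owns the item.
    }

    template <class T>
    void Collections::AddIndexedItem(IndexedCollectionVector<T>& vector,
                                     T* item,
                                     uint32_t offset,
                                     uint32_t index) {
      if (eagerly_assign_offsets_) {
        item->SetOffset(offset);
      }
      vector.AddIndexedItem(item, index);
    }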
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 179d3b96e0..8421774104 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -112,33 +112,55 @@ template<class T> class CollectionBase {
public:
CollectionBase() = default;
- uint32_t GetOffset() const { return offset_; }
- void SetOffset(uint32_t new_offset) { offset_ = new_offset; }
+ uint32_t GetOffset() const {
+ return offset_;
+ }
+ void SetOffset(uint32_t new_offset) {
+ offset_ = new_offset;
+ }
private:
- uint32_t offset_ = 0;
+ // Start out unassigned.
+ uint32_t offset_ = 0u;
DISALLOW_COPY_AND_ASSIGN(CollectionBase);
};
template<class T> class CollectionVector : public CollectionBase<T> {
public:
+ using Vector = std::vector<std::unique_ptr<T>>;
CollectionVector() = default;
- void AddIndexedItem(T* object, uint32_t offset, uint32_t index) {
- object->SetOffset(offset);
- object->SetIndex(index);
+ uint32_t Size() const { return collection_.size(); }
+ Vector& Collection() { return collection_; }
+
+ protected:
+ Vector collection_;
+
+ void AddItem(T* object) {
collection_.push_back(std::unique_ptr<T>(object));
}
- uint32_t Size() const { return collection_.size(); }
- std::vector<std::unique_ptr<T>>& Collection() { return collection_; }
private:
- std::vector<std::unique_ptr<T>> collection_;
-
+ friend class Collections;
DISALLOW_COPY_AND_ASSIGN(CollectionVector);
};
+template<class T> class IndexedCollectionVector : public CollectionVector<T> {
+ public:
+ using Vector = std::vector<std::unique_ptr<T>>;
+ IndexedCollectionVector() = default;
+
+ private:
+ void AddIndexedItem(T* object, uint32_t index) {
+ object->SetIndex(index);
+ CollectionVector<T>::collection_.push_back(std::unique_ptr<T>(object));
+ }
+
+ friend class Collections;
+ DISALLOW_COPY_AND_ASSIGN(IndexedCollectionVector);
+};
+
template<class T> class CollectionMap : public CollectionBase<T> {
public:
CollectionMap() = default;
@@ -146,21 +168,35 @@ template<class T> class CollectionMap : public CollectionBase<T> {
// Returns the existing item if it is already inserted, null otherwise.
T* GetExistingObject(uint32_t offset) {
auto it = collection_.find(offset);
- return it != collection_.end() ? it->second.get() : nullptr;
+ return it != collection_.end() ? it->second : nullptr;
}
- void AddItem(T* object, uint32_t offset) {
- object->SetOffset(offset);
- auto it = collection_.emplace(offset, std::unique_ptr<T>(object));
- CHECK(it.second) << "CollectionMap already has an object with offset " << offset << " "
- << " and address " << it.first->second.get();
- }
uint32_t Size() const { return collection_.size(); }
- std::map<uint32_t, std::unique_ptr<T>>& Collection() { return collection_; }
+ std::map<uint32_t, T*>& Collection() { return collection_; }
+
+ // Sort the vector by copying pointers over.
+ void SortVectorByMapOrder(CollectionVector<T>& vector) {
+ auto it = collection_.begin();
+ CHECK_EQ(vector.Size(), Size());
+ for (size_t i = 0; i < Size(); ++i) {
+ // There are times when the array will temporarily contain the same pointer twice; doing the
+ // release here makes sure there are no double-free errors.
+ vector.Collection()[i].release();
+ vector.Collection()[i].reset(it->second);
+ ++it;
+ }
+ }
private:
- std::map<uint32_t, std::unique_ptr<T>> collection_;
+ std::map<uint32_t, T*> collection_;
+
+ void AddItem(T* object, uint32_t offset) {
+ auto it = collection_.emplace(offset, object);
+ CHECK(it.second) << "CollectionMap already has an object with offset " << offset << " "
+ << " and address " << it.first->second;
+ }
+ friend class Collections;
DISALLOW_COPY_AND_ASSIGN(CollectionMap);
};
@@ -168,32 +204,31 @@ class Collections {
public:
Collections() = default;
- std::vector<std::unique_ptr<StringId>>& StringIds() { return string_ids_.Collection(); }
- std::vector<std::unique_ptr<TypeId>>& TypeIds() { return type_ids_.Collection(); }
- std::vector<std::unique_ptr<ProtoId>>& ProtoIds() { return proto_ids_.Collection(); }
- std::vector<std::unique_ptr<FieldId>>& FieldIds() { return field_ids_.Collection(); }
- std::vector<std::unique_ptr<MethodId>>& MethodIds() { return method_ids_.Collection(); }
- std::vector<std::unique_ptr<ClassDef>>& ClassDefs() { return class_defs_.Collection(); }
- std::vector<std::unique_ptr<CallSiteId>>& CallSiteIds() { return call_site_ids_.Collection(); }
- std::vector<std::unique_ptr<MethodHandleItem>>& MethodHandleItems()
+ CollectionVector<StringId>::Vector& StringIds() { return string_ids_.Collection(); }
+ CollectionVector<TypeId>::Vector& TypeIds() { return type_ids_.Collection(); }
+ CollectionVector<ProtoId>::Vector& ProtoIds() { return proto_ids_.Collection(); }
+ CollectionVector<FieldId>::Vector& FieldIds() { return field_ids_.Collection(); }
+ CollectionVector<MethodId>::Vector& MethodIds() { return method_ids_.Collection(); }
+ CollectionVector<ClassDef>::Vector& ClassDefs() { return class_defs_.Collection(); }
+ CollectionVector<CallSiteId>::Vector& CallSiteIds() { return call_site_ids_.Collection(); }
+ CollectionVector<MethodHandleItem>::Vector& MethodHandleItems()
{ return method_handle_items_.Collection(); }
- std::map<uint32_t, std::unique_ptr<StringData>>& StringDatas()
- { return string_datas_.Collection(); }
- std::map<uint32_t, std::unique_ptr<TypeList>>& TypeLists() { return type_lists_.Collection(); }
- std::map<uint32_t, std::unique_ptr<EncodedArrayItem>>& EncodedArrayItems()
+ CollectionVector<StringData>::Vector& StringDatas() { return string_datas_.Collection(); }
+ CollectionVector<TypeList>::Vector& TypeLists() { return type_lists_.Collection(); }
+ CollectionVector<EncodedArrayItem>::Vector& EncodedArrayItems()
{ return encoded_array_items_.Collection(); }
- std::map<uint32_t, std::unique_ptr<AnnotationItem>>& AnnotationItems()
+ CollectionVector<AnnotationItem>::Vector& AnnotationItems()
{ return annotation_items_.Collection(); }
- std::map<uint32_t, std::unique_ptr<AnnotationSetItem>>& AnnotationSetItems()
+ CollectionVector<AnnotationSetItem>::Vector& AnnotationSetItems()
{ return annotation_set_items_.Collection(); }
- std::map<uint32_t, std::unique_ptr<AnnotationSetRefList>>& AnnotationSetRefLists()
+ CollectionVector<AnnotationSetRefList>::Vector& AnnotationSetRefLists()
{ return annotation_set_ref_lists_.Collection(); }
- std::map<uint32_t, std::unique_ptr<AnnotationsDirectoryItem>>& AnnotationsDirectoryItems()
+ CollectionVector<AnnotationsDirectoryItem>::Vector& AnnotationsDirectoryItems()
{ return annotations_directory_items_.Collection(); }
- std::map<uint32_t, std::unique_ptr<DebugInfoItem>>& DebugInfoItems()
+ CollectionVector<DebugInfoItem>::Vector& DebugInfoItems()
{ return debug_info_items_.Collection(); }
- std::map<uint32_t, std::unique_ptr<CodeItem>>& CodeItems() { return code_items_.Collection(); }
- std::map<uint32_t, std::unique_ptr<ClassData>>& ClassDatas() { return class_datas_.Collection(); }
+ CollectionVector<CodeItem>::Vector& CodeItems() { return code_items_.Collection(); }
+ CollectionVector<ClassData>::Vector& ClassDatas() { return class_datas_.Collection(); }
void CreateStringId(const DexFile& dex_file, uint32_t i);
void CreateTypeId(const DexFile& dex_file, uint32_t i);
@@ -207,7 +242,9 @@ class Collections {
void CreateCallSitesAndMethodHandles(const DexFile& dex_file);
TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset);
- EncodedArrayItem* CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset);
+ EncodedArrayItem* CreateEncodedArrayItem(const DexFile& dex_file,
+ const uint8_t* static_data,
+ uint32_t offset);
AnnotationItem* CreateAnnotationItem(const DexFile& dex_file,
const DexFile::AnnotationItem* annotation);
AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
@@ -326,37 +363,110 @@ class Collections {
uint32_t CodeItemsSize() const { return code_items_.Size(); }
uint32_t ClassDatasSize() const { return class_datas_.Size(); }
+ // Sort the vectors by map order (the same order that was used in the input file).
+ void SortVectorsByMapOrder();
+
+ template <typename Type>
+ void AddItem(CollectionMap<Type>& map,
+ CollectionVector<Type>& vector,
+ Type* item,
+ uint32_t offset) {
+ DCHECK(!map.GetExistingObject(offset));
+ DCHECK(!item->OffsetAssigned());
+ if (eagerly_assign_offsets_) {
+ item->SetOffset(offset);
+ }
+ map.AddItem(item, offset);
+ vector.AddItem(item);
+ }
+
+ template <typename Type>
+ void AddIndexedItem(IndexedCollectionVector<Type>& vector,
+ Type* item,
+ uint32_t offset,
+ uint32_t index) {
+ DCHECK(!item->OffsetAssigned());
+ if (eagerly_assign_offsets_) {
+ item->SetOffset(offset);
+ }
+ vector.AddIndexedItem(item, index);
+ }
+
+ void SetEagerlyAssignOffsets(bool eagerly_assign_offsets) {
+ eagerly_assign_offsets_ = eagerly_assign_offsets;
+ }
+
+ void SetLinkData(std::vector<uint8_t>&& link_data) {
+ link_data_ = std::move(link_data);
+ }
+
+ const std::vector<uint8_t>& LinkData() const {
+ return link_data_;
+ }
+
private:
- EncodedValue* ReadEncodedValue(const uint8_t** data);
- EncodedValue* ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length);
- void ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length, EncodedValue* item);
+ EncodedValue* ReadEncodedValue(const DexFile& dex_file, const uint8_t** data);
+ EncodedValue* ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length);
+ void ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length,
+ EncodedValue* item);
ParameterAnnotation* GenerateParameterAnnotation(const DexFile& dex_file, MethodId* method_id,
const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset);
MethodItem* GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii);
- CollectionVector<StringId> string_ids_;
- CollectionVector<TypeId> type_ids_;
- CollectionVector<ProtoId> proto_ids_;
- CollectionVector<FieldId> field_ids_;
- CollectionVector<MethodId> method_ids_;
- CollectionVector<ClassDef> class_defs_;
- CollectionVector<CallSiteId> call_site_ids_;
- CollectionVector<MethodHandleItem> method_handle_items_;
-
- CollectionMap<StringData> string_datas_;
- CollectionMap<TypeList> type_lists_;
- CollectionMap<EncodedArrayItem> encoded_array_items_;
- CollectionMap<AnnotationItem> annotation_items_;
- CollectionMap<AnnotationSetItem> annotation_set_items_;
- CollectionMap<AnnotationSetRefList> annotation_set_ref_lists_;
- CollectionMap<AnnotationsDirectoryItem> annotations_directory_items_;
- CollectionMap<DebugInfoItem> debug_info_items_;
- CollectionMap<CodeItem> code_items_;
- CollectionMap<ClassData> class_datas_;
+ // Collection vectors own the IR data.
+ IndexedCollectionVector<StringId> string_ids_;
+ IndexedCollectionVector<TypeId> type_ids_;
+ IndexedCollectionVector<ProtoId> proto_ids_;
+ IndexedCollectionVector<FieldId> field_ids_;
+ IndexedCollectionVector<MethodId> method_ids_;
+ IndexedCollectionVector<CallSiteId> call_site_ids_;
+ IndexedCollectionVector<MethodHandleItem> method_handle_items_;
+ IndexedCollectionVector<StringData> string_datas_;
+ IndexedCollectionVector<TypeList> type_lists_;
+ IndexedCollectionVector<EncodedArrayItem> encoded_array_items_;
+ IndexedCollectionVector<AnnotationItem> annotation_items_;
+ IndexedCollectionVector<AnnotationSetItem> annotation_set_items_;
+ IndexedCollectionVector<AnnotationSetRefList> annotation_set_ref_lists_;
+ IndexedCollectionVector<AnnotationsDirectoryItem> annotations_directory_items_;
+ IndexedCollectionVector<ClassDef> class_defs_;
+ // The order of the vectors controls the layout of the output file by index order; to change the
+ // layout, just sort the vector. Note that you may only reorder the non-indexed vectors
+ // below. Indexed vectors are accessed by index in other places; changing their order would
+ // invalidate the existing indices and is not currently supported.
+ CollectionVector<DebugInfoItem> debug_info_items_;
+ CollectionVector<CodeItem> code_items_;
+ CollectionVector<ClassData> class_datas_;
+
+ // Note that the maps do not have ownership; the vectors do.
+ // TODO: These maps should only be required for building the IR and should be put in a separate
+ // IR builder class.
+ CollectionMap<StringData> string_datas_map_;
+ CollectionMap<TypeList> type_lists_map_;
+ CollectionMap<EncodedArrayItem> encoded_array_items_map_;
+ CollectionMap<AnnotationItem> annotation_items_map_;
+ CollectionMap<AnnotationSetItem> annotation_set_items_map_;
+ CollectionMap<AnnotationSetRefList> annotation_set_ref_lists_map_;
+ CollectionMap<AnnotationsDirectoryItem> annotations_directory_items_map_;
+ CollectionMap<DebugInfoItem> debug_info_items_map_;
+ CollectionMap<CodeItem> code_items_map_;
+ CollectionMap<ClassData> class_datas_map_;
uint32_t map_list_offset_ = 0;
+ // Link data.
+ std::vector<uint8_t> link_data_;
+
+ // Whether to eagerly assign offsets during IR building or later after layout. Must be false if
+ // changing the layout is enabled.
+ bool eagerly_assign_offsets_;
+
DISALLOW_COPY_AND_ASSIGN(Collections);
};
@@ -365,15 +475,26 @@ class Item {
Item() { }
virtual ~Item() { }
- uint32_t GetOffset() const { return offset_; }
+ // Return the assigned offset.
+ uint32_t GetOffset() const {
+ CHECK(OffsetAssigned());
+ return offset_;
+ }
uint32_t GetSize() const { return size_; }
void SetOffset(uint32_t offset) { offset_ = offset; }
void SetSize(uint32_t size) { size_ = size; }
+ bool OffsetAssigned() const {
+ return offset_ != kOffsetUnassigned;
+ }
protected:
Item(uint32_t offset, uint32_t size) : offset_(offset), size_(size) { }
- uint32_t offset_ = 0;
+ // Offset 0 is the dex file header, so it shouldn't be a valid offset for any other part of the dex file.
+ static constexpr uint32_t kOffsetUnassigned = 0u;
+
+ // Start out unassigned.
+ uint32_t offset_ = kOffsetUnassigned;
uint32_t size_ = 0;
};
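A note on the Item change above: offset 0 always belongs to the dex header, so it doubles as the "unassigned" sentinel. Below is a minimal standalone sketch of the same pattern, with CHECK approximated by assert; it is illustrative only and not part of the change.

#include <cassert>
#include <cstdint>

class Item {
 public:
  // Offset 0 is the dex header, so it can never be a valid item offset.
  static constexpr uint32_t kOffsetUnassigned = 0u;

  bool OffsetAssigned() const { return offset_ != kOffsetUnassigned; }
  void SetOffset(uint32_t offset) { offset_ = offset; }
  uint32_t GetOffset() const {
    assert(OffsetAssigned());  // aborts on reads of offsets that were never assigned
    return offset_;
  }

 private:
  uint32_t offset_ = kOffsetUnassigned;
};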
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index bd3e1fa718..1fd963fe22 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -26,7 +26,7 @@ namespace dex_ir {
static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* collections);
-Header* DexIrBuilder(const DexFile& dex_file) {
+Header* DexIrBuilder(const DexFile& dex_file, bool eagerly_assign_offsets) {
const DexFile::Header& disk_header = dex_file.GetHeader();
Header* header = new Header(disk_header.magic_,
disk_header.checksum_,
@@ -39,6 +39,7 @@ Header* DexIrBuilder(const DexFile& dex_file) {
disk_header.data_size_,
disk_header.data_off_);
Collections& collections = header->GetCollections();
+ collections.SetEagerlyAssignOffsets(eagerly_assign_offsets);
// Walk the rest of the header fields.
// StringId table.
collections.SetStringIdsOffset(disk_header.string_ids_off_);
@@ -74,9 +75,16 @@ Header* DexIrBuilder(const DexFile& dex_file) {
collections.SetMapListOffset(disk_header.map_off_);
// CallSiteIds and MethodHandleItems.
collections.CreateCallSitesAndMethodHandles(dex_file);
-
CheckAndSetRemainingOffsets(dex_file, &collections);
+ // Sort the vectors by the map order (same order as the file).
+ collections.SortVectorsByMapOrder();
+
+ // Load the link data if it exists.
+ collections.SetLinkData(std::vector<uint8_t>(
+ dex_file.Begin() + dex_file.GetHeader().link_off_,
+ dex_file.Begin() + dex_file.GetHeader().link_off_ + dex_file.GetHeader().link_size_));
+
return header;
}
diff --git a/dexlayout/dex_ir_builder.h b/dexlayout/dex_ir_builder.h
index c53157b5fc..4d4b4e8699 100644
--- a/dexlayout/dex_ir_builder.h
+++ b/dexlayout/dex_ir_builder.h
@@ -24,7 +24,9 @@
namespace art {
namespace dex_ir {
-dex_ir::Header* DexIrBuilder(const DexFile& dex_file);
+// Eagerly assigning offsets assigns them based on the original offsets in the input dex file. If
+// this is not done, dex_ir::Item::GetOffset will abort when reading an unassigned offset.
+dex_ir::Header* DexIrBuilder(const DexFile& dex_file, bool eagerly_assign_offsets);
} // namespace dex_ir
} // namespace art
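With the new parameter, callers of DexIrBuilder have to pick the offset policy up front: eager offsets are only safe when the layout will not change, since relayout invalidates the original offsets. A hypothetical call site (the do_layout flag is illustrative and not from this change):

// Only keep the input file's offsets when dexlayout will not reorder sections;
// otherwise let DexWriter compute fresh offsets while writing.
const bool eagerly_assign_offsets = !do_layout;
std::unique_ptr<dex_ir::Header> header(
    dex_ir::DexIrBuilder(*dex_file, eagerly_assign_offsets));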
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index 4895ab6957..1fac2359b0 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -18,17 +18,37 @@
#include <stdint.h>
-#include <queue>
#include <vector>
#include "cdex/compact_dex_file.h"
#include "compact_dex_writer.h"
+#include "dex_file_layout.h"
#include "dex_file_types.h"
+#include "dexlayout.h"
#include "standard_dex_file.h"
#include "utf.h"
namespace art {
+static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
+static constexpr uint32_t kDexSectionWordAlignment = 4;
+
+static constexpr uint32_t SectionAlignment(DexFile::MapItemType type) {
+ switch (type) {
+ case DexFile::kDexTypeClassDataItem:
+ case DexFile::kDexTypeStringDataItem:
+ case DexFile::kDexTypeDebugInfoItem:
+ case DexFile::kDexTypeAnnotationItem:
+ case DexFile::kDexTypeEncodedArrayItem:
+ return alignof(uint8_t);
+
+ default:
+ // All other sections are word aligned (kDexSectionWordAlignment).
+ return kDexSectionWordAlignment;
+ }
+}
+
+
size_t EncodeIntValue(int32_t value, uint8_t* buffer) {
size_t length = 0;
if (value >= 0) {
@@ -245,130 +265,213 @@ size_t DexWriter::WriteEncodedMethods(dex_ir::MethodItemVector* methods, size_t
return offset - original_offset;
}
-void DexWriter::WriteStrings() {
- uint32_t string_data_off[1];
+// TODO: Refactor this to remove the duplicated boilerplate. One way to do this is to add a
+// function that takes a CollectionVector<T> and uses overloading.
+uint32_t DexWriter::WriteStringIds(uint32_t offset, bool reserve_only) {
+ const uint32_t start = offset;
for (std::unique_ptr<dex_ir::StringId>& string_id : header_->GetCollections().StringIds()) {
- string_data_off[0] = string_id->DataItem()->GetOffset();
- Write(string_data_off, string_id->GetSize(), string_id->GetOffset());
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeStringIdItem));
+ if (reserve_only) {
+ offset += string_id->GetSize();
+ } else {
+ uint32_t string_data_off = string_id->DataItem()->GetOffset();
+ offset += Write(&string_data_off, string_id->GetSize(), offset);
+ }
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetStringIdsOffset(start);
+ }
+ return offset - start;
+}
- for (auto& string_data_pair : header_->GetCollections().StringDatas()) {
- std::unique_ptr<dex_ir::StringData>& string_data = string_data_pair.second;
- uint32_t offset = string_data->GetOffset();
+uint32_t DexWriter::WriteStringDatas(uint32_t offset) {
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::StringData>& string_data : header_->GetCollections().StringDatas()) {
+ ProcessOffset(&offset, string_data.get());
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeStringDataItem));
offset += WriteUleb128(CountModifiedUtf8Chars(string_data->Data()), offset);
- Write(string_data->Data(), strlen(string_data->Data()), offset);
+ // Skip null terminator (already zeroed out, no need to write).
+ offset += Write(string_data->Data(), strlen(string_data->Data()), offset) + 1u;
+ }
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetStringDatasOffset(start);
}
+ return offset - start;
}
-void DexWriter::WriteTypes() {
+uint32_t DexWriter::WriteTypeIds(uint32_t offset) {
uint32_t descriptor_idx[1];
+ const uint32_t start = offset;
for (std::unique_ptr<dex_ir::TypeId>& type_id : header_->GetCollections().TypeIds()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeTypeIdItem));
+ ProcessOffset(&offset, type_id.get());
descriptor_idx[0] = type_id->GetStringId()->GetIndex();
- Write(descriptor_idx, type_id->GetSize(), type_id->GetOffset());
+ offset += Write(descriptor_idx, type_id->GetSize(), offset);
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetTypeIdsOffset(start);
+ }
+ return offset - start;
}
-void DexWriter::WriteTypeLists() {
+uint32_t DexWriter::WriteTypeLists(uint32_t offset) {
uint32_t size[1];
uint16_t list[1];
- for (auto& type_list_pair : header_->GetCollections().TypeLists()) {
- std::unique_ptr<dex_ir::TypeList>& type_list = type_list_pair.second;
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::TypeList>& type_list : header_->GetCollections().TypeLists()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeTypeList));
size[0] = type_list->GetTypeList()->size();
- uint32_t offset = type_list->GetOffset();
+ ProcessOffset(&offset, type_list.get());
offset += Write(size, sizeof(uint32_t), offset);
for (const dex_ir::TypeId* type_id : *type_list->GetTypeList()) {
list[0] = type_id->GetIndex();
offset += Write(list, sizeof(uint16_t), offset);
}
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetTypeListsOffset(start);
+ }
+ return offset - start;
}
-void DexWriter::WriteProtos() {
+uint32_t DexWriter::WriteProtoIds(uint32_t offset, bool reserve_only) {
uint32_t buffer[3];
+ const uint32_t start = offset;
for (std::unique_ptr<dex_ir::ProtoId>& proto_id : header_->GetCollections().ProtoIds()) {
- buffer[0] = proto_id->Shorty()->GetIndex();
- buffer[1] = proto_id->ReturnType()->GetIndex();
- buffer[2] = proto_id->Parameters() == nullptr ? 0 : proto_id->Parameters()->GetOffset();
- Write(buffer, proto_id->GetSize(), proto_id->GetOffset());
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeProtoIdItem));
+ ProcessOffset(&offset, proto_id.get());
+ if (reserve_only) {
+ offset += proto_id->GetSize();
+ } else {
+ buffer[0] = proto_id->Shorty()->GetIndex();
+ buffer[1] = proto_id->ReturnType()->GetIndex();
+ buffer[2] = proto_id->Parameters() == nullptr ? 0 : proto_id->Parameters()->GetOffset();
+ offset += Write(buffer, proto_id->GetSize(), offset);
+ }
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetProtoIdsOffset(start);
+ }
+ return offset - start;
}
-void DexWriter::WriteFields() {
+uint32_t DexWriter::WriteFieldIds(uint32_t offset) {
uint16_t buffer[4];
+ const uint32_t start = offset;
for (std::unique_ptr<dex_ir::FieldId>& field_id : header_->GetCollections().FieldIds()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeFieldIdItem));
+ ProcessOffset(&offset, field_id.get());
buffer[0] = field_id->Class()->GetIndex();
buffer[1] = field_id->Type()->GetIndex();
buffer[2] = field_id->Name()->GetIndex();
buffer[3] = field_id->Name()->GetIndex() >> 16;
- Write(buffer, field_id->GetSize(), field_id->GetOffset());
+ offset += Write(buffer, field_id->GetSize(), offset);
+ }
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetFieldIdsOffset(start);
}
+ return offset - start;
}
-void DexWriter::WriteMethods() {
+uint32_t DexWriter::WriteMethodIds(uint32_t offset) {
uint16_t buffer[4];
+ const uint32_t start = offset;
for (std::unique_ptr<dex_ir::MethodId>& method_id : header_->GetCollections().MethodIds()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeMethodIdItem));
+ ProcessOffset(&offset, method_id.get());
buffer[0] = method_id->Class()->GetIndex();
buffer[1] = method_id->Proto()->GetIndex();
buffer[2] = method_id->Name()->GetIndex();
buffer[3] = method_id->Name()->GetIndex() >> 16;
- Write(buffer, method_id->GetSize(), method_id->GetOffset());
+ offset += Write(buffer, method_id->GetSize(), offset);
+ }
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetMethodIdsOffset(start);
}
+ return offset - start;
}
-void DexWriter::WriteEncodedArrays() {
- for (auto& encoded_array_pair : header_->GetCollections().EncodedArrayItems()) {
- std::unique_ptr<dex_ir::EncodedArrayItem>& encoded_array = encoded_array_pair.second;
- WriteEncodedArray(encoded_array->GetEncodedValues(), encoded_array->GetOffset());
+uint32_t DexWriter::WriteEncodedArrays(uint32_t offset) {
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::EncodedArrayItem>& encoded_array :
+ header_->GetCollections().EncodedArrayItems()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeEncodedArrayItem));
+ ProcessOffset(&offset, encoded_array.get());
+ offset += WriteEncodedArray(encoded_array->GetEncodedValues(), offset);
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetEncodedArrayItemsOffset(start);
+ }
+ return offset - start;
}
-void DexWriter::WriteAnnotations() {
+uint32_t DexWriter::WriteAnnotations(uint32_t offset) {
uint8_t visibility[1];
- for (auto& annotation_pair : header_->GetCollections().AnnotationItems()) {
- std::unique_ptr<dex_ir::AnnotationItem>& annotation = annotation_pair.second;
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::AnnotationItem>& annotation :
+ header_->GetCollections().AnnotationItems()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationItem));
visibility[0] = annotation->GetVisibility();
- size_t offset = annotation->GetOffset();
+ ProcessOffset(&offset, annotation.get());
offset += Write(visibility, sizeof(uint8_t), offset);
- WriteEncodedAnnotation(annotation->GetAnnotation(), offset);
+ offset += WriteEncodedAnnotation(annotation->GetAnnotation(), offset);
+ }
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetAnnotationItemsOffset(start);
}
+ return offset - start;
}
-void DexWriter::WriteAnnotationSets() {
+uint32_t DexWriter::WriteAnnotationSets(uint32_t offset) {
uint32_t size[1];
uint32_t annotation_off[1];
- for (auto& annotation_set_pair : header_->GetCollections().AnnotationSetItems()) {
- std::unique_ptr<dex_ir::AnnotationSetItem>& annotation_set = annotation_set_pair.second;
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::AnnotationSetItem>& annotation_set :
+ header_->GetCollections().AnnotationSetItems()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationSetItem));
size[0] = annotation_set->GetItems()->size();
- size_t offset = annotation_set->GetOffset();
+ ProcessOffset(&offset, annotation_set.get());
offset += Write(size, sizeof(uint32_t), offset);
for (dex_ir::AnnotationItem* annotation : *annotation_set->GetItems()) {
annotation_off[0] = annotation->GetOffset();
offset += Write(annotation_off, sizeof(uint32_t), offset);
}
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetAnnotationSetItemsOffset(start);
+ }
+ return offset - start;
}
-void DexWriter::WriteAnnotationSetRefs() {
+uint32_t DexWriter::WriteAnnotationSetRefs(uint32_t offset) {
uint32_t size[1];
uint32_t annotations_off[1];
- for (auto& anno_set_ref_pair : header_->GetCollections().AnnotationSetRefLists()) {
- std::unique_ptr<dex_ir::AnnotationSetRefList>& annotation_set_ref = anno_set_ref_pair.second;
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::AnnotationSetRefList>& annotation_set_ref :
+ header_->GetCollections().AnnotationSetRefLists()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationSetRefList));
size[0] = annotation_set_ref->GetItems()->size();
- size_t offset = annotation_set_ref->GetOffset();
+ ProcessOffset(&offset, annotation_set_ref.get());
offset += Write(size, sizeof(uint32_t), offset);
for (dex_ir::AnnotationSetItem* annotation_set : *annotation_set_ref->GetItems()) {
annotations_off[0] = annotation_set == nullptr ? 0 : annotation_set->GetOffset();
offset += Write(annotations_off, sizeof(uint32_t), offset);
}
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetAnnotationSetRefListsOffset(start);
+ }
+ return offset - start;
}
-void DexWriter::WriteAnnotationsDirectories() {
+uint32_t DexWriter::WriteAnnotationsDirectories(uint32_t offset) {
uint32_t directory_buffer[4];
uint32_t annotation_buffer[2];
- for (auto& annotations_directory_pair : header_->GetCollections().AnnotationsDirectoryItems()) {
- std::unique_ptr<dex_ir::AnnotationsDirectoryItem>& annotations_directory =
- annotations_directory_pair.second;
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::AnnotationsDirectoryItem>& annotations_directory :
+ header_->GetCollections().AnnotationsDirectoryItems()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationsDirectoryItem));
+ ProcessOffset(&offset, annotations_directory.get());
directory_buffer[0] = annotations_directory->GetClassAnnotation() == nullptr ? 0 :
annotations_directory->GetClassAnnotation()->GetOffset();
directory_buffer[1] = annotations_directory->GetFieldAnnotations() == nullptr ? 0 :
@@ -377,7 +480,6 @@ void DexWriter::WriteAnnotationsDirectories() {
annotations_directory->GetMethodAnnotations()->size();
directory_buffer[3] = annotations_directory->GetParameterAnnotations() == nullptr ? 0 :
annotations_directory->GetParameterAnnotations()->size();
- uint32_t offset = annotations_directory->GetOffset();
offset += Write(directory_buffer, 4 * sizeof(uint32_t), offset);
if (annotations_directory->GetFieldAnnotations() != nullptr) {
for (std::unique_ptr<dex_ir::FieldAnnotation>& field :
@@ -404,27 +506,55 @@ void DexWriter::WriteAnnotationsDirectories() {
}
}
}
-}
-
-void DexWriter::WriteDebugInfoItems() {
- for (auto& debug_info_pair : header_->GetCollections().DebugInfoItems()) {
- std::unique_ptr<dex_ir::DebugInfoItem>& debug_info = debug_info_pair.second;
- Write(debug_info->GetDebugInfo(), debug_info->GetDebugInfoSize(), debug_info->GetOffset());
- }
-}
-
-void DexWriter::WriteCodeItems() {
- uint16_t uint16_buffer[4];
- uint32_t uint32_buffer[2];
- for (auto& code_item_pair : header_->GetCollections().CodeItems()) {
- std::unique_ptr<dex_ir::CodeItem>& code_item = code_item_pair.second;
- uint16_buffer[0] = code_item->RegistersSize();
- uint16_buffer[1] = code_item->InsSize();
- uint16_buffer[2] = code_item->OutsSize();
- uint16_buffer[3] = code_item->TriesSize();
- uint32_buffer[0] = code_item->DebugInfo() == nullptr ? 0 : code_item->DebugInfo()->GetOffset();
- uint32_buffer[1] = code_item->InsnsSize();
- size_t offset = code_item->GetOffset();
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetAnnotationsDirectoryItemsOffset(start);
+ }
+ return offset - start;
+}
+
+uint32_t DexWriter::WriteDebugInfoItems(uint32_t offset) {
+ const uint32_t start = offset;
+ for (std::unique_ptr<dex_ir::DebugInfoItem>& debug_info :
+ header_->GetCollections().DebugInfoItems()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeDebugInfoItem));
+ ProcessOffset(&offset, debug_info.get());
+ offset += Write(debug_info->GetDebugInfo(), debug_info->GetDebugInfoSize(), offset);
+ }
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetDebugInfoItemsOffset(start);
+ }
+ return offset - start;
+}
+
+uint32_t DexWriter::WriteCodeItems(uint32_t offset, bool reserve_only) {
+ DexLayoutSection* code_section = nullptr;
+ if (!reserve_only && dex_layout_ != nullptr) {
+ code_section = &dex_layout_->GetSections().sections_[static_cast<size_t>(
+ DexLayoutSections::SectionType::kSectionTypeCode)];
+ }
+ uint16_t uint16_buffer[4] = {};
+ uint32_t uint32_buffer[2] = {};
+ uint32_t start = offset;
+ for (auto& code_item : header_->GetCollections().CodeItems()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeCodeItem));
+ ProcessOffset(&offset, code_item.get());
+ if (!reserve_only) {
+ uint16_buffer[0] = code_item->RegistersSize();
+ uint16_buffer[1] = code_item->InsSize();
+ uint16_buffer[2] = code_item->OutsSize();
+ uint16_buffer[3] = code_item->TriesSize();
+ uint32_buffer[0] = code_item->DebugInfo() == nullptr ? 0 :
+ code_item->DebugInfo()->GetOffset();
+ uint32_buffer[1] = code_item->InsnsSize();
+ // Only add the section hotness info once.
+ if (code_section != nullptr) {
+ auto it = dex_layout_->LayoutHotnessInfo().code_item_layout_.find(code_item.get());
+ if (it != dex_layout_->LayoutHotnessInfo().code_item_layout_.end()) {
+ code_section->parts_[static_cast<size_t>(it->second)].CombineSection(
+ code_item->GetOffset(), code_item->GetOffset() + code_item->GetSize());
+ }
+ }
+ }
offset += Write(uint16_buffer, 4 * sizeof(uint16_t), offset);
offset += Write(uint32_buffer, 2 * sizeof(uint32_t), offset);
offset += Write(code_item->Insns(), code_item->InsnsSize() * sizeof(uint16_t), offset);
@@ -443,7 +573,7 @@ void DexWriter::WriteCodeItems() {
offset += Write(insn_count_and_handler_off, 2 * sizeof(uint16_t), offset);
}
// Leave offset pointing to the end of the try items.
- WriteUleb128(code_item->Handlers()->size(), offset);
+ UNUSED(WriteUleb128(code_item->Handlers()->size(), offset));
for (std::unique_ptr<const dex_ir::CatchHandler>& handlers : *code_item->Handlers()) {
size_t list_offset = offset + handlers->GetListOffset();
uint32_t size = handlers->HasCatchAll() ? (handlers->GetHandlers()->size() - 1) * -1 :
@@ -457,32 +587,52 @@ void DexWriter::WriteCodeItems() {
}
}
}
+ // TODO: Clean this up to properly calculate the size instead of assuming it doesn't change.
+ offset = code_item->GetOffset() + code_item->GetSize();
+ }
+
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetCodeItemsOffset(start);
}
+ return offset - start;
}
-void DexWriter::WriteClasses() {
+uint32_t DexWriter::WriteClassDefs(uint32_t offset, bool reserve_only) {
+ const uint32_t start = offset;
uint32_t class_def_buffer[8];
for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
- class_def_buffer[0] = class_def->ClassType()->GetIndex();
- class_def_buffer[1] = class_def->GetAccessFlags();
- class_def_buffer[2] = class_def->Superclass() == nullptr ? dex::kDexNoIndex :
- class_def->Superclass()->GetIndex();
- class_def_buffer[3] = class_def->InterfacesOffset();
- class_def_buffer[4] = class_def->SourceFile() == nullptr ? dex::kDexNoIndex :
- class_def->SourceFile()->GetIndex();
- class_def_buffer[5] = class_def->Annotations() == nullptr ? 0 :
- class_def->Annotations()->GetOffset();
- class_def_buffer[6] = class_def->GetClassData() == nullptr ? 0 :
- class_def->GetClassData()->GetOffset();
- class_def_buffer[7] = class_def->StaticValues() == nullptr ? 0 :
- class_def->StaticValues()->GetOffset();
- size_t offset = class_def->GetOffset();
- Write(class_def_buffer, class_def->GetSize(), offset);
- }
-
- for (auto& class_data_pair : header_->GetCollections().ClassDatas()) {
- std::unique_ptr<dex_ir::ClassData>& class_data = class_data_pair.second;
- size_t offset = class_data->GetOffset();
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeClassDefItem));
+ if (reserve_only) {
+ offset += class_def->GetSize();
+ } else {
+ class_def_buffer[0] = class_def->ClassType()->GetIndex();
+ class_def_buffer[1] = class_def->GetAccessFlags();
+ class_def_buffer[2] = class_def->Superclass() == nullptr ? dex::kDexNoIndex :
+ class_def->Superclass()->GetIndex();
+ class_def_buffer[3] = class_def->InterfacesOffset();
+ class_def_buffer[4] = class_def->SourceFile() == nullptr ? dex::kDexNoIndex :
+ class_def->SourceFile()->GetIndex();
+ class_def_buffer[5] = class_def->Annotations() == nullptr ? 0 :
+ class_def->Annotations()->GetOffset();
+ class_def_buffer[6] = class_def->GetClassData() == nullptr ? 0 :
+ class_def->GetClassData()->GetOffset();
+ class_def_buffer[7] = class_def->StaticValues() == nullptr ? 0 :
+ class_def->StaticValues()->GetOffset();
+ offset += Write(class_def_buffer, class_def->GetSize(), offset);
+ }
+ }
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetClassDefsOffset(start);
+ }
+ return offset - start;
+}
+
+uint32_t DexWriter::WriteClassDatas(uint32_t offset) {
+ const uint32_t start = offset;
+ for (const std::unique_ptr<dex_ir::ClassData>& class_data :
+ header_->GetCollections().ClassDatas()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeClassDataItem));
+ ProcessOffset(&offset, class_data.get());
offset += WriteUleb128(class_data->StaticFields()->size(), offset);
offset += WriteUleb128(class_data->InstanceFields()->size(), offset);
offset += WriteUleb128(class_data->DirectMethods()->size(), offset);
@@ -492,139 +642,134 @@ void DexWriter::WriteClasses() {
offset += WriteEncodedMethods(class_data->DirectMethods(), offset);
offset += WriteEncodedMethods(class_data->VirtualMethods(), offset);
}
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetClassDatasOffset(start);
+ }
+ return offset - start;
}
-void DexWriter::WriteCallSites() {
+uint32_t DexWriter::WriteCallSiteIds(uint32_t offset, bool reserve_only) {
+ const uint32_t start = offset;
uint32_t call_site_off[1];
for (std::unique_ptr<dex_ir::CallSiteId>& call_site_id :
header_->GetCollections().CallSiteIds()) {
- call_site_off[0] = call_site_id->CallSiteItem()->GetOffset();
- Write(call_site_off, call_site_id->GetSize(), call_site_id->GetOffset());
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeCallSiteIdItem));
+ if (reserve_only) {
+ offset += call_site_id->GetSize();
+ } else {
+ call_site_off[0] = call_site_id->CallSiteItem()->GetOffset();
+ offset += Write(call_site_off, call_site_id->GetSize(), offset);
+ }
+ }
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetCallSiteIdsOffset(start);
}
+ return offset - start;
}
-void DexWriter::WriteMethodHandles() {
+uint32_t DexWriter::WriteMethodHandles(uint32_t offset) {
+ const uint32_t start = offset;
uint16_t method_handle_buff[4];
for (std::unique_ptr<dex_ir::MethodHandleItem>& method_handle :
header_->GetCollections().MethodHandleItems()) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeMethodHandleItem));
method_handle_buff[0] = static_cast<uint16_t>(method_handle->GetMethodHandleType());
method_handle_buff[1] = 0; // unused.
method_handle_buff[2] = method_handle->GetFieldOrMethodId()->GetIndex();
method_handle_buff[3] = 0; // unused.
- Write(method_handle_buff, method_handle->GetSize(), method_handle->GetOffset());
+ offset += Write(method_handle_buff, method_handle->GetSize(), offset);
}
-}
-
-struct MapItemContainer {
- MapItemContainer(uint32_t type, uint32_t size, uint32_t offset)
- : type_(type), size_(size), offset_(offset) { }
-
- bool operator<(const MapItemContainer& other) const {
- return offset_ > other.offset_;
- }
-
- uint32_t type_;
- uint32_t size_;
- uint32_t offset_;
-};
-
-void DexWriter::WriteMapItem() {
- dex_ir::Collections& collection = header_->GetCollections();
- std::priority_queue<MapItemContainer> queue;
-
- // Header and index section.
- queue.push(MapItemContainer(DexFile::kDexTypeHeaderItem, 1, 0));
- if (collection.StringIdsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeStringIdItem, collection.StringIdsSize(),
- collection.StringIdsOffset()));
- }
- if (collection.TypeIdsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeTypeIdItem, collection.TypeIdsSize(),
- collection.TypeIdsOffset()));
- }
- if (collection.ProtoIdsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeProtoIdItem, collection.ProtoIdsSize(),
- collection.ProtoIdsOffset()));
- }
- if (collection.FieldIdsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeFieldIdItem, collection.FieldIdsSize(),
- collection.FieldIdsOffset()));
- }
- if (collection.MethodIdsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeMethodIdItem, collection.MethodIdsSize(),
- collection.MethodIdsOffset()));
- }
- if (collection.ClassDefsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeClassDefItem, collection.ClassDefsSize(),
- collection.ClassDefsOffset()));
- }
- if (collection.CallSiteIdsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeCallSiteIdItem, collection.CallSiteIdsSize(),
- collection.CallSiteIdsOffset()));
- }
- if (collection.MethodHandleItemsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeMethodHandleItem,
- collection.MethodHandleItemsSize(), collection.MethodHandleItemsOffset()));
- }
-
- // Data section.
- queue.push(MapItemContainer(DexFile::kDexTypeMapList, 1, collection.MapListOffset()));
- if (collection.TypeListsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeTypeList, collection.TypeListsSize(),
- collection.TypeListsOffset()));
- }
- if (collection.AnnotationSetRefListsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeAnnotationSetRefList,
- collection.AnnotationSetRefListsSize(), collection.AnnotationSetRefListsOffset()));
- }
- if (collection.AnnotationSetItemsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeAnnotationSetItem,
- collection.AnnotationSetItemsSize(), collection.AnnotationSetItemsOffset()));
- }
- if (collection.ClassDatasSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeClassDataItem, collection.ClassDatasSize(),
- collection.ClassDatasOffset()));
- }
- if (collection.CodeItemsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeCodeItem, collection.CodeItemsSize(),
- collection.CodeItemsOffset()));
- }
- if (collection.StringDatasSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeStringDataItem, collection.StringDatasSize(),
- collection.StringDatasOffset()));
- }
- if (collection.DebugInfoItemsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeDebugInfoItem, collection.DebugInfoItemsSize(),
- collection.DebugInfoItemsOffset()));
- }
- if (collection.AnnotationItemsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeAnnotationItem, collection.AnnotationItemsSize(),
- collection.AnnotationItemsOffset()));
- }
- if (collection.EncodedArrayItemsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeEncodedArrayItem,
- collection.EncodedArrayItemsSize(), collection.EncodedArrayItemsOffset()));
- }
- if (collection.AnnotationsDirectoryItemsSize() != 0) {
- queue.push(MapItemContainer(DexFile::kDexTypeAnnotationsDirectoryItem,
- collection.AnnotationsDirectoryItemsSize(), collection.AnnotationsDirectoryItemsOffset()));
+ if (compute_offsets_ && start != offset) {
+ header_->GetCollections().SetMethodHandleItemsOffset(start);
}
+ return offset - start;
+}
- uint32_t offset = collection.MapListOffset();
+uint32_t DexWriter::WriteMapItems(uint32_t offset, MapItemQueue* queue) {
+ // All the sections should already have been added.
uint16_t uint16_buffer[2];
uint32_t uint32_buffer[2];
uint16_buffer[1] = 0;
- uint32_buffer[0] = queue.size();
+ uint32_buffer[0] = queue->size();
+ const uint32_t start = offset;
offset += Write(uint32_buffer, sizeof(uint32_t), offset);
- while (!queue.empty()) {
- const MapItemContainer& map_item = queue.top();
+ while (!queue->empty()) {
+ const MapItem& map_item = queue->top();
uint16_buffer[0] = map_item.type_;
uint32_buffer[0] = map_item.size_;
uint32_buffer[1] = map_item.offset_;
offset += Write(uint16_buffer, 2 * sizeof(uint16_t), offset);
offset += Write(uint32_buffer, 2 * sizeof(uint32_t), offset);
- queue.pop();
+ queue->pop();
}
+ return offset - start;
+}
+
+uint32_t DexWriter::GenerateAndWriteMapItems(uint32_t offset) {
+ dex_ir::Collections& collection = header_->GetCollections();
+ MapItemQueue queue;
+
+ // Header and index section.
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeHeaderItem, 1, 0));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeStringIdItem,
+ collection.StringIdsSize(),
+ collection.StringIdsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeTypeIdItem,
+ collection.TypeIdsSize(),
+ collection.TypeIdsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeProtoIdItem,
+ collection.ProtoIdsSize(),
+ collection.ProtoIdsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeFieldIdItem,
+ collection.FieldIdsSize(),
+ collection.FieldIdsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeMethodIdItem,
+ collection.MethodIdsSize(),
+ collection.MethodIdsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeClassDefItem,
+ collection.ClassDefsSize(),
+ collection.ClassDefsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeCallSiteIdItem,
+ collection.CallSiteIdsSize(),
+ collection.CallSiteIdsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeMethodHandleItem,
+ collection.MethodHandleItemsSize(),
+ collection.MethodHandleItemsOffset()));
+ // Data section.
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeMapList, 1, collection.MapListOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeTypeList,
+ collection.TypeListsSize(),
+ collection.TypeListsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationSetRefList,
+ collection.AnnotationSetRefListsSize(),
+ collection.AnnotationSetRefListsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationSetItem,
+ collection.AnnotationSetItemsSize(),
+ collection.AnnotationSetItemsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeClassDataItem,
+ collection.ClassDatasSize(),
+ collection.ClassDatasOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeCodeItem,
+ collection.CodeItemsSize(),
+ collection.CodeItemsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeStringDataItem,
+ collection.StringDatasSize(),
+ collection.StringDatasOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeDebugInfoItem,
+ collection.DebugInfoItemsSize(),
+ collection.DebugInfoItemsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationItem,
+ collection.AnnotationItemsSize(),
+ collection.AnnotationItemsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeEncodedArrayItem,
+ collection.EncodedArrayItemsSize(),
+ collection.EncodedArrayItemsOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationsDirectoryItem,
+ collection.AnnotationsDirectoryItemsSize(),
+ collection.AnnotationsDirectoryItemsOffset()));
+
+ // Write the map items.
+ return WriteMapItems(offset, &queue);
}
void DexWriter::WriteHeader() {
@@ -657,38 +802,118 @@ void DexWriter::WriteHeader() {
header.data_off_ = header_->DataOffset();
static_assert(sizeof(header) == 0x70, "Size doesn't match dex spec");
- Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u);
+ UNUSED(Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u));
}
void DexWriter::WriteMemMap() {
- WriteStrings();
- WriteTypes();
- WriteTypeLists();
- WriteProtos();
- WriteFields();
- WriteMethods();
- WriteEncodedArrays();
- WriteAnnotations();
- WriteAnnotationSets();
- WriteAnnotationSetRefs();
- WriteAnnotationsDirectories();
- WriteDebugInfoItems();
- WriteCodeItems();
- WriteClasses();
- WriteCallSites();
- WriteMethodHandles();
- WriteMapItem();
+ // Starting offset is right after the header.
+ uint32_t offset = sizeof(StandardDexFile::Header);
+
+ dex_ir::Collections& collection = header_->GetCollections();
+
+ // Based on: https://source.android.com/devices/tech/dalvik/dex-format
+ // Since the offsets may not have been calculated yet, the writing must be done in the correct order.
+ const uint32_t string_ids_offset = offset;
+ offset += WriteStringIds(offset, /*reserve_only*/ true);
+ offset += WriteTypeIds(offset);
+ const uint32_t proto_ids_offset = offset;
+ offset += WriteProtoIds(offset, /*reserve_only*/ true);
+ offset += WriteFieldIds(offset);
+ offset += WriteMethodIds(offset);
+ const uint32_t class_defs_offset = offset;
+ offset += WriteClassDefs(offset, /*reserve_only*/ true);
+ const uint32_t call_site_ids_offset = offset;
+ offset += WriteCallSiteIds(offset, /*reserve_only*/ true);
+ offset += WriteMethodHandles(offset);
+
+ uint32_t data_offset_ = 0u;
+ if (compute_offsets_) {
+ // Data section.
+ offset = RoundUp(offset, kDataSectionAlignment);
+ data_offset_ = offset;
+ }
+
+ // Write code items first to minimize the space required for encoded methods.
+ // Reserve code item space since we need the debug offsets to actually write them.
+ const uint32_t code_items_offset = offset;
+ offset += WriteCodeItems(offset, /*reserve_only*/ true);
+ // Write debug info section.
+ offset += WriteDebugInfoItems(offset);
+ // Actually write code items since debug info offsets are calculated now.
+ WriteCodeItems(code_items_offset, /*reserve_only*/ false);
+
+ offset += WriteEncodedArrays(offset);
+ offset += WriteAnnotations(offset);
+ offset += WriteAnnotationSets(offset);
+ offset += WriteAnnotationSetRefs(offset);
+ offset += WriteAnnotationsDirectories(offset);
+ offset += WriteTypeLists(offset);
+ offset += WriteClassDatas(offset);
+ offset += WriteStringDatas(offset);
+
+ // Write delayed id sections that depend on data sections.
+ WriteStringIds(string_ids_offset, /*reserve_only*/ false);
+ WriteProtoIds(proto_ids_offset, /*reserve_only*/ false);
+ WriteClassDefs(class_defs_offset, /*reserve_only*/ false);
+ WriteCallSiteIds(call_site_ids_offset, /*reserve_only*/ false);
+
+ // Write the map list.
+ if (compute_offsets_) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeMapList));
+ collection.SetMapListOffset(offset);
+ } else {
+ offset = collection.MapListOffset();
+ }
+ offset += GenerateAndWriteMapItems(offset);
+ offset = RoundUp(offset, kDataSectionAlignment);
+
+ // Map items are included in the data section.
+ if (compute_offsets_) {
+ header_->SetDataSize(offset - data_offset_);
+ if (header_->DataSize() != 0) {
+ // Offset must be zero when the size is zero.
+ header_->SetDataOffset(data_offset_);
+ } else {
+ header_->SetDataOffset(0u);
+ }
+ }
+
+ // Write link data if it exists.
+ const std::vector<uint8_t>& link_data = collection.LinkData();
+ if (link_data.size() > 0) {
+ CHECK_EQ(header_->LinkSize(), static_cast<uint32_t>(link_data.size()));
+ if (compute_offsets_) {
+ header_->SetLinkOffset(offset);
+ }
+ offset += Write(&link_data[0], link_data.size(), header_->LinkOffset());
+ }
+
+ // Write header last.
+ if (compute_offsets_) {
+ header_->SetFileSize(offset);
+ }
WriteHeader();
}
-void DexWriter::Output(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level) {
+void DexWriter::Output(dex_ir::Header* header,
+ MemMap* mem_map,
+ DexLayout* dex_layout,
+ bool compute_offsets,
+ CompactDexLevel compact_dex_level) {
+ CHECK(dex_layout != nullptr);
std::unique_ptr<DexWriter> writer;
if (compact_dex_level != CompactDexLevel::kCompactDexLevelNone) {
- writer.reset(new CompactDexWriter(header, mem_map, compact_dex_level));
+ writer.reset(new CompactDexWriter(header, mem_map, dex_layout, compact_dex_level));
} else {
- writer.reset(new DexWriter(header, mem_map));
+ writer.reset(new DexWriter(header, mem_map, dex_layout, compute_offsets));
}
writer->WriteMemMap();
}
+void MapItemQueue::AddIfNotEmpty(const MapItem& item) {
+ if (item.size_ != 0) {
+ push(item);
+ }
+}
+
} // namespace art
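The id sections in WriteMemMap above are handled in two passes because their payloads reference data-section offsets that only become known once the data sections are written. The relevant lines, pulled together with added comments as a condensed outline of the pattern (not compilable on its own):

  // Pass 1: fix the section start but skip the payload.
  const uint32_t string_ids_offset = offset;
  offset += WriteStringIds(offset, /*reserve_only*/ true);
  // ... other id sections, then the data sections; WriteStringDatas assigns the
  // StringData offsets that the string ids point at ...
  offset += WriteStringDatas(offset);
  // Pass 2: revisit the reserved range, now that string_id->DataItem()->GetOffset() is valid.
  WriteStringIds(string_ids_offset, /*reserve_only*/ false);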
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index 85d3e7ebf3..c47898e533 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -19,56 +19,119 @@
#ifndef ART_DEXLAYOUT_DEX_WRITER_H_
#define ART_DEXLAYOUT_DEX_WRITER_H_
+#include <functional>
+
#include "base/unix_file/fd_file.h"
#include "cdex/compact_dex_level.h"
#include "dex_ir.h"
#include "mem_map.h"
#include "os.h"
+#include <queue>
+
namespace art {
+class DexLayout;
+class DexLayoutHotnessInfo;
+
+struct MapItem {
+ // Not using DexFile::MapItemType since compact dex and standard dex file may have different
+ // sections.
+ MapItem() = default;
+ MapItem(uint32_t type, uint32_t size, uint32_t offset)
+ : type_(type), size_(size), offset_(offset) { }
+
+ // Defined so that, combined with std::greater in MapItemQueue below, items pop in
+ // increasing offset order (a priority_queue places its "largest" element on top).
+ bool operator>(const MapItem& other) const {
+ return offset_ > other.offset_;
+ }
+
+ uint32_t type_ = 0u;
+ uint32_t size_ = 0u;
+ uint32_t offset_ = 0u;
+};
+
+class MapItemQueue : public
+ std::priority_queue<MapItem, std::vector<MapItem>, std::greater<MapItem>> {
+ public:
+ void AddIfNotEmpty(const MapItem& item);
+};
+
class DexWriter {
public:
- DexWriter(dex_ir::Header* header, MemMap* mem_map) : header_(header), mem_map_(mem_map) {}
+ DexWriter(dex_ir::Header* header,
+ MemMap* mem_map,
+ DexLayout* dex_layout,
+ bool compute_offsets)
+ : header_(header),
+ mem_map_(mem_map),
+ dex_layout_(dex_layout),
+ compute_offsets_(compute_offsets) {}
- static void Output(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level);
+ static void Output(dex_ir::Header* header,
+ MemMap* mem_map,
+ DexLayout* dex_layout,
+ bool compute_offsets,
+ CompactDexLevel compact_dex_level);
virtual ~DexWriter() {}
protected:
void WriteMemMap();
- size_t Write(const void* buffer, size_t length, size_t offset);
- size_t WriteSleb128(uint32_t value, size_t offset);
- size_t WriteUleb128(uint32_t value, size_t offset);
- size_t WriteEncodedValue(dex_ir::EncodedValue* encoded_value, size_t offset);
- size_t WriteEncodedValueHeader(int8_t value_type, size_t value_arg, size_t offset);
- size_t WriteEncodedArray(dex_ir::EncodedValueVector* values, size_t offset);
- size_t WriteEncodedAnnotation(dex_ir::EncodedAnnotation* annotation, size_t offset);
- size_t WriteEncodedFields(dex_ir::FieldItemVector* fields, size_t offset);
- size_t WriteEncodedMethods(dex_ir::MethodItemVector* methods, size_t offset);
-
- void WriteStrings();
- void WriteTypes();
- void WriteTypeLists();
- void WriteProtos();
- void WriteFields();
- void WriteMethods();
- void WriteEncodedArrays();
- void WriteAnnotations();
- void WriteAnnotationSets();
- void WriteAnnotationSetRefs();
- void WriteAnnotationsDirectories();
- void WriteDebugInfoItems();
- void WriteCodeItems();
- void WriteClasses();
- void WriteCallSites();
- void WriteMethodHandles();
- void WriteMapItem();
+ size_t Write(const void* buffer, size_t length, size_t offset) WARN_UNUSED;
+ size_t WriteSleb128(uint32_t value, size_t offset) WARN_UNUSED;
+ size_t WriteUleb128(uint32_t value, size_t offset) WARN_UNUSED;
+ size_t WriteEncodedValue(dex_ir::EncodedValue* encoded_value, size_t offset) WARN_UNUSED;
+ size_t WriteEncodedValueHeader(int8_t value_type, size_t value_arg, size_t offset) WARN_UNUSED;
+ size_t WriteEncodedArray(dex_ir::EncodedValueVector* values, size_t offset) WARN_UNUSED;
+ size_t WriteEncodedAnnotation(dex_ir::EncodedAnnotation* annotation, size_t offset) WARN_UNUSED;
+ size_t WriteEncodedFields(dex_ir::FieldItemVector* fields, size_t offset) WARN_UNUSED;
+ size_t WriteEncodedMethods(dex_ir::MethodItemVector* methods, size_t offset) WARN_UNUSED;
+
+ // Header and id section
virtual void WriteHeader();
+ // reserve_only means don't write, only reserve space. This is required since the string data
+ // offsets must be assigned before the string ids can be written.
+ uint32_t WriteStringIds(uint32_t offset, bool reserve_only);
+ uint32_t WriteTypeIds(uint32_t offset);
+ uint32_t WriteProtoIds(uint32_t offset, bool reserve_only);
+ uint32_t WriteFieldIds(uint32_t offset);
+ uint32_t WriteMethodIds(uint32_t offset);
+ uint32_t WriteClassDefs(uint32_t offset, bool reserve_only);
+ uint32_t WriteCallSiteIds(uint32_t offset, bool reserve_only);
+
+ uint32_t WriteEncodedArrays(uint32_t offset);
+ uint32_t WriteAnnotations(uint32_t offset);
+ uint32_t WriteAnnotationSets(uint32_t offset);
+ uint32_t WriteAnnotationSetRefs(uint32_t offset);
+ uint32_t WriteAnnotationsDirectories(uint32_t offset);
+
+ // Data section.
+ uint32_t WriteDebugInfoItems(uint32_t offset);
+ uint32_t WriteCodeItems(uint32_t offset, bool reserve_only);
+ uint32_t WriteTypeLists(uint32_t offset);
+ uint32_t WriteStringDatas(uint32_t offset);
+ uint32_t WriteClassDatas(uint32_t offset);
+ uint32_t WriteMethodHandles(uint32_t offset);
+ uint32_t WriteMapItems(uint32_t offset, MapItemQueue* queue);
+ uint32_t GenerateAndWriteMapItems(uint32_t offset);
+
+ // Process an offset: if compute_offsets_ is set, write it into the dex ir item; otherwise read
+ // the existing offset from the item and use that for writing.
+ void ProcessOffset(uint32_t* const offset, dex_ir::Item* item) {
+ if (compute_offsets_) {
+ item->SetOffset(*offset);
+ } else {
+ // Not computing offsets, just use the one in the item.
+ *offset = item->GetOffset();
+ }
+ }
dex_ir::Header* const header_;
MemMap* const mem_map_;
+ DexLayout* const dex_layout_;
+ bool compute_offsets_;
private:
DISALLOW_COPY_AND_ASSIGN(DexWriter);
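Since MapItemQueue pairs MapItem::operator> with std::greater, popping it yields map items in increasing offset order, which is the order the map_list section expects. A small self-contained check of that behaviour, using a trimmed copy of the types above (illustrative only):

#include <cassert>
#include <cstdint>
#include <functional>
#include <queue>
#include <vector>

struct MapItem {
  MapItem(uint32_t type, uint32_t size, uint32_t offset)
      : type_(type), size_(size), offset_(offset) {}
  bool operator>(const MapItem& other) const { return offset_ > other.offset_; }
  uint32_t type_;
  uint32_t size_;
  uint32_t offset_;
};

int main() {
  // std::greater turns the priority_queue into a min-heap keyed on offset_.
  std::priority_queue<MapItem, std::vector<MapItem>, std::greater<MapItem>> queue;
  queue.push(MapItem(/*type*/ 2, /*size*/ 10, /*offset*/ 0x400));
  queue.push(MapItem(/*type*/ 1, /*size*/ 5, /*offset*/ 0x70));
  queue.push(MapItem(/*type*/ 3, /*size*/ 1, /*offset*/ 0x1000));
  uint32_t previous = 0;
  while (!queue.empty()) {
    assert(queue.top().offset_ >= previous);  // lowest offset comes out first
    previous = queue.top().offset_;
    queue.pop();
  }
  return 0;
}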
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index c14cd5f807..e83f98ee6b 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -289,7 +289,8 @@ static void ProcessOneDexMapping(uint64_t* pagemap,
// Build a list of the dex file section types, sorted from highest offset to lowest.
std::vector<dex_ir::DexFileSection> sections;
{
- std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file));
+ std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file,
+ /*eagerly_assign_offsets*/ true));
sections = dex_ir::GetSortedDexFileSections(header.get(),
dex_ir::SortDirection::kSortDescending);
}
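The dexlayout.cc changes below reorder vectors of unique_ptr in place using the same release()/reset() trick as SortVectorByMapOrder above: the new order is a permutation of pointers the vector already owns, so each stale slot is released before the reordered pointer is installed. A minimal sketch of that step with illustrative types (not from the change):

#include <memory>
#include <vector>

struct Data { int value; };

// `order` must be a permutation of the pointers currently owned by `slots`.
// release() drops ownership of the old pointer without deleting it: that pointer is
// either already installed in an earlier slot (temporarily held twice) or will be
// installed when its position in `order` is reached, so reset() never double frees.
void ReorderInPlace(std::vector<std::unique_ptr<Data>>& slots,
                    const std::vector<Data*>& order) {
  for (size_t i = 0; i < order.size(); ++i) {
    slots[i].release();
    slots[i].reset(order[i]);
  }
}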
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index dd2e809a92..d904a52f0c 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -56,9 +56,6 @@ using android::base::StringPrintf;
// necessary to ensure the partial order w.r.t. class derivation. TODO: Re-enable (b/68317550).
static constexpr bool kChangeClassDefOrder = false;
-static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
-static constexpr uint32_t kDexCodeItemAlignment = 4;
-
/*
* Flags for use with createAccessFlagStr().
*/
@@ -1564,7 +1561,7 @@ void DexLayout::DumpDexFile() {
}
}
-std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
+void DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
std::vector<dex_ir::ClassDef*> new_class_def_order;
for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
dex::TypeIndex type_idx(class_def->ClassType()->GetIndex());
@@ -1578,31 +1575,41 @@ std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const Dex
new_class_def_order.push_back(class_def.get());
}
}
- uint32_t class_defs_offset = header_->GetCollections().ClassDefsOffset();
- uint32_t class_data_offset = header_->GetCollections().ClassDatasOffset();
std::unordered_set<dex_ir::ClassData*> visited_class_data;
- std::vector<dex_ir::ClassData*> new_class_data_order;
- for (uint32_t i = 0; i < new_class_def_order.size(); ++i) {
- dex_ir::ClassDef* class_def = new_class_def_order[i];
- if (kChangeClassDefOrder) {
- // This produces dex files that violate the spec since the super class class_def is supposed
- // to occur before any subclasses.
- class_def->SetIndex(i);
- class_def->SetOffset(class_defs_offset);
- class_defs_offset += dex_ir::ClassDef::ItemSize();
- }
+ size_t class_data_index = 0;
+ dex_ir::CollectionVector<dex_ir::ClassData>::Vector& class_datas =
+ header_->GetCollections().ClassDatas();
+ for (dex_ir::ClassDef* class_def : new_class_def_order) {
dex_ir::ClassData* class_data = class_def->GetClassData();
if (class_data != nullptr && visited_class_data.find(class_data) == visited_class_data.end()) {
- class_data->SetOffset(class_data_offset);
- class_data_offset += class_data->GetSize();
visited_class_data.insert(class_data);
- new_class_data_order.push_back(class_data);
+ // Overwrite the existing vector with the new ordering; note that the sets of objects are
+ // equivalent, only the order changes. This is why this is not a memory leak.
+ // TODO: Consider cleaning this up with a shared_ptr.
+ class_datas[class_data_index].release();
+ class_datas[class_data_index].reset(class_data);
+ ++class_data_index;
+ }
+ }
+ CHECK_EQ(class_data_index, class_datas.size());
+
+ if (kChangeClassDefOrder) {
+ // This currently produces dex files that violate the spec since the super class class_def is
+ // supposed to occur before any subclasses.
+ dex_ir::CollectionVector<dex_ir::ClassDef>::Vector& class_defs =
+ header_->GetCollections().ClassDefs();
+ CHECK_EQ(new_class_def_order.size(), class_defs.size());
+ for (size_t i = 0; i < class_defs.size(); ++i) {
+ // Overwrite the existing vector with the new ordering; note that the sets of objects are
+ // equivalent, only the order changes. This is why this is not a memory leak.
+ // TODO: Consider cleaning this up with a shared_ptr.
+ class_defs[i].release();
+ class_defs[i].reset(new_class_def_order[i]);
}
}
- return new_class_data_order;
}
-int32_t DexLayout::LayoutStringData(const DexFile* dex_file) {
+void DexLayout::LayoutStringData(const DexFile* dex_file) {
const size_t num_strings = header_->GetCollections().StringIds().size();
std::vector<bool> is_shorty(num_strings, false);
std::vector<bool> from_hot_method(num_strings, false);
@@ -1672,23 +1679,9 @@ int32_t DexLayout::LayoutStringData(const DexFile* dex_file) {
}
// Sort string data by specified order.
std::vector<dex_ir::StringId*> string_ids;
- size_t min_offset = std::numeric_limits<size_t>::max();
- size_t max_offset = 0;
- size_t hot_bytes = 0;
for (auto& string_id : header_->GetCollections().StringIds()) {
string_ids.push_back(string_id.get());
- const size_t cur_offset = string_id->DataItem()->GetOffset();
- CHECK_NE(cur_offset, 0u);
- min_offset = std::min(min_offset, cur_offset);
- dex_ir::StringData* data = string_id->DataItem();
- const size_t element_size = data->GetSize() + 1; // Add one extra for null.
- size_t end_offset = cur_offset + element_size;
- if (is_shorty[string_id->GetIndex()] || from_hot_method[string_id->GetIndex()]) {
- hot_bytes += element_size;
- }
- max_offset = std::max(max_offset, end_offset);
- }
- VLOG(compiler) << "Hot string data bytes " << hot_bytes << "/" << max_offset - min_offset;
+ }
std::sort(string_ids.begin(),
string_ids.end(),
[&is_shorty, &from_hot_method](const dex_ir::StringId* a,
@@ -1704,59 +1697,41 @@ int32_t DexLayout::LayoutStringData(const DexFile* dex_file) {
if (a_is_shorty != b_is_shorty) {
return a_is_shorty < b_is_shorty;
}
- // Preserve order.
- return a->DataItem()->GetOffset() < b->DataItem()->GetOffset();
+ // Order by index by default.
+ return a->GetIndex() < b->GetIndex();
});
- // Now we know what order we want the string data, reorder the offsets.
- size_t offset = min_offset;
+ dex_ir::CollectionVector<dex_ir::StringData>::Vector& string_datas =
+ header_->GetCollections().StringDatas();
+ // Now that we know what order we want the string data in, reorder the items.
+ size_t data_index = 0;
for (dex_ir::StringId* string_id : string_ids) {
- dex_ir::StringData* data = string_id->DataItem();
- data->SetOffset(offset);
- offset += data->GetSize() + 1; // Add one extra for null.
+ string_datas[data_index].release();
+ string_datas[data_index].reset(string_id->DataItem());
+ ++data_index;
}
- if (offset > max_offset) {
- return offset - max_offset;
- // If we expanded the string data section, we need to update the offsets or else we will
- // corrupt the next section when writing out.
+ if (kIsDebugBuild) {
+ std::unordered_set<dex_ir::StringData*> visited;
+ for (const std::unique_ptr<dex_ir::StringData>& data : string_datas) {
+ visited.insert(data.get());
+ }
+ for (auto& string_id : header_->GetCollections().StringIds()) {
+ CHECK(visited.find(string_id->DataItem()) != visited.end());
+ }
}
- return 0;
+ CHECK_EQ(data_index, string_datas.size());
}
// Orders code items according to specified class data ordering.
-// NOTE: If the section following the code items is byte aligned, the last code item is left in
-// place to preserve alignment. Layout needs an overhaul to handle movement of other sections.
-int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
- std::vector<dex_ir::ClassData*> new_class_data_order) {
- // Do not move code items if class data section precedes code item section.
- // ULEB encoding is variable length, causing problems determining the offset of the code items.
- // TODO: We should swap the order of these sections in the future to avoid this issue.
- uint32_t class_data_offset = header_->GetCollections().ClassDatasOffset();
- uint32_t code_item_offset = header_->GetCollections().CodeItemsOffset();
- if (class_data_offset < code_item_offset) {
- return 0;
- }
-
- // Find the last code item so we can leave it in place if the next section is not 4 byte aligned.
- dex_ir::CodeItem* last_code_item = nullptr;
- std::unordered_set<dex_ir::CodeItem*> visited_code_items;
- bool is_code_item_aligned = IsNextSectionCodeItemAligned(code_item_offset);
- if (!is_code_item_aligned) {
- for (auto& code_item_pair : header_->GetCollections().CodeItems()) {
- std::unique_ptr<dex_ir::CodeItem>& code_item = code_item_pair.second;
- if (last_code_item == nullptr
- || last_code_item->GetOffset() < code_item->GetOffset()) {
- last_code_item = code_item.get();
- }
- }
- }
-
+void DexLayout::LayoutCodeItems(const DexFile* dex_file) {
static constexpr InvokeType invoke_types[] = {
kDirect,
kVirtual
};
- const size_t num_layout_types = static_cast<size_t>(LayoutType::kLayoutTypeCount);
- std::unordered_set<dex_ir::CodeItem*> code_items[num_layout_types];
+ std::unordered_map<dex_ir::CodeItem*, LayoutType>& code_item_layout =
+ layout_hotness_info_.code_item_layout_;
+
+ // Assign hotness flags to all code items.
for (InvokeType invoke_type : invoke_types) {
for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
const bool is_profile_class =
@@ -1772,7 +1747,7 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
: class_data->VirtualMethods())) {
const dex_ir::MethodId *method_id = method->GetMethodId();
dex_ir::CodeItem *code_item = method->GetCodeItem();
- if (code_item == last_code_item || code_item == nullptr) {
+ if (code_item == nullptr) {
continue;
}
// Separate executed methods (clinits and profiled methods) from unexecuted methods.
@@ -1794,194 +1769,61 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
} else if (hotness.IsInProfile()) {
state = LayoutType::kLayoutTypeSometimesUsed;
}
- code_items[static_cast<size_t>(state)].insert(code_item);
- }
- }
- }
-
- // Removing duplicate CodeItems may expose other issues with downstream
- // optimizations such as quickening. But we need to ensure at least the weak
- // forms of it currently in use do not break layout optimizations.
- std::map<dex_ir::CodeItem*, uint32_t> original_code_item_offset;
- // Total_diff includes diffs generated by clinits, executed, and non-executed methods.
- int32_t total_diff = 0;
- // The relative placement has no effect on correctness; it is used to ensure
- // the layout is deterministic
- for (size_t index = 0; index < num_layout_types; ++index) {
- const std::unordered_set<dex_ir::CodeItem*>& code_items_set = code_items[index];
- // diff is reset for each class of code items.
- int32_t diff = 0;
- const uint32_t start_offset = code_item_offset;
- for (dex_ir::ClassData* data : new_class_data_order) {
- data->SetOffset(data->GetOffset() + diff);
- for (InvokeType invoke_type : invoke_types) {
- for (auto &method : *(invoke_type == InvokeType::kDirect
- ? data->DirectMethods()
- : data->VirtualMethods())) {
- dex_ir::CodeItem* code_item = method->GetCodeItem();
- if (code_item != nullptr &&
- code_items_set.find(code_item) != code_items_set.end()) {
- // Compute where the CodeItem was originally laid out.
- uint32_t original_offset = code_item->GetOffset();
- auto it = original_code_item_offset.find(code_item);
- if (it != original_code_item_offset.end()) {
- original_offset = it->second;
- } else {
- original_code_item_offset[code_item] = code_item->GetOffset();
- // Assign the new offset and move the pointer to allocate space.
- code_item->SetOffset(code_item_offset);
- code_item_offset +=
- RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
- }
- // Update the size of the encoded methods to reflect that the offset difference
- // may have changed the ULEB128 length.
- diff +=
- UnsignedLeb128Size(code_item->GetOffset()) - UnsignedLeb128Size(original_offset);
- }
+ auto it = code_item_layout.emplace(code_item, state);
+ if (!it.second) {
+ LayoutType& layout_type = it.first->second;
+ // Already exists, merge the hotness.
+ layout_type = MergeLayoutType(layout_type, state);
}
}
}
- DexLayoutSection& code_section = dex_sections_.sections_[static_cast<size_t>(
- DexLayoutSections::SectionType::kSectionTypeCode)];
- code_section.parts_[index].offset_ = start_offset;
- code_section.parts_[index].size_ = code_item_offset - start_offset;
- for (size_t i = 0; i < num_layout_types; ++i) {
- VLOG(dex) << "Code item layout bucket " << i << " count=" << code_items[i].size()
- << " bytes=" << code_section.parts_[i].size_;
- }
- total_diff += diff;
}
- // Adjust diff to be 4-byte aligned.
- return RoundUp(total_diff, kDexCodeItemAlignment);
-}
-bool DexLayout::IsNextSectionCodeItemAligned(uint32_t offset) {
- dex_ir::Collections& collections = header_->GetCollections();
- std::set<uint32_t> section_offsets;
- section_offsets.insert(collections.MapListOffset());
- section_offsets.insert(collections.TypeListsOffset());
- section_offsets.insert(collections.AnnotationSetRefListsOffset());
- section_offsets.insert(collections.AnnotationSetItemsOffset());
- section_offsets.insert(collections.ClassDatasOffset());
- section_offsets.insert(collections.CodeItemsOffset());
- section_offsets.insert(collections.StringDatasOffset());
- section_offsets.insert(collections.DebugInfoItemsOffset());
- section_offsets.insert(collections.AnnotationItemsOffset());
- section_offsets.insert(collections.EncodedArrayItemsOffset());
- section_offsets.insert(collections.AnnotationsDirectoryItemsOffset());
-
- auto found = section_offsets.find(offset);
- if (found != section_offsets.end()) {
- found++;
- if (found != section_offsets.end()) {
- return *found % kDexCodeItemAlignment == 0;
+ dex_ir::CollectionVector<dex_ir::CodeItem>::Vector& code_items =
+ header_->GetCollections().CodeItems();
+ if (VLOG_IS_ON(dex)) {
+ size_t layout_count[static_cast<size_t>(LayoutType::kLayoutTypeCount)] = {};
+ for (const std::unique_ptr<dex_ir::CodeItem>& code_item : code_items) {
+ auto it = code_item_layout.find(code_item.get());
+ DCHECK(it != code_item_layout.end());
+ ++layout_count[static_cast<size_t>(it->second)];
+ }
+ for (size_t i = 0; i < static_cast<size_t>(LayoutType::kLayoutTypeCount); ++i) {
+ LOG(INFO) << "Code items in category " << i << " count=" << layout_count[i];
}
- }
- return false;
-}
-
-// Adjust offsets of every item in the specified section by diff bytes.
-template<class T> void DexLayout::FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map,
- uint32_t diff) {
- for (auto& pair : map) {
- std::unique_ptr<T>& item = pair.second;
- item->SetOffset(item->GetOffset() + diff);
- }
-}
-
-// Adjust offsets of all sections with an address after the specified offset by diff bytes.
-void DexLayout::FixupSections(uint32_t offset, uint32_t diff) {
- dex_ir::Collections& collections = header_->GetCollections();
- uint32_t map_list_offset = collections.MapListOffset();
- if (map_list_offset > offset) {
- collections.SetMapListOffset(map_list_offset + diff);
- }
-
- uint32_t type_lists_offset = collections.TypeListsOffset();
- if (type_lists_offset > offset) {
- collections.SetTypeListsOffset(type_lists_offset + diff);
- FixupSection(collections.TypeLists(), diff);
- }
-
- uint32_t annotation_set_ref_lists_offset = collections.AnnotationSetRefListsOffset();
- if (annotation_set_ref_lists_offset > offset) {
- collections.SetAnnotationSetRefListsOffset(annotation_set_ref_lists_offset + diff);
- FixupSection(collections.AnnotationSetRefLists(), diff);
- }
-
- uint32_t annotation_set_items_offset = collections.AnnotationSetItemsOffset();
- if (annotation_set_items_offset > offset) {
- collections.SetAnnotationSetItemsOffset(annotation_set_items_offset + diff);
- FixupSection(collections.AnnotationSetItems(), diff);
- }
-
- uint32_t class_datas_offset = collections.ClassDatasOffset();
- if (class_datas_offset > offset) {
- collections.SetClassDatasOffset(class_datas_offset + diff);
- FixupSection(collections.ClassDatas(), diff);
- }
-
- uint32_t code_items_offset = collections.CodeItemsOffset();
- if (code_items_offset > offset) {
- collections.SetCodeItemsOffset(code_items_offset + diff);
- FixupSection(collections.CodeItems(), diff);
- }
-
- uint32_t string_datas_offset = collections.StringDatasOffset();
- if (string_datas_offset > offset) {
- collections.SetStringDatasOffset(string_datas_offset + diff);
- FixupSection(collections.StringDatas(), diff);
- }
-
- uint32_t debug_info_items_offset = collections.DebugInfoItemsOffset();
- if (debug_info_items_offset > offset) {
- collections.SetDebugInfoItemsOffset(debug_info_items_offset + diff);
- FixupSection(collections.DebugInfoItems(), diff);
- }
-
- uint32_t annotation_items_offset = collections.AnnotationItemsOffset();
- if (annotation_items_offset > offset) {
- collections.SetAnnotationItemsOffset(annotation_items_offset + diff);
- FixupSection(collections.AnnotationItems(), diff);
- }
-
- uint32_t encoded_array_items_offset = collections.EncodedArrayItemsOffset();
- if (encoded_array_items_offset > offset) {
- collections.SetEncodedArrayItemsOffset(encoded_array_items_offset + diff);
- FixupSection(collections.EncodedArrayItems(), diff);
}
- uint32_t annotations_directory_items_offset = collections.AnnotationsDirectoryItemsOffset();
- if (annotations_directory_items_offset > offset) {
- collections.SetAnnotationsDirectoryItemsOffset(annotations_directory_items_offset + diff);
- FixupSection(collections.AnnotationsDirectoryItems(), diff);
- }
+ // Sort the code items vector by new layout. The writing process will take care of calculating
+ // all the offsets. Stable sort to preserve any existing locality that might be there.
+ std::stable_sort(code_items.begin(),
+ code_items.end(),
+ [&](const std::unique_ptr<dex_ir::CodeItem>& a,
+ const std::unique_ptr<dex_ir::CodeItem>& b) {
+ auto it_a = code_item_layout.find(a.get());
+ auto it_b = code_item_layout.find(b.get());
+ DCHECK(it_a != code_item_layout.end());
+ DCHECK(it_b != code_item_layout.end());
+ const LayoutType layout_type_a = it_a->second;
+ const LayoutType layout_type_b = it_b->second;
+ return layout_type_a < layout_type_b;
+ });
}
void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
- const int32_t string_diff = LayoutStringData(dex_file);
- // If we expanded the string data section, we need to update the offsets or else we will
- // corrupt the next section when writing out.
- FixupSections(header_->GetCollections().StringDatasOffset(), string_diff);
- // Update file size.
- header_->SetFileSize(header_->FileSize() + string_diff);
-
- std::vector<dex_ir::ClassData*> new_class_data_order = LayoutClassDefsAndClassData(dex_file);
- const int32_t code_item_diff = LayoutCodeItems(dex_file, new_class_data_order);
- // Move sections after ClassData by diff bytes.
- FixupSections(header_->GetCollections().ClassDatasOffset(), code_item_diff);
-
- // Update file and data size.
- // The data size must be aligned to kDataSectionAlignment.
- const int32_t total_diff = code_item_diff + string_diff;
- header_->SetDataSize(RoundUp(header_->DataSize() + total_diff, kDataSectionAlignment));
- header_->SetFileSize(header_->FileSize() + total_diff);
+ LayoutStringData(dex_file);
+ LayoutClassDefsAndClassData(dex_file);
+ LayoutCodeItems(dex_file);
}
-void DexLayout::OutputDexFile(const DexFile* dex_file) {
+void DexLayout::OutputDexFile(const DexFile* dex_file, bool compute_offsets) {
const std::string& dex_file_location = dex_file->GetLocation();
std::string error_msg;
std::unique_ptr<File> new_file;
+ // Since we allow dex growth, we need to size the map larger than the original input to be safe.
+ // Reserve an extra 10% to add some buffer room. Note that this is probably more than
+ // necessary.
+ constexpr size_t kReserveFraction = 10;
+ const size_t max_size = header_->FileSize() + header_->FileSize() / kReserveFraction;
if (!options_.output_to_memmap_) {
std::string output_location(options_.output_dex_directory_);
size_t last_slash = dex_file_location.rfind('/');
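LayoutCodeItems above classifies each CodeItem once, merging hotness when the same item is reached through several methods, and then relies on a stable sort so the writer can emit the categories contiguously while keeping the original relative order inside each bucket. A sketch of that shape with stand-in types; the LayoutKind values and the min-based merge are assumptions for illustration, not the actual LayoutType/MergeLayoutType definitions:

    #include <algorithm>
    #include <cstdint>
    #include <memory>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    enum class LayoutKind : uint8_t { kHot, kSometimesUsed, kStartupOnly, kCount };

    struct Item { int id; };

    LayoutKind MergeHotness(LayoutKind a, LayoutKind b) {
      return std::min(a, b);  // Assumption: the hotter (smaller) category wins.
    }

    // observations: (item, hotness) pairs, one per method referencing the item.
    void SortByHotness(std::vector<std::unique_ptr<Item>>& items,
                       const std::vector<std::pair<Item*, LayoutKind>>& observations) {
      std::unordered_map<Item*, LayoutKind> layout;
      for (const auto& obs : observations) {
        auto it = layout.emplace(obs.first, obs.second);
        if (!it.second) {
          it.first->second = MergeHotness(it.first->second, obs.second);  // Seen before: merge.
        }
      }
      // Precondition: every item has at least one observation. The stable sort keeps the original
      // relative order within each category, preserving whatever locality was already there.
      std::stable_sort(items.begin(), items.end(),
                       [&](const std::unique_ptr<Item>& a, const std::unique_ptr<Item>& b) {
                         return layout.at(a.get()) < layout.at(b.get());
                       });
    }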
@@ -1998,15 +1840,15 @@ void DexLayout::OutputDexFile(const DexFile* dex_file) {
LOG(ERROR) << "Could not create dex writer output file: " << output_location;
return;
}
- if (ftruncate(new_file->Fd(), header_->FileSize()) != 0) {
+ if (ftruncate(new_file->Fd(), max_size) != 0) {
LOG(ERROR) << "Could not grow dex writer output file: " << output_location;;
new_file->Erase();
return;
}
- mem_map_.reset(MemMap::MapFile(header_->FileSize(), PROT_READ | PROT_WRITE, MAP_SHARED,
+ mem_map_.reset(MemMap::MapFile(max_size, PROT_READ | PROT_WRITE, MAP_SHARED,
new_file->Fd(), 0, /*low_4gb*/ false, output_location.c_str(), &error_msg));
} else {
- mem_map_.reset(MemMap::MapAnonymous("layout dex", nullptr, header_->FileSize(),
+ mem_map_.reset(MemMap::MapAnonymous("layout dex", nullptr, max_size,
PROT_READ | PROT_WRITE, /* low_4gb */ false, /* reuse */ false, &error_msg));
}
if (mem_map_ == nullptr) {
@@ -2016,8 +1858,14 @@ void DexLayout::OutputDexFile(const DexFile* dex_file) {
}
return;
}
- DexWriter::Output(header_, mem_map_.get(), options_.compact_dex_level_);
+ DexWriter::Output(header_, mem_map_.get(), this, compute_offsets, options_.compact_dex_level_);
if (new_file != nullptr) {
+ // Since we make the memmap larger than needed, shrink the file back down to not leave extra
+ // padding.
+ int res = new_file->SetLength(header_->FileSize());
+ if (res != 0) {
+ LOG(ERROR) << "Truncating dex output file failed with " << res;
+ }
UNUSED(new_file->FlushCloseOrErase());
}
}
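Because writing can grow the file (for example, longer ULEB128-encoded offsets), the output map above is sized with roughly 10% of slack and the file is truncated back to the real size once writing finishes. A small sketch of that sizing, using plain POSIX calls as stand-ins for ART's File/MemMap helpers:

    #include <cstddef>
    #include <sys/mman.h>
    #include <unistd.h>

    void* MapOutput(int fd, size_t original_size, size_t* mapped_size) {
      constexpr size_t kReserveFraction = 10;  // Reserve an extra 1/10th of the input size.
      const size_t max_size = original_size + original_size / kReserveFraction;
      if (ftruncate(fd, max_size) != 0) {
        return nullptr;  // Could not grow the output file.
      }
      *mapped_size = max_size;
      void* mem = mmap(nullptr, max_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      return mem == MAP_FAILED ? nullptr : mem;
    }

    void FinishOutput(int fd, size_t final_size) {
      // After writing, drop the unused slack so the file is exactly final_size bytes.
      ftruncate(fd, final_size);
    }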
@@ -2028,7 +1876,15 @@ void DexLayout::OutputDexFile(const DexFile* dex_file) {
void DexLayout::ProcessDexFile(const char* file_name,
const DexFile* dex_file,
size_t dex_file_index) {
- std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file));
+ const bool output = options_.output_dex_directory_ != nullptr || options_.output_to_memmap_;
+ // Avoid eagerly assigning offsets where possible to help find bugs, since GetOffset will abort
+ // if the offset is unassigned.
+ bool eagerly_assign_offsets = false;
+ if (options_.visualize_pattern_ || options_.show_section_statistics_ || options_.dump_) {
+ // These options require the offsets for dumping purposes.
+ eagerly_assign_offsets = true;
+ }
+ std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file, eagerly_assign_offsets));
SetHeader(header.get());
if (options_.verbose_) {
@@ -2052,13 +1908,17 @@ void DexLayout::ProcessDexFile(const char* file_name,
}
// In case we are outputting to a file, keep it open so we can verify.
- if (options_.output_dex_directory_ != nullptr || options_.output_to_memmap_) {
- if (info_ != nullptr) {
+ if (output) {
+ // Layout information about what strings and code items are hot. Used by the writing process
+ // to generate the sections that are stored in the oat file.
+ bool do_layout = info_ != nullptr;
+ if (do_layout) {
LayoutOutputFile(dex_file);
}
- OutputDexFile(dex_file);
+ OutputDexFile(dex_file, do_layout);
// Clear header before verifying to reduce peak RAM usage.
+ const size_t file_size = header_->FileSize();
header.reset();
// Verify the output dex file's structure, only enabled by default for debug builds.
@@ -2066,7 +1926,7 @@ void DexLayout::ProcessDexFile(const char* file_name,
std::string error_msg;
std::string location = "memory mapped file for " + std::string(file_name);
std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
- mem_map_->Size(),
+ file_size,
location,
/* checksum */ 0,
/*oat_dex_file*/ nullptr,
@@ -2076,11 +1936,16 @@ void DexLayout::ProcessDexFile(const char* file_name,
CHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
// Do IR-level comparison between input and output. This check ignores potential differences
- // due to layout, so offsets are not checked. Instead, it checks the data contents of each item.
+ // due to layout, so offsets are not checked. Instead, it checks the data contents of each
+ // item.
//
// Regenerate output IR to catch any bugs that might happen during writing.
- std::unique_ptr<dex_ir::Header> output_header(dex_ir::DexIrBuilder(*output_dex_file));
- std::unique_ptr<dex_ir::Header> orig_header(dex_ir::DexIrBuilder(*dex_file));
+ std::unique_ptr<dex_ir::Header> output_header(
+ dex_ir::DexIrBuilder(*output_dex_file,
+ /*eagerly_assign_offsets*/ true));
+ std::unique_ptr<dex_ir::Header> orig_header(
+ dex_ir::DexIrBuilder(*dex_file,
+ /*eagerly_assign_offsets*/ true));
CHECK(VerifyOutputDexFile(output_header.get(), orig_header.get(), &error_msg)) << error_msg;
}
}
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 2e897739cc..8a277b7afe 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -25,6 +25,7 @@
#include <stdint.h>
#include <stdio.h>
+#include <unordered_map>
#include "cdex/compact_dex_level.h"
#include "dex_file_layout.h"
@@ -69,6 +70,13 @@ class Options {
const char* profile_file_name_ = nullptr;
};
+// Hotness info
+class DexLayoutHotnessInfo {
+ public:
+ // Store layout information so that the offset calculation can specify the section sizes.
+ std::unordered_map<dex_ir::CodeItem*, LayoutType> code_item_layout_;
+};
+
class DexLayout {
public:
DexLayout(Options& options,
@@ -86,10 +94,14 @@ class DexLayout {
MemMap* GetAndReleaseMemMap() { return mem_map_.release(); }
- const DexLayoutSections& GetSections() const {
+ DexLayoutSections& GetSections() {
return dex_sections_;
}
+ const DexLayoutHotnessInfo& LayoutHotnessInfo() const {
+ return layout_hotness_info_;
+ }
+
private:
void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item);
void DumpBytecodes(uint32_t idx, const dex_ir::CodeItem* code, uint32_t code_offset);
@@ -120,18 +132,14 @@ class DexLayout {
void DumpSField(uint32_t idx, uint32_t flags, int i, dex_ir::EncodedValue* init);
void DumpDexFile();
- std::vector<dex_ir::ClassData*> LayoutClassDefsAndClassData(const DexFile* dex_file);
- int32_t LayoutCodeItems(const DexFile* dex_file,
- std::vector<dex_ir::ClassData*> new_class_data_order);
- int32_t LayoutStringData(const DexFile* dex_file);
- bool IsNextSectionCodeItemAligned(uint32_t offset);
- template<class T> void FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map, uint32_t diff);
- void FixupSections(uint32_t offset, uint32_t diff);
+ void LayoutClassDefsAndClassData(const DexFile* dex_file);
+ void LayoutCodeItems(const DexFile* dex_file);
+ void LayoutStringData(const DexFile* dex_file);
// Creates a new layout for the dex file based on profile info.
// Currently reorders ClassDefs, ClassDataItems, and CodeItems.
void LayoutOutputFile(const DexFile* dex_file);
- void OutputDexFile(const DexFile* dex_file);
+ void OutputDexFile(const DexFile* dex_file, bool compute_offsets);
void DumpCFG(const DexFile* dex_file, int idx);
void DumpCFG(const DexFile* dex_file, uint32_t dex_method_idx, const DexFile::CodeItem* code);
@@ -142,6 +150,8 @@ class DexLayout {
dex_ir::Header* header_;
std::unique_ptr<MemMap> mem_map_;
DexLayoutSections dex_sections_;
+ // Layout hotness information is only calculated when dexlayout is enabled.
+ DexLayoutHotnessInfo layout_hotness_info_;
DISALLOW_COPY_AND_ASSIGN(DexLayout);
};
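DexLayoutHotnessInfo exposes the per-CodeItem layout so the writer, rather than dexlayout itself, can compute the final section bounds. A hypothetical sketch of how such a consumer could widen a [start, end) range per category as items are emitted; the types are simplified stand-ins and the real bookkeeping lives in the dex writer and DexLayoutSections:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    enum class Kind : uint8_t { kHot, kSometimesUsed, kStartupOnly, kCount };

    struct Range {
      uint32_t start = UINT32_MAX;  // Empty until the first item of this kind is written.
      uint32_t end = 0;
    };

    void AccountItem(Range (&parts)[static_cast<size_t>(Kind::kCount)],
                     Kind kind, uint32_t offset, uint32_t size) {
      Range& r = parts[static_cast<size_t>(kind)];
      r.start = std::min(r.start, offset);
      r.end = std::max(r.end, offset + size);
    }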
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index c4f7accc8c..f994fd6533 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -732,4 +732,46 @@ TEST_F(DexLayoutTest, CodeItemOverrun) {
dexlayout_args));
}
+// Test that link data is written out (or at least the header is updated).
+TEST_F(DexLayoutTest, LinkData) {
+ TEST_DISABLED_FOR_TARGET();
+ ScratchFile temp_dex;
+ size_t file_size = 0;
+ MutateDexFile(temp_dex.GetFile(), GetTestDexFileName("ManyMethods"), [&] (DexFile* dex) {
+ DexFile::Header& header = const_cast<DexFile::Header&>(dex->GetHeader());
+ header.link_off_ = header.file_size_;
+ header.link_size_ = 16 * KB;
+ header.file_size_ += header.link_size_;
+ file_size = header.file_size_;
+ });
+ TEMP_FAILURE_RETRY(temp_dex.GetFile()->SetLength(file_size));
+
+ std::string error_msg;
+
+ ScratchFile tmp_file;
+ const std::string& tmp_name = tmp_file.GetFilename();
+ size_t tmp_last_slash = tmp_name.rfind('/');
+ std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
+ ScratchFile profile_file;
+
+ std::vector<std::string> dexlayout_args =
+ { "-i",
+ "-v",
+ "-w", tmp_dir,
+ "-o", tmp_name,
+ "-p", profile_file.GetFilename(),
+ temp_dex.GetFilename()
+ };
+ // -v makes sure that the layout did not corrupt the dex file.
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ /*dex_filename*/ nullptr,
+ &profile_file,
+ dexlayout_args));
+
+ std::string output_dex = temp_dex.GetFilename() + ".new";
+ std::vector<std::string> rm_exec_argv =
+ { "/bin/rm", output_dex };
+ ASSERT_TRUE(::art::Exec(rm_exec_argv, &error_msg));
+}
+
} // namespace art
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 682b82b5cd..e8e62c2b40 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -94,6 +94,10 @@ struct ArtJvmTiEnv : public jvmtiEnv {
static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
return art::down_cast<ArtJvmTiEnv*>(env);
}
+
+ // Top level lock. Nothing else may be held when acquiring this, except the mutator lock taken
+ // exclusively for full thread-suspension.
+ static art::Mutex *gEnvMutex ACQUIRED_AFTER(art::Locks::mutator_lock_);
};
// Macro and constexpr to make error values less annoying to write.
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 5344e0fbde..007669b50f 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -46,6 +46,45 @@ static inline ArtJvmtiEvent GetArtJvmtiEvent(ArtJvmTiEnv* env, jvmtiEvent e) {
namespace impl {
+// Helper for ensuring that the dispatch environment is sane. Events with JNIEnvs need to stash
+// pending exceptions since they can cause new ones to be thrown. In accordance with the JVMTI
+// specification we allow exceptions originating from events to overwrite the current exception,
+// including exceptions originating from earlier events.
+class ScopedEventDispatchEnvironment FINAL : public art::ValueObject {
+ public:
+ ScopedEventDispatchEnvironment() : env_(nullptr), throw_(nullptr, nullptr) {
+ DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative);
+ }
+
+ explicit ScopedEventDispatchEnvironment(JNIEnv* env)
+ : env_(env),
+ throw_(env_, env_->ExceptionOccurred()) {
+ DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative);
+ // The spec doesn't say how much local data should be there, so we just give 128 which seems
+ // likely to be enough for most cases.
+ env_->PushLocalFrame(128);
+ env_->ExceptionClear();
+ }
+
+ ~ScopedEventDispatchEnvironment() {
+ if (env_ != nullptr) {
+ if (throw_.get() != nullptr && !env_->ExceptionCheck()) {
+ // TODO It would be nice to add the overwritten exceptions to the suppressed exceptions list
+ // of the newest exception.
+ env_->Throw(throw_.get());
+ }
+ env_->PopLocalFrame(nullptr);
+ }
+ DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative);
+ }
+
+ private:
+ JNIEnv* env_;
+ ScopedLocalRef<jthrowable> throw_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedEventDispatchEnvironment);
+};
+
// Infrastructure to achieve type safety for event dispatch.
#define FORALL_EVENT_TYPES(fn) \
@@ -97,27 +136,68 @@ FORALL_EVENT_TYPES(EVENT_FN_TYPE)
#undef EVENT_FN_TYPE
-template <ArtJvmtiEvent kEvent>
-ALWAYS_INLINE inline typename EventFnType<kEvent>::type GetCallback(ArtJvmTiEnv* env);
-
-#define GET_CALLBACK(name, enum_name) \
-template <> \
-ALWAYS_INLINE inline EventFnType<enum_name>::type GetCallback<enum_name>( \
- ArtJvmTiEnv* env) { \
- if (env->event_callbacks == nullptr) { \
- return nullptr; \
- } \
- return env->event_callbacks->name; \
-}
+#define MAKE_EVENT_HANDLER_FUNC(name, enum_name) \
+template<> \
+struct EventHandlerFunc<enum_name> { \
+ using EventFnType = typename impl::EventFnType<enum_name>::type; \
+ explicit EventHandlerFunc(ArtJvmTiEnv* env) \
+ : env_(env), \
+ fn_(env_->event_callbacks == nullptr ? nullptr : env_->event_callbacks->name) { } \
+ \
+ template <typename ...Args> \
+ ALWAYS_INLINE \
+ void ExecuteCallback(JNIEnv* jnienv, Args... args) const { \
+ if (fn_ != nullptr) { \
+ ScopedEventDispatchEnvironment sede(jnienv); \
+ DoExecute(jnienv, args...); \
+ } \
+ } \
+ \
+ template <typename ...Args> \
+ ALWAYS_INLINE \
+ void ExecuteCallback(Args... args) const { \
+ if (fn_ != nullptr) { \
+ ScopedEventDispatchEnvironment sede; \
+ DoExecute(args...); \
+ } \
+ } \
+ \
+ private: \
+ template <typename ...Args> \
+ ALWAYS_INLINE \
+ inline void DoExecute(Args... args) const { \
+ static_assert(std::is_same<EventFnType, void(*)(jvmtiEnv*, Args...)>::value, \
+ "Unexpected different type of ExecuteCallback"); \
+ fn_(env_, args...); \
+ } \
+ \
+ public: \
+ ArtJvmTiEnv* env_; \
+ EventFnType fn_; \
+};
-FORALL_EVENT_TYPES(GET_CALLBACK)
+FORALL_EVENT_TYPES(MAKE_EVENT_HANDLER_FUNC)
-#undef GET_CALLBACK
+#undef MAKE_EVENT_HANDLER_FUNC
#undef FORALL_EVENT_TYPES
} // namespace impl
+template <ArtJvmtiEvent kEvent, typename ...Args>
+inline std::vector<impl::EventHandlerFunc<kEvent>> EventHandler::CollectEvents(art::Thread* thread,
+ Args... args) const {
+ art::MutexLock mu(thread, envs_lock_);
+ std::vector<impl::EventHandlerFunc<kEvent>> handlers;
+ for (ArtJvmTiEnv* env : envs) {
+ if (ShouldDispatch<kEvent>(env, thread, args...)) {
+ impl::EventHandlerFunc<kEvent> h(env);
+ handlers.push_back(h);
+ }
+ }
+ return handlers;
+}
+
// C++ does not allow partial template function specialization. The dispatch for our separated
// ClassFileLoadHook event types is the same, so use this helper for code deduplication.
template <ArtJvmtiEvent kEvent>
@@ -131,29 +211,37 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
const unsigned char* class_data,
jint* new_class_data_len,
unsigned char** new_class_data) const {
+ art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable, "Unsupported event");
DCHECK(*new_class_data == nullptr);
jint current_len = class_data_len;
unsigned char* current_class_data = const_cast<unsigned char*>(class_data);
+ std::vector<impl::EventHandlerFunc<kEvent>> handlers =
+ CollectEvents<kEvent>(thread,
+ jnienv,
+ class_being_redefined,
+ loader,
+ name,
+ protection_domain,
+ class_data_len,
+ class_data,
+ new_class_data_len,
+ new_class_data);
ArtJvmTiEnv* last_env = nullptr;
- for (ArtJvmTiEnv* env : envs) {
- if (env == nullptr) {
- continue;
- }
+ for (const impl::EventHandlerFunc<kEvent>& event : handlers) {
jint new_len = 0;
unsigned char* new_data = nullptr;
- DispatchEventOnEnv<kEvent>(env,
- thread,
- jnienv,
- class_being_redefined,
- loader,
- name,
- protection_domain,
- current_len,
- static_cast<const unsigned char*>(current_class_data),
- &new_len,
- &new_data);
+ ExecuteCallback<kEvent>(event,
+ jnienv,
+ class_being_redefined,
+ loader,
+ name,
+ protection_domain,
+ current_len,
+ static_cast<const unsigned char*>(current_class_data),
+ &new_len,
+ &new_data);
if (new_data != nullptr && new_data != current_class_data) {
// Destroy the data the last transformer made. We skip this if the previous state was the
// initial one since we don't know here which jvmtiEnv allocated it.
@@ -162,7 +250,7 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
if (last_env != nullptr) {
last_env->Deallocate(current_class_data);
}
- last_env = env;
+ last_env = event.env_;
current_class_data = new_data;
current_len = new_len;
}
@@ -177,69 +265,27 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
// exactly the argument types of the corresponding Jvmti kEvent function pointer.
template <ArtJvmtiEvent kEvent, typename ...Args>
-inline void EventHandler::ExecuteCallback(ArtJvmTiEnv* env, Args... args) {
- using FnType = typename impl::EventFnType<kEvent>::type;
- FnType callback = impl::GetCallback<kEvent>(env);
- if (callback != nullptr) {
- (*callback)(env, args...);
- }
-}
-
-template <ArtJvmtiEvent kEvent, typename ...Args>
inline void EventHandler::DispatchEvent(art::Thread* thread, Args... args) const {
+ art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
static_assert(!std::is_same<JNIEnv*,
typename std::decay_t<
std::tuple_element_t<0, std::tuple<Args..., nullptr_t>>>>::value,
"Should be calling DispatchEvent with explicit JNIEnv* argument!");
DCHECK(thread == nullptr || !thread->IsExceptionPending());
- for (ArtJvmTiEnv* env : envs) {
- if (env != nullptr) {
- DispatchEventOnEnv<kEvent, Args...>(env, thread, args...);
- }
+ std::vector<impl::EventHandlerFunc<kEvent>> events = CollectEvents<kEvent>(thread, args...);
+ for (auto event : events) {
+ ExecuteCallback<kEvent>(event, args...);
}
}
-// Helper for ensuring that the dispatch environment is sane. Events with JNIEnvs need to stash
-// pending exceptions since they can cause new ones to be thrown. In accordance with the JVMTI
-// specification we allow exceptions originating from events to overwrite the current exception,
-// including exceptions originating from earlier events.
-class ScopedEventDispatchEnvironment FINAL : public art::ValueObject {
- public:
- explicit ScopedEventDispatchEnvironment(JNIEnv* env)
- : env_(env),
- thr_(env_, env_->ExceptionOccurred()),
- suspend_(art::Thread::Current(), art::kNative) {
- // The spec doesn't say how much local data should be there, so we just give 128 which seems
- // likely to be enough for most cases.
- env_->PushLocalFrame(128);
- env_->ExceptionClear();
- UNUSED(suspend_);
- }
-
- ~ScopedEventDispatchEnvironment() {
- if (thr_.get() != nullptr && !env_->ExceptionCheck()) {
- // TODO It would be nice to add the overwritten exceptions to the suppressed exceptions list
- // of the newest exception.
- env_->Throw(thr_.get());
- }
- env_->PopLocalFrame(nullptr);
- }
-
- private:
- JNIEnv* env_;
- ScopedLocalRef<jthrowable> thr_;
- // Not actually unused. The destructor/constructor does important work.
- art::ScopedThreadStateChange suspend_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedEventDispatchEnvironment);
-};
-
template <ArtJvmtiEvent kEvent, typename ...Args>
inline void EventHandler::DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const {
- for (ArtJvmTiEnv* env : envs) {
- if (env != nullptr) {
- DispatchEventOnEnv<kEvent, Args...>(env, thread, jnienv, args...);
- }
+ art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
+ std::vector<impl::EventHandlerFunc<kEvent>> events = CollectEvents<kEvent>(thread,
+ jnienv,
+ args...);
+ for (auto event : events) {
+ ExecuteCallback<kEvent>(event, jnienv, args...);
}
}
@@ -248,8 +294,9 @@ inline void EventHandler::DispatchEventOnEnv(
ArtJvmTiEnv* env, art::Thread* thread, JNIEnv* jnienv, Args... args) const {
DCHECK(env != nullptr);
if (ShouldDispatch<kEvent, JNIEnv*, Args...>(env, thread, jnienv, args...)) {
- ScopedEventDispatchEnvironment sede(jnienv);
- ExecuteCallback<kEvent, JNIEnv*, Args...>(env, jnienv, args...);
+ art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
+ impl::EventHandlerFunc<kEvent> func(env);
+ ExecuteCallback<kEvent>(func, jnienv, args...);
}
}
@@ -260,11 +307,26 @@ inline void EventHandler::DispatchEventOnEnv(
typename std::decay_t<
std::tuple_element_t<0, std::tuple<Args..., nullptr_t>>>>::value,
"Should be calling DispatchEventOnEnv with explicit JNIEnv* argument!");
- if (ShouldDispatch<kEvent>(env, thread, args...)) {
- ExecuteCallback<kEvent, Args...>(env, args...);
+ DCHECK(env != nullptr);
+ if (ShouldDispatch<kEvent, Args...>(env, thread, args...)) {
+ art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
+ impl::EventHandlerFunc<kEvent> func(env);
+ ExecuteCallback<kEvent>(func, args...);
}
}
+template <ArtJvmtiEvent kEvent, typename ...Args>
+inline void EventHandler::ExecuteCallback(impl::EventHandlerFunc<kEvent> handler, Args... args) {
+ handler.ExecuteCallback(args...);
+}
+
+template <ArtJvmtiEvent kEvent, typename ...Args>
+inline void EventHandler::ExecuteCallback(impl::EventHandlerFunc<kEvent> handler,
+ JNIEnv* jnienv,
+ Args... args) {
+ handler.ExecuteCallback(jnienv, args...);
+}
+
// Events that need custom logic for if we send the event but are otherwise normal. This includes
// the kBreakpoint, kFramePop, kFieldAccess, and kFieldModification events.
@@ -347,14 +409,13 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFieldAccess>(
// something.
template <>
inline void EventHandler::ExecuteCallback<ArtJvmtiEvent::kFramePop>(
- ArtJvmTiEnv* env,
+ impl::EventHandlerFunc<ArtJvmtiEvent::kFramePop> event,
JNIEnv* jnienv,
jthread jni_thread,
jmethodID jmethod,
jboolean is_exception,
const art::ShadowFrame* frame ATTRIBUTE_UNUSED) {
- ExecuteCallback<ArtJvmtiEvent::kFramePop>(
- env, jnienv, jni_thread, jmethod, is_exception);
+ ExecuteCallback<ArtJvmtiEvent::kFramePop>(event, jnienv, jni_thread, jmethod, is_exception);
}
// Need to give a custom specialization for NativeMethodBind since it has to deal with an out
@@ -366,20 +427,25 @@ inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kNativeMethodBind>(art::T
jmethodID method,
void* cur_method,
void** new_method) const {
+ art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
+ std::vector<impl::EventHandlerFunc<ArtJvmtiEvent::kNativeMethodBind>> events =
+ CollectEvents<ArtJvmtiEvent::kNativeMethodBind>(thread,
+ jnienv,
+ jni_thread,
+ method,
+ cur_method,
+ new_method);
*new_method = cur_method;
- for (ArtJvmTiEnv* env : envs) {
- if (env != nullptr) {
- *new_method = cur_method;
- DispatchEventOnEnv<ArtJvmtiEvent::kNativeMethodBind>(env,
- thread,
- jnienv,
- jni_thread,
- method,
- cur_method,
- new_method);
- if (*new_method != nullptr) {
- cur_method = *new_method;
- }
+ for (auto event : events) {
+ *new_method = cur_method;
+ ExecuteCallback<ArtJvmtiEvent::kNativeMethodBind>(event,
+ jnienv,
+ jni_thread,
+ method,
+ cur_method,
+ new_method);
+ if (*new_method != nullptr) {
+ cur_method = *new_method;
}
}
*new_method = cur_method;
@@ -439,7 +505,7 @@ inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetr
}
template <ArtJvmtiEvent kEvent>
-inline bool EventHandler::ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread) {
+inline bool EventHandler::ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread) const {
bool dispatch = env->event_masks.global_event_mask.Test(kEvent);
if (!dispatch && thread != nullptr && env->event_masks.unioned_thread_event_mask.Test(kEvent)) {
@@ -461,6 +527,11 @@ inline bool EventHandler::ShouldDispatch(ArtJvmTiEnv* env,
}
inline void EventHandler::RecalculateGlobalEventMask(ArtJvmtiEvent event) {
+ art::MutexLock mu(art::Thread::Current(), envs_lock_);
+ RecalculateGlobalEventMaskLocked(event);
+}
+
+inline void EventHandler::RecalculateGlobalEventMaskLocked(ArtJvmtiEvent event) {
bool union_value = false;
for (const ArtJvmTiEnv* stored_env : envs) {
if (stored_env == nullptr) {
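The event dispatch rewrite above follows a collect-then-dispatch pattern: the matching handlers are snapshotted while envs_lock_ is held, and the callbacks run after the lock is released, so a callback can register or remove environments without deadlocking. A generic sketch of the pattern with stand-in types:

    #include <functional>
    #include <list>
    #include <mutex>
    #include <utility>
    #include <vector>

    class Dispatcher {
     public:
      void Register(std::function<void(int)> cb) {
        std::lock_guard<std::mutex> lock(lock_);
        callbacks_.push_back(std::move(cb));
      }

      void Dispatch(int event_arg) {
        std::vector<std::function<void(int)>> snapshot;
        {
          std::lock_guard<std::mutex> lock(lock_);  // Collect under the lock...
          snapshot.assign(callbacks_.begin(), callbacks_.end());
        }
        for (const auto& cb : snapshot) {           // ...then dispatch outside of it.
          cb(event_arg);
        }
      }

     private:
      std::mutex lock_;
      std::list<std::function<void(int)>> callbacks_;
    };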
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index d1d606de48..be4ebbc85e 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -193,25 +193,21 @@ void EventMasks::HandleChangedCapabilities(const jvmtiCapabilities& caps, bool c
}
void EventHandler::RegisterArtJvmTiEnv(ArtJvmTiEnv* env) {
- // Since we never shrink this array we might as well try to fill gaps.
- auto it = std::find(envs.begin(), envs.end(), nullptr);
- if (it != envs.end()) {
- *it = env;
- } else {
- envs.push_back(env);
- }
+ art::MutexLock mu(art::Thread::Current(), envs_lock_);
+ envs.push_back(env);
}
void EventHandler::RemoveArtJvmTiEnv(ArtJvmTiEnv* env) {
+ art::MutexLock mu(art::Thread::Current(), envs_lock_);
// Since we might be currently iterating over the envs list we cannot actually erase elements.
// Instead we will simply replace them with 'nullptr' and skip them manually.
auto it = std::find(envs.begin(), envs.end(), env);
if (it != envs.end()) {
- *it = nullptr;
+ envs.erase(it);
for (size_t i = static_cast<size_t>(ArtJvmtiEvent::kMinEventTypeVal);
i <= static_cast<size_t>(ArtJvmtiEvent::kMaxEventTypeVal);
++i) {
- RecalculateGlobalEventMask(static_cast<ArtJvmtiEvent>(i));
+ RecalculateGlobalEventMaskLocked(static_cast<ArtJvmtiEvent>(i));
}
}
}
@@ -431,11 +427,11 @@ class JvmtiGcPauseListener : public art::gc::GcPauseListener {
finish_enabled_(false) {}
void StartPause() OVERRIDE {
- handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(nullptr);
+ handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(art::Thread::Current());
}
void EndPause() OVERRIDE {
- handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(nullptr);
+ handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(art::Thread::Current());
}
bool IsEnabled() {
@@ -1176,7 +1172,8 @@ void EventHandler::Shutdown() {
art::Runtime::Current()->GetInstrumentation()->RemoveListener(method_trace_listener_.get(), ~0);
}
-EventHandler::EventHandler() {
+EventHandler::EventHandler() : envs_lock_("JVMTI Environment List Lock",
+ art::LockLevel::kTopLockLevel) {
alloc_listener_.reset(new JvmtiAllocationListener(this));
ddm_listener_.reset(new JvmtiDdmChunkListener(this));
gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index a99ed7b212..c73215f07b 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -158,6 +158,10 @@ struct EventMasks {
void HandleChangedCapabilities(const jvmtiCapabilities& caps, bool caps_added);
};
+namespace impl {
+template <ArtJvmtiEvent kEvent> struct EventHandlerFunc { };
+} // namespace impl
+
// Helper class for event handling.
class EventHandler {
public:
@@ -169,10 +173,10 @@ class EventHandler {
// Register an env. It is assumed that this happens on env creation, that is, no events are
// enabled, yet.
- void RegisterArtJvmTiEnv(ArtJvmTiEnv* env);
+ void RegisterArtJvmTiEnv(ArtJvmTiEnv* env) REQUIRES(!envs_lock_);
// Remove an env.
- void RemoveArtJvmTiEnv(ArtJvmTiEnv* env);
+ void RemoveArtJvmTiEnv(ArtJvmTiEnv* env) REQUIRES(!envs_lock_);
bool IsEventEnabledAnywhere(ArtJvmtiEvent event) const {
if (!EventMask::EventIsInRange(event)) {
@@ -184,13 +188,15 @@ class EventHandler {
jvmtiError SetEvent(ArtJvmTiEnv* env,
art::Thread* thread,
ArtJvmtiEvent event,
- jvmtiEventMode mode);
+ jvmtiEventMode mode)
+ REQUIRES(!envs_lock_);
// Dispatch event to all registered environments. Since this one doesn't have a JNIEnv* it doesn't
// matter if it has the mutator_lock.
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
- inline void DispatchEvent(art::Thread* thread, Args... args) const;
+ inline void DispatchEvent(art::Thread* thread, Args... args) const
+ REQUIRES(!envs_lock_);
// Dispatch event to all registered environments stashing exceptions as needed. This works since
// JNIEnv* is always the second argument if it is passed to an event. Needed since C++ does not
@@ -200,7 +206,8 @@ class EventHandler {
// the event to allocate local references.
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
- inline void DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const;
+ inline void DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const
+ REQUIRES(!envs_lock_);
// Tell the event handler capabilities were added/lost so it can adjust the sent events.If
// caps_added is true then caps is all the newly set capabilities of the jvmtiEnv. If it is false
@@ -208,30 +215,50 @@ class EventHandler {
ALWAYS_INLINE
inline void HandleChangedCapabilities(ArtJvmTiEnv* env,
const jvmtiCapabilities& caps,
- bool added);
+ bool added)
+ REQUIRES(!envs_lock_);
// Dispatch event to the given environment, only.
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
- inline void DispatchEventOnEnv(
- ArtJvmTiEnv* env, art::Thread* thread, JNIEnv* jnienv, Args... args) const;
+ inline void DispatchEventOnEnv(ArtJvmTiEnv* env,
+ art::Thread* thread,
+ JNIEnv* jnienv,
+ Args... args) const
+ REQUIRES(!envs_lock_);
// Dispatch event to the given environment, only.
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
- inline void DispatchEventOnEnv(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const;
+ inline void DispatchEventOnEnv(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const
+ REQUIRES(!envs_lock_);
private:
+ template <ArtJvmtiEvent kEvent, typename ...Args>
+ ALWAYS_INLINE
+ inline std::vector<impl::EventHandlerFunc<kEvent>> CollectEvents(art::Thread* thread,
+ Args... args) const
+ REQUIRES(!envs_lock_);
+
template <ArtJvmtiEvent kEvent>
ALWAYS_INLINE
- static inline bool ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread);
+ inline bool ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread) const;
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
- static inline void ExecuteCallback(ArtJvmTiEnv* env, Args... args);
+ static inline void ExecuteCallback(impl::EventHandlerFunc<kEvent> handler,
+ JNIEnv* env,
+ Args... args)
+ REQUIRES(!envs_lock_);
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
+ static inline void ExecuteCallback(impl::EventHandlerFunc<kEvent> handler, Args... args)
+ REQUIRES(!envs_lock_);
+
+ // Used by CollectEvents to determine which environments should receive a dispatch.
+ template <ArtJvmtiEvent kEvent, typename ...Args>
+ ALWAYS_INLINE
inline bool ShouldDispatch(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const;
ALWAYS_INLINE
@@ -241,7 +268,9 @@ class EventHandler {
// Recalculates the event mask for the given event.
ALWAYS_INLINE
- inline void RecalculateGlobalEventMask(ArtJvmtiEvent event);
+ inline void RecalculateGlobalEventMask(ArtJvmtiEvent event) REQUIRES(!envs_lock_);
+ ALWAYS_INLINE
+ inline void RecalculateGlobalEventMaskLocked(ArtJvmtiEvent event) REQUIRES(envs_lock_);
template <ArtJvmtiEvent kEvent>
ALWAYS_INLINE inline void DispatchClassFileLoadHookEvent(art::Thread* thread,
@@ -253,7 +282,8 @@ class EventHandler {
jint class_data_len,
const unsigned char* class_data,
jint* new_class_data_len,
- unsigned char** new_class_data) const;
+ unsigned char** new_class_data) const
+ REQUIRES(!envs_lock_);
void HandleEventType(ArtJvmtiEvent event, bool enable);
void HandleLocalAccessCapabilityAdded();
@@ -261,10 +291,13 @@ class EventHandler {
bool OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event);
- // List of all JvmTiEnv objects that have been created, in their creation order.
- // NB Some elements might be null representing envs that have been deleted. They should be skipped
- // anytime this list is used.
- std::vector<ArtJvmTiEnv*> envs;
+ // List of all JvmTiEnv objects that have been created, in their creation order. It is a std::list
+ // since we mostly access it by iterating over the entire thing, only ever append to the end, and
+ // need to be able to remove arbitrary elements from it.
+ std::list<ArtJvmTiEnv*> envs GUARDED_BY(envs_lock_);
+
+ // Top level lock. Nothing at all should be held when we lock this.
+ mutable art::Mutex envs_lock_ ACQUIRED_BEFORE(art::Locks::instrument_entrypoints_lock_);
// A union of all enabled events, anywhere.
EventMask global_mask;
diff --git a/openjdkjvmti/object_tagging.cc b/openjdkjvmti/object_tagging.cc
index 6ba7165577..ba242ef1e8 100644
--- a/openjdkjvmti/object_tagging.cc
+++ b/openjdkjvmti/object_tagging.cc
@@ -61,7 +61,8 @@ bool ObjectTagTable::DoesHandleNullOnSweep() {
return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree);
}
void ObjectTagTable::HandleNullSweep(jlong tag) {
- event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>(jvmti_env_, nullptr, tag);
+ event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>(
+ jvmti_env_, art::Thread::Current(), tag);
}
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_dump.cc b/openjdkjvmti/ti_dump.cc
index 809a5e47bb..253580e0e1 100644
--- a/openjdkjvmti/ti_dump.cc
+++ b/openjdkjvmti/ti_dump.cc
@@ -47,7 +47,7 @@ struct DumpCallback : public art::RuntimeSigQuitCallback {
void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Thread* thread = art::Thread::Current();
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
- event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(nullptr);
+ event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(art::Thread::Current());
}
EventHandler* event_handler = nullptr;
diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc
index 23df27fbda..7157974c13 100644
--- a/openjdkjvmti/ti_phase.cc
+++ b/openjdkjvmti/ti_phase.cc
@@ -57,6 +57,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
}
void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ art::Thread* self = art::Thread::Current();
switch (phase) {
case RuntimePhase::kInitialAgents:
PhaseUtil::current_phase_ = JVMTI_PHASE_PRIMORDIAL;
@@ -64,8 +65,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
case RuntimePhase::kStart:
{
PhaseUtil::current_phase_ = JVMTI_PHASE_START;
- art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
- event_handler->DispatchEvent<ArtJvmtiEvent::kVmStart>(nullptr, GetJniEnv());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kVmStart>(self, GetJniEnv());
}
break;
case RuntimePhase::kInit:
@@ -74,9 +74,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
PhaseUtil::current_phase_ = JVMTI_PHASE_LIVE;
{
ScopedLocalRef<jthread> thread(GetJniEnv(), GetCurrentJThread());
- art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
- event_handler->DispatchEvent<ArtJvmtiEvent::kVmInit>(
- nullptr, GetJniEnv(), thread.get());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kVmInit>(self, GetJniEnv(), thread.get());
}
// We need to have these events be ordered to match behavior expected by some real-world
// agents. The spec does not really require this but compatibility is a useful property to
@@ -86,8 +84,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
break;
case RuntimePhase::kDeath:
{
- art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
- event_handler->DispatchEvent<ArtJvmtiEvent::kVmDeath>(nullptr, GetJniEnv());
+ event_handler->DispatchEvent<ArtJvmtiEvent::kVmDeath>(self, GetJniEnv());
PhaseUtil::current_phase_ = JVMTI_PHASE_DEAD;
}
// TODO: Block events now.
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index c9d48ff7f7..587b092ab7 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -80,7 +80,9 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
// (see Thread::TransitionFromSuspendedToRunnable).
level == kThreadSuspendCountLock ||
// Avoid recursive death.
- level == kAbortLock) << level;
+ level == kAbortLock ||
+ // Locks at the absolute top of the stack can be locked at any time.
+ level == kTopLockLevel) << level;
}
}
@@ -92,10 +94,34 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
if (kDebugLocking) {
// Check if a bad Mutex of this level or lower is held.
bool bad_mutexes_held = false;
+ // Specifically allow a kTopLockLevel lock to be acquired when the current thread holds the
+ // mutator_lock_ exclusive. Suspending while holding locks at this level is not allowed, and a
+ // thread holding the mutator_lock_ exclusive must eventually resume the other threads, so no
+ // deadlock can result.
+ if (level_ == kTopLockLevel &&
+ Locks::mutator_lock_->IsSharedHeld(self) &&
+ !Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" "
+ << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock)
+ << ") non-exclusive while locking \"" << name_ << "\" "
+ << "(level " << level_ << " - " << static_cast<int>(level_) << ") a top level"
+ << "mutex. This is not allowed.";
+ bad_mutexes_held = true;
+ } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) {
+ LOG(ERROR) << "Lock level violation. Locking mutator_lock_ while already having a "
+ << "kTopLevelLock (" << self->GetHeldMutex(kTopLockLevel)->name_ << "held is "
+ << "not allowed.";
+ bad_mutexes_held = true;
+ }
for (int i = level_; i >= 0; --i) {
LockLevel lock_level_i = static_cast<LockLevel>(i);
BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i);
- if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) {
+ if (level_ == kTopLockLevel &&
+ lock_level_i == kMutatorLock &&
+ Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ // This is checked above.
+ continue;
+ } else if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) {
LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
<< "(level " << lock_level_i << " - " << i
<< ") while locking \"" << name_ << "\" "
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 87c4afe96f..c0cf4872de 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -122,6 +122,16 @@ enum LockLevel {
kInstrumentEntrypointsLock,
kZygoteCreationLock,
+ // The highest valid lock level. Use this for code that must only be called with no other locks
+ // held. Since this is the highest lock level we also allow it to be held even if the runtime or
+ // current thread is not fully set up yet (for example during thread attach). Note that this level
+ // has special behavior around the mutator_lock_: since the mutator_lock_ is not really a 'real'
+ // lock, we allow a lock of this level to be acquired while the mutator_lock_ is held exclusive.
+ // The mutator_lock_, however, may not be acquired in any form while a lock of this level is held.
+ // Because holding the mutator_lock_ exclusively means all other threads are suspended, this
+ // prevents deadlocks while still allowing this lock level to function as the "highest" level.
+ kTopLockLevel,
+
kLockLevelCount // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 613e4fe7c7..3784212ef0 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -345,7 +345,14 @@ Dbg::DbgThreadLifecycleCallback Dbg::thread_lifecycle_callback_;
Dbg::DbgClassLoadCallback Dbg::class_load_callback_;
void DebuggerDdmCallback::DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data) {
- Dbg::DdmSendChunk(type, data);
+ if (gJdwpState == nullptr) {
+ VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
+ } else {
+ iovec vec[1];
+ vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(data.data()));
+ vec[0].iov_len = data.size();
+ gJdwpState->DdmSendChunkV(type, vec, 1);
+ }
}
bool DebuggerActiveMethodInspectionCallback::IsMethodBeingInspected(ArtMethod* m ATTRIBUTE_UNUSED) {
@@ -4458,10 +4465,11 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
return;
}
+ RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
if (type == CHUNK_TYPE("THDE")) {
uint8_t buf[4];
JDWP::Set4BE(&buf[0], t->GetThreadId());
- Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
+ cb->DdmPublishChunk(CHUNK_TYPE("THDE"), ArrayRef<const uint8_t>(buf));
} else {
CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
ScopedObjectAccessUnchecked soa(Thread::Current());
@@ -4480,7 +4488,7 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
JDWP::AppendUtf16BE(bytes, chars, char_count);
}
CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
- Dbg::DdmSendChunk(type, bytes);
+ cb->DdmPublishChunk(type, ArrayRef<const uint8_t>(bytes));
}
}
@@ -4523,30 +4531,6 @@ void Dbg::PostThreadDeath(Thread* t) {
Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}
-void Dbg::DdmSendChunk(uint32_t type, const ArrayRef<const uint8_t>& data) {
- DdmSendChunk(type, data.size(), data.data());
-}
-
-void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
- CHECK(buf != nullptr);
- iovec vec[1];
- vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
- vec[0].iov_len = byte_count;
- Dbg::DdmSendChunkV(type, vec, 1);
-}
-
-void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
- DdmSendChunk(type, bytes.size(), &bytes[0]);
-}
-
-void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
- if (gJdwpState == nullptr) {
- VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
- } else {
- gJdwpState->DdmSendChunkV(type, iov, iov_count);
- }
-}
-
JDWP::JdwpState* Dbg::GetJdwpState() {
return gJdwpState;
}
@@ -4624,7 +4608,8 @@ void Dbg::DdmSendHeapInfo(HpifWhen reason) {
JDWP::Append4BE(bytes, heap->GetBytesAllocated());
JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
- Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
+ Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("HPIF"),
+ ArrayRef<const uint8_t>(bytes));
}
enum HpsgSolidity {
@@ -4710,7 +4695,8 @@ class HeapChunkContext {
CHECK_LE(pieceLenField_, p_);
JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
- Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
+ ArrayRef<const uint8_t> out(&buf_[0], p_ - &buf_[0]);
+ Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(type_, out);
Reset();
}
@@ -4892,6 +4878,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
if (when == HPSG_WHEN_NEVER) {
return;
}
+ RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
// Figure out what kind of chunks we'll be sending.
CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
<< static_cast<int>(what);
@@ -4899,7 +4886,8 @@ void Dbg::DdmSendHeapSegments(bool native) {
// First, send a heap start chunk.
uint8_t heap_id[4];
JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap).
- Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
+ cb->DdmPublishChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
+ ArrayRef<const uint8_t>(heap_id));
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertSharedHeld(self);
@@ -4958,7 +4946,8 @@ void Dbg::DdmSendHeapSegments(bool native) {
}
// Finally, send a heap end chunk.
- Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
+ cb->DdmPublishChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
+ ArrayRef<const uint8_t>(heap_id));
}
void Dbg::SetAllocTrackingEnabled(bool enable) {
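For reference, the pattern this change introduces on the runtime side: instead of calling the removed Dbg::DdmSendChunk helpers, callers wrap their payload in an ArrayRef<const uint8_t> and hand it to RuntimeCallbacks::DdmPublishChunk, which forwards it through the runtime's registered DDM callbacks. A minimal sketch (illustrative only; the chunk type and payload value are placeholders, not code from this commit):

    uint8_t buf[4];
    JDWP::Set4BE(&buf[0], 42u);  // Example payload: one big-endian uint32_t.
    Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(
        CHUNK_TYPE("THDE"), ArrayRef<const uint8_t>(buf));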
diff --git a/runtime/debugger.h b/runtime/debugger.h
index c3184e8374..d5bad8dc67 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -662,14 +662,6 @@ class Dbg {
static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen);
static void DdmConnected() REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmDisconnected() REQUIRES_SHARED(Locks::mutator_lock_);
- static void DdmSendChunk(uint32_t type, const ArrayRef<const uint8_t>& bytes)
- REQUIRES_SHARED(Locks::mutator_lock_);
- static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes)
- REQUIRES_SHARED(Locks::mutator_lock_);
- static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf)
- REQUIRES_SHARED(Locks::mutator_lock_);
- static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
- REQUIRES_SHARED(Locks::mutator_lock_);
// Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
static void VisitRoots(RootVisitor* visitor)
diff --git a/runtime/dex_file_layout.cc b/runtime/dex_file_layout.cc
index c3fae15b14..1973440d55 100644
--- a/runtime/dex_file_layout.cc
+++ b/runtime/dex_file_layout.cc
@@ -26,10 +26,10 @@ namespace art {
void DexLayoutSection::Subsection::Madvise(const DexFile* dex_file, int advice) const {
DCHECK(dex_file != nullptr);
- DCHECK_LE(size_, dex_file->Size());
- DCHECK_LE(offset_ + size_, dex_file->Size());
- MadviseLargestPageAlignedRegion(dex_file->Begin() + offset_,
- dex_file->Begin() + offset_ + size_,
+ DCHECK_LT(start_offset_, dex_file->Size());
+ DCHECK_LE(end_offset_, dex_file->Size());
+ MadviseLargestPageAlignedRegion(dex_file->Begin() + start_offset_,
+ dex_file->Begin() + end_offset_,
advice);
}
@@ -69,7 +69,7 @@ std::ostream& operator<<(std::ostream& os, const DexLayoutSection& section) {
for (size_t i = 0; i < static_cast<size_t>(LayoutType::kLayoutTypeCount); ++i) {
const DexLayoutSection::Subsection& part = section.parts_[i];
os << static_cast<LayoutType>(i) << "("
- << part.offset_ << "-" << part.offset_ + part.size_ << ") ";
+ << part.start_offset_ << "-" << part.end_offset_ << ") ";
}
return os;
}
diff --git a/runtime/dex_file_layout.h b/runtime/dex_file_layout.h
index 40cc91232e..4c960c3ff5 100644
--- a/runtime/dex_file_layout.h
+++ b/runtime/dex_file_layout.h
@@ -17,22 +17,25 @@
#ifndef ART_RUNTIME_DEX_FILE_LAYOUT_H_
#define ART_RUNTIME_DEX_FILE_LAYOUT_H_
+#include <algorithm>
#include <cstdint>
#include <iosfwd>
+#include "base/logging.h"
+
namespace art {
class DexFile;
enum class LayoutType : uint8_t {
+ // Layout of things that are hot (commonly accessed); these should be pinned or madvised as
+ // needed.
+ kLayoutTypeHot,
// Layout of things that are randomly used. These should be advised to random access.
// Without layout, this is the default mode when loading a dex file.
kLayoutTypeSometimesUsed,
// Layout of things that are only used during startup, these can be madvised after launch.
kLayoutTypeStartupOnly,
- // Layout of things that are hot (commonly accessed), these should be pinned or madvised will
- // need.
- kLayoutTypeHot,
// Layout of things that are needed probably only once (class initializers). These can be
// madvised during trim events.
kLayoutTypeUsedOnce,
@@ -44,6 +47,11 @@ enum class LayoutType : uint8_t {
};
std::ostream& operator<<(std::ostream& os, const LayoutType& collector_type);
+// Return the "best" layout option if the same item has multiple different layouts.
+static inline LayoutType MergeLayoutType(LayoutType a, LayoutType b) {
+ return std::min(a, b);
+}
+
enum class MadviseState : uint8_t {
// Madvise based on a file that was just loaded.
kMadviseStateAtLoad,
@@ -55,15 +63,35 @@ enum class MadviseState : uint8_t {
std::ostream& operator<<(std::ostream& os, const MadviseState& collector_type);
// A dex layout section such as code items or strings. Each section is composed of subsections
-// that are layed out ajacently to each other such as (hot, unused, startup, etc...).
+// that are laid out adjacently to each other such as (hot, unused, startup, etc...).
class DexLayoutSection {
public:
+ // A subsection is a contiguous range of the dex file that is all part of the same layout hint.
class Subsection {
public:
// Use uint32_t to handle 32/64 bit cross compilation.
- uint32_t offset_ = 0u;
- uint32_t size_ = 0u;
+ uint32_t start_offset_ = 0u;
+ uint32_t end_offset_ = 0u;
+
+ bool Contains(uint32_t offset) const {
+ return start_offset_ <= offset && offset < end_offset_;
+ }
+
+ uint32_t Size() const {
+ DCHECK_LE(start_offset_, end_offset_);
+ return end_offset_ - start_offset_;
+ }
+
+ void CombineSection(uint32_t start_offset, uint32_t end_offset) {
+ DCHECK_LT(start_offset, end_offset);
+ if (start_offset_ == end_offset_) {
+ start_offset_ = start_offset;
+ end_offset_ = end_offset;
+ } else {
+ start_offset_ = std::min(start_offset_, start_offset);
+ end_offset_ = std::max(end_offset_, end_offset);
+ }
+ }
void Madvise(const DexFile* dex_file, int advice) const;
};
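The subsections now use half-open [start_offset_, end_offset_) ranges, so an empty subsection is simply start_offset_ == end_offset_, CombineSection() widens the range to cover both inputs, and MergeLayoutType() picks the stronger of two hints now that kLayoutTypeHot sorts first. A minimal sketch of the intended behaviour (illustrative only, not part of the commit):

    DexLayoutSection::Subsection s;  // Default-constructed: empty, start_offset_ == end_offset_ == 0.
    s.CombineSection(0x100, 0x200);  // First range is taken as-is: [0x100, 0x200).
    s.CombineSection(0x80, 0x180);   // Later ranges widen it: [0x80, 0x200).
    CHECK(s.Contains(0x1ff));
    CHECK(!s.Contains(0x200));       // The end offset is exclusive.
    // With kLayoutTypeHot ordered first, the "best" (hottest) hint wins a merge.
    CHECK(MergeLayoutType(LayoutType::kLayoutTypeHot, LayoutType::kLayoutTypeStartupOnly) ==
          LayoutType::kLayoutTypeHot);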
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 025952f7a9..edf5650df1 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -471,7 +471,9 @@ bool DexFileVerifier::CheckMap() {
if (IsDataSectionType(item_type)) {
uint32_t icount = item->size_;
if (UNLIKELY(icount > data_items_left)) {
- ErrorStringPrintf("Too many items in data section: %ud", data_item_count + icount);
+ ErrorStringPrintf("Too many items in data section: %ud item_type %zx",
+ data_item_count + icount,
+ static_cast<size_t>(item_type));
return false;
}
data_items_left -= icount;
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 3bf169ad40..548752e980 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -21,6 +21,8 @@
#include "base/strlcpy.h"
#include "java_vm_ext.h"
#include "runtime.h"
+#include "thread-current-inl.h"
+#include "scoped_thread_state_change-inl.h"
namespace art {
namespace ti {
@@ -35,6 +37,7 @@ const char* AGENT_ON_UNLOAD_FUNCTION_NAME = "Agent_OnUnload";
Agent::LoadError Agent::DoLoadHelper(bool attaching,
/*out*/jint* call_res,
/*out*/std::string* error_msg) {
+ ScopedThreadStateChange stsc(Thread::Current(), ThreadState::kNative);
DCHECK(call_res != nullptr);
DCHECK(error_msg != nullptr);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4b5a7610a3..a113ab5cc8 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -413,42 +413,40 @@ void Trace::StopTracing(bool finish_tracing, bool flush_file) {
sampling_pthread_ = 0U;
}
- {
+ if (the_trace != nullptr) {
+ stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
+ if (finish_tracing) {
+ the_trace->FinishTracing();
+ }
gc::ScopedGCCriticalSection gcs(self,
gc::kGcCauseInstrumentation,
gc::kCollectorTypeInstrumentation);
ScopedSuspendAll ssa(__FUNCTION__);
- if (the_trace != nullptr) {
- stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
- if (finish_tracing) {
- the_trace->FinishTracing();
- }
- if (the_trace->trace_mode_ == TraceMode::kSampling) {
- MutexLock mu(self, *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
+ if (the_trace->trace_mode_ == TraceMode::kSampling) {
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
+ } else {
+ runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
+ runtime->GetInstrumentation()->RemoveListener(
+ the_trace, instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kMethodExited |
+ instrumentation::Instrumentation::kMethodUnwind);
+ }
+ if (the_trace->trace_file_.get() != nullptr) {
+ // Do not try to erase, so flush and close explicitly.
+ if (flush_file) {
+ if (the_trace->trace_file_->Flush() != 0) {
+ PLOG(WARNING) << "Could not flush trace file.";
+ }
} else {
- runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
- runtime->GetInstrumentation()->RemoveListener(
- the_trace, instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kMethodUnwind);
+ the_trace->trace_file_->MarkUnchecked(); // Do not trigger guard.
}
- if (the_trace->trace_file_.get() != nullptr) {
- // Do not try to erase, so flush and close explicitly.
- if (flush_file) {
- if (the_trace->trace_file_->Flush() != 0) {
- PLOG(WARNING) << "Could not flush trace file.";
- }
- } else {
- the_trace->trace_file_->MarkUnchecked(); // Do not trigger guard.
- }
- if (the_trace->trace_file_->Close() != 0) {
- PLOG(ERROR) << "Could not close trace file.";
- }
+ if (the_trace->trace_file_->Close() != 0) {
+ PLOG(ERROR) << "Could not close trace file.";
}
- delete the_trace;
}
+ delete the_trace;
}
if (stop_alloc_counting) {
// Can be racy since SetStatsEnabled is not guarded by any locks.
@@ -717,12 +715,12 @@ void Trace::FinishTracing() {
FlushBuf();
} else {
if (trace_file_.get() == nullptr) {
- iovec iov[2];
- iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str()));
- iov[0].iov_len = header.length();
- iov[1].iov_base = buf_.get();
- iov[1].iov_len = final_offset;
- Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
+ std::vector<uint8_t> data;
+ data.resize(header.length() + final_offset);
+ memcpy(data.data(), header.c_str(), header.length());
+ memcpy(data.data() + header.length(), buf_.get(), final_offset);
+ Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("MPSE"),
+ ArrayRef<const uint8_t>(data));
const bool kDumpTraceInfo = false;
if (kDumpTraceInfo) {
LOG(INFO) << "Trace sent:\n" << header;
diff --git a/test/1940-ddms-ext/expected.txt b/test/1940-ddms-ext/expected.txt
index cf4ad50e90..62d3b7bd4c 100644
--- a/test/1940-ddms-ext/expected.txt
+++ b/test/1940-ddms-ext/expected.txt
@@ -5,3 +5,6 @@ MyDdmHandler: Chunk returned: Chunk(Type: 0xFADE7357, Len: 8, data: [0, 0, 0, 0,
JVMTI returned chunk: Chunk(Type: 0xFADE7357, Len: 8, data: [0, 0, 0, 0, 0, -128, 0, 37])
Sending chunk: Chunk(Type: 0xDEADBEEF, Len: 8, data: [9, 10, 11, 12, 13, 14, 15, 16])
Chunk published: Chunk(Type: 0xDEADBEEF, Len: 8, data: [9, 10, 11, 12, 13, 14, 15, 16])
+Saw expected thread events.
+Expected chunk type published: 1213221190
+Expected chunk type published: 1297109829
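The two new integers are the decimal values of the four-character DDM chunk tags the updated test listens for: 1213221190 == 0x48504946 ("HPIF", heap info) and 1297109829 == 0x4D505345 ("MPSE", method-trace results), i.e. the big-endian packing of the ASCII tag bytes. A quick sanity check of that arithmetic (illustrative only; FourCC is a hypothetical helper, not code from this commit):

    constexpr uint32_t FourCC(char a, char b, char c, char d) {
      return (static_cast<uint32_t>(a) << 24) | (static_cast<uint32_t>(b) << 16) |
             (static_cast<uint32_t>(c) << 8) | static_cast<uint32_t>(d);
    }
    static_assert(FourCC('H', 'P', 'I', 'F') == 1213221190u, "HPIF");  // 0x48504946
    static_assert(FourCC('M', 'P', 'S', 'E') == 1297109829u, "MPSE");  // 0x4D505345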
diff --git a/test/1940-ddms-ext/src-art/art/Test1940.java b/test/1940-ddms-ext/src-art/art/Test1940.java
index f0ee7102a9..9f79eaebba 100644
--- a/test/1940-ddms-ext/src-art/art/Test1940.java
+++ b/test/1940-ddms-ext/src-art/art/Test1940.java
@@ -17,6 +17,9 @@
package art;
import org.apache.harmony.dalvik.ddmc.*;
+import dalvik.system.VMDebug;
import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
import java.util.Arrays;
@@ -30,6 +31,12 @@ public class Test1940 {
public static final int MY_DDMS_TYPE = 0xDEADBEEF;
public static final int MY_DDMS_RESPONSE_TYPE = 0xFADE7357;
+ public static final boolean PRINT_ALL_CHUNKS = false;
+
+ public static interface DdmHandler {
+ public void HandleChunk(int type, byte[] data);
+ }
+
public static final class TestError extends Error {
public TestError(String s) { super(s); }
}
@@ -69,11 +76,38 @@ public class Test1940 {
public static final ChunkHandler SINGLE_HANDLER = new MyDdmHandler();
+ public static DdmHandler CURRENT_HANDLER;
+
public static void HandlePublish(int type, byte[] data) {
- System.out.println("Chunk published: " + printChunk(new Chunk(type, data, 0, data.length)));
+ if (PRINT_ALL_CHUNKS) {
+ System.out.println(
+ "Unknown Chunk published: " + printChunk(new Chunk(type, data, 0, data.length)));
+ }
+ CURRENT_HANDLER.HandleChunk(type, data);
+ }
+
+ // Chunk type: thread creation (THCR).
+ public static final int TYPE_THCR = 0x54484352;
+ // Chunk type: thread name (THNM).
+ public static final int TYPE_THNM = 0x54484E4D;
+ // Chunk type: thread death (THDE).
+ public static final int TYPE_THDE = 0x54484445;
+ // Chunk type: heap info (HPIF).
+ public static final int TYPE_HPIF = 0x48504946;
+ // Chunk type: method-trace results (MPSE).
+ public static final int TYPE_MPSE = 0x4D505345;
+
+ public static boolean IsFromThread(Thread t, byte[] data) {
+ // DDMS always puts the thread-id as the first 4 bytes.
+ ByteBuffer b = ByteBuffer.wrap(data);
+ b.order(ByteOrder.BIG_ENDIAN);
+ return b.getInt() == (int) t.getId();
}
public static void run() throws Exception {
+ CURRENT_HANDLER = (type, data) -> {
+ System.out.println("Chunk published: " + printChunk(new Chunk(type, data, 0, data.length)));
+ };
initializeTest(
Test1940.class,
Test1940.class.getDeclaredMethod("HandlePublish", Integer.TYPE, new byte[0].getClass()));
@@ -90,8 +124,70 @@ public class Test1940 {
MY_DDMS_TYPE, new byte[] { 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 }, 0, 8);
System.out.println("Sending chunk: " + printChunk(c));
DdmServer.sendChunk(c);
+
+ // Test thread chunks are sent.
+ final boolean[] types_seen = new boolean[] { false, false, false };
+ CURRENT_HANDLER = (type, cdata) -> {
+ switch (type) {
+ case TYPE_THCR:
+ types_seen[0] = true;
+ break;
+ case TYPE_THNM:
+ types_seen[1] = true;
+ break;
+ case TYPE_THDE:
+ types_seen[2] = true;
+ break;
+ default:
+ // We don't want to print other types.
+ break;
+ }
+ };
+ DdmVmInternal.threadNotify(true);
+ final Thread thr = new Thread(() -> { return; }, "THREAD");
+ thr.start();
+ thr.join();
+ DdmVmInternal.threadNotify(false);
+ // Make sure we saw all three of thread-create, thread-name, and thread-death.
+ if (!types_seen[0] || !types_seen[1] || !types_seen[2]) {
+ System.out.println("Didn't see expected chunks for thread creation! got: " +
+ Arrays.toString(types_seen));
+ } else {
+ System.out.println("Saw expected thread events.");
+ }
+
+ // Test heap chunks are sent.
+ CURRENT_HANDLER = (type, cdata) -> {
+ // The actual data is far too noisy for this test as it includes information about global heap
+ // state.
+ if (type == TYPE_HPIF) {
+ System.out.println("Expected chunk type published: " + type);
+ }
+ };
+ final int HPIF_WHEN_NOW = 1;
+ if (!DdmVmInternal.heapInfoNotify(HPIF_WHEN_NOW)) {
+ System.out.println("Unexpected failure for heapInfoNotify!");
+ }
+
+ // method Tracing
+ CURRENT_HANDLER = (type, cdata) -> {
+ // This chunk includes timing and thread information so we just check the type.
+ if (type == TYPE_MPSE) {
+ System.out.println("Expected chunk type published: " + type);
+ }
+ };
+ VMDebug.startMethodTracingDdms(/*size: default*/0,
+ /*flags: none*/ 0,
+ /*sampling*/ false,
+ /*interval*/ 0);
+ doNothing();
+ doNothing();
+ doNothing();
+ doNothing();
+ VMDebug.stopMethodTracing();
}
+ private static void doNothing() {}
private static Chunk processChunk(byte[] val) {
return processChunk(new Chunk(MY_DDMS_TYPE, val, 0, val.length));
}
diff --git a/test/1941-dispose-stress/dispose_stress.cc b/test/1941-dispose-stress/dispose_stress.cc
new file mode 100644
index 0000000000..e8fcc775e9
--- /dev/null
+++ b/test/1941-dispose-stress/dispose_stress.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#include "android-base/logging.h"
+#include "jni.h"
+#include "scoped_local_ref.h"
+#include "scoped_primitive_array.h"
+
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test1941DisposeStress {
+
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test1941_AllocEnv(JNIEnv* env, jclass) {
+ JavaVM* vm = nullptr;
+ if (env->GetJavaVM(&vm) != 0) {
+ ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+ env->ThrowNew(rt_exception.get(), "Unable to get JavaVM");
+ return -1;
+ }
+ jvmtiEnv* new_env = nullptr;
+ if (vm->GetEnv(reinterpret_cast<void**>(&new_env), JVMTI_VERSION_1_0) != 0) {
+ ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+ env->ThrowNew(rt_exception.get(), "Unable to create new jvmtiEnv");
+ return -1;
+ }
+ return static_cast<jlong>(reinterpret_cast<intptr_t>(new_env));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1941_FreeEnv(JNIEnv* env,
+ jclass,
+ jlong jvmti_env_ptr) {
+ JvmtiErrorToException(env,
+ jvmti_env,
+ reinterpret_cast<jvmtiEnv*>(jvmti_env_ptr)->DisposeEnvironment());
+}
+
+} // namespace Test1941DisposeStress
+} // namespace art
+
diff --git a/test/1941-dispose-stress/expected.txt b/test/1941-dispose-stress/expected.txt
new file mode 100644
index 0000000000..ca2eddc7b8
--- /dev/null
+++ b/test/1941-dispose-stress/expected.txt
@@ -0,0 +1 @@
+fib(20) is 6765
diff --git a/test/1941-dispose-stress/info.txt b/test/1941-dispose-stress/info.txt
new file mode 100644
index 0000000000..e4a584e46f
--- /dev/null
+++ b/test/1941-dispose-stress/info.txt
@@ -0,0 +1,3 @@
+Test that disposing jvmtiEnvs under load is safe.
+
+Ensures that repeatedly allocating and disposing jvmtiEnvs on another thread while single-step events are being delivered does not cause problems.
diff --git a/test/1941-dispose-stress/run b/test/1941-dispose-stress/run
new file mode 100755
index 0000000000..51875a7e86
--- /dev/null
+++ b/test/1941-dispose-stress/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run the test with the jvmti agent attached.
+./default-run "$@" --jvmti
diff --git a/test/1941-dispose-stress/src/Main.java b/test/1941-dispose-stress/src/Main.java
new file mode 100644
index 0000000000..2fe6b818a0
--- /dev/null
+++ b/test/1941-dispose-stress/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1941.run();
+ }
+}
diff --git a/test/1941-dispose-stress/src/art/Breakpoint.java b/test/1941-dispose-stress/src/art/Breakpoint.java
new file mode 100644
index 0000000000..bbb89f707f
--- /dev/null
+++ b/test/1941-dispose-stress/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+ public static class Manager {
+ public static class BP {
+ public final Executable method;
+ public final long location;
+
+ public BP(Executable method) {
+ this(method, getStartLocation(method));
+ }
+
+ public BP(Executable method, long location) {
+ this.method = method;
+ this.location = location;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return (other instanceof BP) &&
+ method.equals(((BP)other).method) &&
+ location == ((BP)other).location;
+ }
+
+ @Override
+ public String toString() {
+ return method.toString() + " @ " + getLine();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(method, location);
+ }
+
+ public int getLine() {
+ try {
+ LineNumber[] lines = getLineNumberTable(method);
+ int best = -1;
+ for (LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+ }
+
+ private Set<BP> breaks = new HashSet<>();
+
+ public void setBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.add(b)) {
+ Breakpoint.setBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void setBreakpoint(Executable method, long location) {
+ setBreakpoints(new BP(method, location));
+ }
+
+ public void clearBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.remove(b)) {
+ Breakpoint.clearBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void clearBreakpoint(Executable method, long location) {
+ clearBreakpoints(new BP(method, location));
+ }
+
+ public void clearAllBreakpoints() {
+ clearBreakpoints(breaks.toArray(new BP[0]));
+ }
+ }
+
+ public static void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ Thread thr) {
+ startBreakpointWatch(methodClass, breakpointReached, false, thr);
+ }
+
+ /**
+ * Enables the trapping of breakpoint events.
+ *
+ * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+ */
+ public static native void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ boolean allowRecursive,
+ Thread thr);
+ public static native void stopBreakpointWatch(Thread thr);
+
+ public static final class LineNumber implements Comparable<LineNumber> {
+ public final long location;
+ public final int line;
+
+ private LineNumber(long loc, int line) {
+ this.location = loc;
+ this.line = line;
+ }
+
+ public boolean equals(Object other) {
+ return other instanceof LineNumber && ((LineNumber)other).line == line &&
+ ((LineNumber)other).location == location;
+ }
+
+ public int compareTo(LineNumber other) {
+ int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+ if (v != 0) {
+ return v;
+ } else {
+ return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+ }
+ }
+ }
+
+ public static native void setBreakpoint(Executable m, long loc);
+ public static void setBreakpoint(Executable m, LineNumber l) {
+ setBreakpoint(m, l.location);
+ }
+
+ public static native void clearBreakpoint(Executable m, long loc);
+ public static void clearBreakpoint(Executable m, LineNumber l) {
+ clearBreakpoint(m, l.location);
+ }
+
+ private static native Object[] getLineNumberTableNative(Executable m);
+ public static LineNumber[] getLineNumberTable(Executable m) {
+ Object[] nativeTable = getLineNumberTableNative(m);
+ long[] location = (long[])(nativeTable[0]);
+ int[] lines = (int[])(nativeTable[1]);
+ if (lines.length != location.length) {
+ throw new Error("Lines and locations have different lengths!");
+ }
+ LineNumber[] out = new LineNumber[lines.length];
+ for (int i = 0; i < lines.length; i++) {
+ out[i] = new LineNumber(location[i], lines[i]);
+ }
+ return out;
+ }
+
+ public static native long getStartLocation(Executable m);
+
+ public static int locationToLine(Executable m, long location) {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ int best = -1;
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static long lineToLocation(Executable m, int line) throws Exception {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.line == line) {
+ return l.location;
+ }
+ }
+ throw new Exception("Unable to find line " + line + " in " + m);
+ } catch (Exception e) {
+ throw new Exception("Unable to get line number info for " + m, e);
+ }
+ }
+}
+
diff --git a/test/1941-dispose-stress/src/art/Test1941.java b/test/1941-dispose-stress/src/art/Test1941.java
new file mode 100644
index 0000000000..d5a9de6cab
--- /dev/null
+++ b/test/1941-dispose-stress/src/art/Test1941.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Arrays;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Method;
+
+public class Test1941 {
+ public static final boolean PRINT_CNT = false;
+ public static long CNT = 0;
+
+ // Method with multiple paths we can break on.
+ public static long fib(long f) {
+ if (f < 0) {
+ throw new IllegalArgumentException("Bad argument f < 0: f = " + f);
+ } else if (f == 0) {
+ return 0;
+ } else if (f == 1) {
+ return 1;
+ } else {
+ return fib(f - 1) + fib(f - 2);
+ }
+ }
+
+ public static void notifySingleStep(Thread thr, Executable e, long loc) {
+ // Don't bother actually doing anything.
+ }
+
+ public static void LoopAllocFreeEnv() {
+ while (!Thread.interrupted()) {
+ CNT++;
+ long env = AllocEnv();
+ FreeEnv(env);
+ }
+ }
+
+ public static native long AllocEnv();
+ public static native void FreeEnv(long env);
+
+ public static void run() throws Exception {
+ Thread thr = new Thread(Test1941::LoopAllocFreeEnv, "LoopNative");
+ thr.start();
+ Trace.enableSingleStepTracing(Test1941.class,
+ Test1941.class.getDeclaredMethod(
+ "notifySingleStep", Thread.class, Executable.class, Long.TYPE),
+ null);
+
+ System.out.println("fib(20) is " + fib(20));
+
+ thr.interrupt();
+ thr.join();
+ Trace.disableTracing(null);
+ if (PRINT_CNT) {
+ System.out.println("Number of envs created/destroyed: " + CNT);
+ }
+ }
+}
diff --git a/test/1941-dispose-stress/src/art/Trace.java b/test/1941-dispose-stress/src/art/Trace.java
new file mode 100644
index 0000000000..8999bb1368
--- /dev/null
+++ b/test/1941-dispose-stress/src/art/Trace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+public class Trace {
+ public static native void enableTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Thread thr);
+ public static native void disableTracing(Thread thr);
+
+ public static void enableFieldTracing(Class<?> methodClass,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr) {
+ enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
+ }
+
+ public static void enableMethodTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Thread thr) {
+ enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
+ }
+
+ public static void enableSingleStepTracing(Class<?> methodClass,
+ Method singleStep,
+ Thread thr) {
+ enableTracing(methodClass, null, null, null, null, singleStep, thr);
+ }
+
+ public static native void watchFieldAccess(Field f);
+ public static native void watchFieldModification(Field f);
+ public static native void watchAllFieldAccesses();
+ public static native void watchAllFieldModifications();
+
+ // The names, arguments, and even line numbers of these functions are embedded in the tests, so
+ // to maintain compatibility new functions must be added at the bottom and old ones left unchanged.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
+}
diff --git a/test/Android.bp b/test/Android.bp
index ba24119e9c..8f29251907 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -259,6 +259,7 @@ art_cc_defaults {
"1932-monitor-events-misc/monitor_misc.cc",
"1934-jvmti-signal-thread/signal_threads.cc",
"1939-proxy-frames/local_instance.cc",
+ "1941-dispose-stress/dispose_stress.cc",
],
shared_libs: [
"libbase",
diff --git a/test/knownfailures.json b/test/knownfailures.json
index c6b6574f1b..5b2ebf58a4 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -202,6 +202,12 @@
"bug": "http://b/34369284"
},
{
+ "tests": "1940-ddms-ext",
+ "description": ["Test expects to be able to start tracing but we cannot",
+ "do that if tracing is already ongoing."],
+ "variant": "trace | stream"
+ },
+ {
"tests": "137-cfi",
"description": ["This test unrolls and expects managed frames, but",
"tracing means we run the interpreter."],
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index fd711bbd8b..abcc728890 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -103,5 +103,11 @@
result: EXEC_FAILED,
bug: 69169846,
name: "org.apache.harmony.jpda.tests.jdwp.DDM.DDMTest#testChunk001"
+},
+{
+ description: "Test crashes",
+ result: EXEC_FAILED,
+ bug: 69591477,
+ name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.ExitTest#testExit001"
}
]
diff --git a/tools/libjdwp_oj_art_failures.txt b/tools/libjdwp_oj_art_failures.txt
index 3d06bcf100..e1cc831303 100644
--- a/tools/libjdwp_oj_art_failures.txt
+++ b/tools/libjdwp_oj_art_failures.txt
@@ -73,5 +73,11 @@
result: EXEC_FAILED,
bug: 69169846,
name: "org.apache.harmony.jpda.tests.jdwp.DDM.DDMTest#testChunk001"
+},
+{
+ description: "Test crashes",
+ result: EXEC_FAILED,
+ bug: 69591477,
+ name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.ExitTest#testExit001"
}
]