diff options
28 files changed, 1426 insertions, 405 deletions
diff --git a/compiler/Android.bp b/compiler/Android.bp index 859947108e..249aaf5632 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -323,6 +323,7 @@ art_cc_test { "linker/method_bss_mapping_encoder_test.cc", "linker/output_stream_test.cc", "optimizing/bounds_check_elimination_test.cc", + "optimizing/cloner_test.cc", "optimizing/data_type_test.cc", "optimizing/dominator_test.cc", "optimizing/find_loops_test.cc", diff --git a/compiler/optimizing/cloner_test.cc b/compiler/optimizing/cloner_test.cc new file mode 100644 index 0000000000..d34dd81767 --- /dev/null +++ b/compiler/optimizing/cloner_test.cc @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "graph_checker.h" +#include "nodes.h" +#include "optimizing_unit_test.h" + +#include "gtest/gtest.h" + +namespace art { + +// This class provides methods and helpers for testing various cloning and copying routines: +// individual instruction cloning and cloning of the more coarse-grain structures. 
+class ClonerTest : public OptimizingUnitTest { + public: + ClonerTest() + : graph_(CreateGraph()), entry_block_(nullptr), exit_block_(nullptr), parameter_(nullptr) {} + + void CreateBasicLoopControlFlow(/* out */ HBasicBlock** header_p, + /* out */ HBasicBlock** body_p) { + entry_block_ = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(entry_block_); + graph_->SetEntryBlock(entry_block_); + + HBasicBlock* loop_preheader = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* loop_exit = new (GetAllocator()) HBasicBlock(graph_); + + graph_->AddBlock(loop_preheader); + graph_->AddBlock(loop_header); + graph_->AddBlock(loop_body); + graph_->AddBlock(loop_exit); + + exit_block_ = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(exit_block_); + graph_->SetExitBlock(exit_block_); + + entry_block_->AddSuccessor(loop_preheader); + loop_preheader->AddSuccessor(loop_header); + // Loop exit first to have a proper exit condition/target for HIf. + loop_header->AddSuccessor(loop_exit); + loop_header->AddSuccessor(loop_body); + loop_body->AddSuccessor(loop_header); + loop_exit->AddSuccessor(exit_block_); + + *header_p = loop_header; + *body_p = loop_body; + + parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kInt32); + entry_block_->AddInstruction(parameter_); + loop_exit->AddInstruction(new (GetAllocator()) HReturnVoid()); + exit_block_->AddInstruction(new (GetAllocator()) HExit()); + } + + void CreateBasicLoopDataFlow(HBasicBlock* loop_header, HBasicBlock* loop_body) { + uint32_t dex_pc = 0; + + // Entry block. + HIntConstant* const_0 = graph_->GetIntConstant(0); + HIntConstant* const_1 = graph_->GetIntConstant(1); + HIntConstant* const_128 = graph_->GetIntConstant(128); + + // Header block. 
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + HInstruction* suspend_check = new (GetAllocator()) HSuspendCheck(); + + loop_header->AddPhi(phi); + loop_header->AddInstruction(suspend_check); + loop_header->AddInstruction(new (GetAllocator()) HGreaterThanOrEqual(phi, const_128)); + loop_header->AddInstruction(new (GetAllocator()) HIf(parameter_)); + + // Loop body block. + HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter_, dex_pc); + HInstruction* array_length = new (GetAllocator()) HArrayLength(null_check, dex_pc); + HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(phi, array_length, dex_pc); + HInstruction* array_get = + new (GetAllocator()) HArrayGet(null_check, bounds_check, DataType::Type::kInt32, dex_pc); + HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_get, const_1); + HInstruction* array_set = + new (GetAllocator()) HArraySet(null_check, bounds_check, add, DataType::Type::kInt32, dex_pc); + HInstruction* induction_inc = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, const_1); + + loop_body->AddInstruction(null_check); + loop_body->AddInstruction(array_length); + loop_body->AddInstruction(bounds_check); + loop_body->AddInstruction(array_get); + loop_body->AddInstruction(add); + loop_body->AddInstruction(array_set); + loop_body->AddInstruction(induction_inc); + loop_body->AddInstruction(new (GetAllocator()) HGoto()); + + phi->AddInput(const_0); + phi->AddInput(induction_inc); + + graph_->SetHasBoundsChecks(true); + + // Adjust HEnvironment for each instruction which require that. 
+ ArenaVector<HInstruction*> current_locals({phi, const_128, parameter_}, + GetAllocator()->Adapter(kArenaAllocInstruction)); + + HEnvironment* env = ManuallyBuildEnvFor(suspend_check, &current_locals); + null_check->CopyEnvironmentFrom(env); + bounds_check->CopyEnvironmentFrom(env); + } + + HEnvironment* ManuallyBuildEnvFor(HInstruction* instruction, + ArenaVector<HInstruction*>* current_locals) { + HEnvironment* environment = new (GetAllocator()) HEnvironment( + (GetAllocator()), + current_locals->size(), + graph_->GetArtMethod(), + instruction->GetDexPc(), + instruction); + + environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals)); + instruction->SetRawEnvironment(environment); + return environment; + } + + bool CheckGraph() { + GraphChecker checker(graph_); + checker.Run(); + if (!checker.IsValid()) { + for (const std::string& error : checker.GetErrors()) { + std::cout << error << std::endl; + } + return false; + } + return true; + } + + HGraph* graph_; + + HBasicBlock* entry_block_; + HBasicBlock* exit_block_; + + HInstruction* parameter_; +}; + +TEST_F(ClonerTest, IndividualInstrCloner) { + HBasicBlock* header = nullptr; + HBasicBlock* loop_body = nullptr; + + CreateBasicLoopControlFlow(&header, &loop_body); + CreateBasicLoopDataFlow(header, loop_body); + graph_->BuildDominatorTree(); + ASSERT_TRUE(CheckGraph()); + + HSuspendCheck* old_suspend_check = header->GetLoopInformation()->GetSuspendCheck(); + CloneAndReplaceInstructionVisitor visitor(graph_); + // Do instruction cloning and replacement twice with different visiting order. 
+ + visitor.VisitInsertionOrder(); + size_t instr_replaced_by_clones_count = visitor.GetInstrReplacedByClonesCount(); + EXPECT_EQ(instr_replaced_by_clones_count, 12u); + EXPECT_TRUE(CheckGraph()); + + visitor.VisitReversePostOrder(); + instr_replaced_by_clones_count = visitor.GetInstrReplacedByClonesCount(); + EXPECT_EQ(instr_replaced_by_clones_count, 24u); + EXPECT_TRUE(CheckGraph()); + + HSuspendCheck* new_suspend_check = header->GetLoopInformation()->GetSuspendCheck(); + EXPECT_NE(new_suspend_check, old_suspend_check); + EXPECT_NE(new_suspend_check, nullptr); +} + +} // namespace art diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index fbfee12be9..4c18e16c48 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -27,6 +27,10 @@ namespace art { +// Whether to run an exhaustive test of individual HInstructions cloning when each instruction +// is replaced with its copy if it is clonable. 
+static constexpr bool kTestInstructionClonerExhaustively = false; + class InstructionSimplifierVisitor : public HGraphDelegateVisitor { public: InstructionSimplifierVisitor(HGraph* graph, @@ -130,6 +134,11 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor { }; void InstructionSimplifier::Run() { + if (kTestInstructionClonerExhaustively) { + CloneAndReplaceInstructionVisitor visitor(graph_); + visitor.VisitReversePostOrder(); + } + InstructionSimplifierVisitor visitor(graph_, codegen_, compiler_driver_, stats_); visitor.Run(); } diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index fff61f5727..fa580d9bed 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -845,6 +845,13 @@ static void UpdateInputsUsers(HInstruction* instruction) { DCHECK(!instruction->HasEnvironment()); } +void HBasicBlock::ReplaceAndRemovePhiWith(HPhi* initial, HPhi* replacement) { + DCHECK(initial->GetBlock() == this); + InsertPhiAfter(replacement, initial); + initial->ReplaceWith(replacement); + RemovePhi(initial); +} + void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial, HInstruction* replacement) { DCHECK(initial->GetBlock() == this); @@ -2907,6 +2914,28 @@ void HInstruction::RemoveEnvironmentUsers() { env_uses_.clear(); } +HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr) { + HInstruction* clone = instr->Clone(instr->GetBlock()->GetGraph()->GetAllocator()); + HBasicBlock* block = instr->GetBlock(); + + if (instr->IsPhi()) { + HPhi* phi = instr->AsPhi(); + DCHECK(!phi->HasEnvironment()); + HPhi* phi_clone = clone->AsPhi(); + block->ReplaceAndRemovePhiWith(phi, phi_clone); + } else { + block->ReplaceAndRemoveInstructionWith(instr, clone); + if (instr->HasEnvironment()) { + clone->CopyEnvironmentFrom(instr->GetEnvironment()); + HLoopInformation* loop_info = block->GetLoopInformation(); + if (instr->IsSuspendCheck() && loop_info != nullptr) { + loop_info->SetSuspendCheck(clone->AsSuspendCheck()); 
+ } + } + } + return clone; +} + // Returns an instruction with the opposite Boolean value from 'cond'. HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) { ArenaAllocator* allocator = GetAllocator(); diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 6672901781..66d5bfea32 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -1160,6 +1160,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> { // Insert `instruction` before/after an existing instruction `cursor`. void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor); void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor); + // Replace phi `initial` with `replacement` within this block. + void ReplaceAndRemovePhiWith(HPhi* initial, HPhi* replacement); // Replace instruction `initial` with `replacement` within this block. void ReplaceAndRemoveInstructionWith(HInstruction* initial, HInstruction* replacement); @@ -1480,18 +1482,31 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION) #undef FORWARD_DECLARATION #define DECLARE_INSTRUCTION(type) \ + private: \ + H##type& operator=(const H##type&) = delete; \ + public: \ InstructionKind GetKindInternal() const OVERRIDE { return k##type; } \ const char* DebugName() const OVERRIDE { return #type; } \ bool InstructionTypeEquals(const HInstruction* other) const OVERRIDE { \ return other->Is##type(); \ } \ + HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE { \ + DCHECK(IsClonable()); \ + return new (arena) H##type(*this->As##type()); \ + } \ void Accept(HGraphVisitor* visitor) OVERRIDE #define DECLARE_ABSTRACT_INSTRUCTION(type) \ + private: \ + H##type& operator=(const H##type&) = delete; \ + public: \ bool Is##type() const { return As##type() != nullptr; } \ const H##type* As##type() const { return this; } \ H##type* As##type() { return this; } +#define DEFAULT_COPY_CONSTRUCTOR(type) \ + explicit H##type(const H##type& other) = 
default; + template <typename T> class HUseListNode : public ArenaObject<kArenaAllocUseListNode>, public IntrusiveForwardListNode<HUseListNode<T>> { @@ -2182,6 +2197,25 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK) #undef INSTRUCTION_TYPE_CHECK + // Return a clone of the instruction if it is clonable (shallow copy by default, custom copy + // if a custom copy-constructor is provided for a particular type). If IsClonable() is false for + // the instruction then the behaviour of this function is undefined. + // + // Note: It is semantically valid to create a clone of the instruction only until + // prepare_for_register_allocator phase as lifetime, intervals and codegen info are not + // copied. + // + // Note: HEnvironment and some other fields are not copied and are set to default values, see + // 'explicit HInstruction(const HInstruction& other)' for details. + virtual HInstruction* Clone(ArenaAllocator* arena ATTRIBUTE_UNUSED) const { + LOG(FATAL) << "Cloning is not implemented for the instruction " << + DebugName() << " " << GetId(); + UNREACHABLE(); + } + + // Return whether instruction can be cloned (copied). + virtual bool IsClonable() const { return false; } + // Returns whether the instruction can be moved within the graph. // TODO: this method is used by LICM and GVN with possibly different // meanings? split and rename? @@ -2298,6 +2332,30 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { packed_fields_ = BitFieldType::Update(value, packed_fields_); } + // Copy construction for the instruction (used for Clone function). + // + // Fields (e.g. lifetime, intervals and codegen info) associated with phases starting from + // prepare_for_register_allocator are not copied (set to default values). + // + // Copy constructors must be provided for every HInstruction type; default copy constructor is + // fine for most of them. 
However for some of the instructions a custom copy constructor must be + // specified (when instruction has non-trivially copyable fields and must have a special behaviour + // for copying them). + explicit HInstruction(const HInstruction& other) + : previous_(nullptr), + next_(nullptr), + block_(nullptr), + dex_pc_(other.dex_pc_), + id_(-1), + ssa_index_(-1), + packed_fields_(other.packed_fields_), + environment_(nullptr), + locations_(nullptr), + live_interval_(nullptr), + lifetime_position_(kNoLifetime), + side_effects_(other.side_effects_), + reference_type_handle_(other.reference_type_handle_) {} + private: void FixUpUserRecordsAfterUseInsertion(HUseList<HInstruction*>::iterator fixup_end) { auto before_use_node = uses_.before_begin(); @@ -2387,8 +2445,6 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { friend class HEnvironment; friend class HGraph; friend class HInstructionList; - - DISALLOW_COPY_AND_ASSIGN(HInstruction); }; std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs); @@ -2484,10 +2540,9 @@ class HVariableInputSizeInstruction : public HInstruction { : HInstruction(side_effects, dex_pc), inputs_(number_of_inputs, allocator->Adapter(kind)) {} - ArenaVector<HUserRecord<HInstruction*>> inputs_; + DEFAULT_COPY_CONSTRUCTOR(VariableInputSizeInstruction); - private: - DISALLOW_COPY_AND_ASSIGN(HVariableInputSizeInstruction); + ArenaVector<HUserRecord<HInstruction*>> inputs_; }; template<size_t N> @@ -2502,6 +2557,9 @@ class HTemplateInstruction: public HInstruction { return ArrayRef<HUserRecord<HInstruction*>>(inputs_); } + protected: + DEFAULT_COPY_CONSTRUCTOR(TemplateInstruction<N>); + private: std::array<HUserRecord<HInstruction*>, N> inputs_; @@ -2522,6 +2580,9 @@ class HTemplateInstruction<0>: public HInstruction { return ArrayRef<HUserRecord<HInstruction*>>(); } + protected: + DEFAULT_COPY_CONSTRUCTOR(TemplateInstruction<0>); + private: friend class SsaBuilder; }; @@ -2547,6 +2608,7 @@ class 
HExpression : public HTemplateInstruction<N> { static_assert(kNumberOfExpressionPackedBits <= HInstruction::kMaxNumberOfPackedBits, "Too many packed fields."); using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>; + DEFAULT_COPY_CONSTRUCTOR(Expression<N>); }; // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow @@ -2560,8 +2622,8 @@ class HReturnVoid FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(ReturnVoid); - private: - DISALLOW_COPY_AND_ASSIGN(HReturnVoid); + protected: + DEFAULT_COPY_CONSTRUCTOR(ReturnVoid); }; // Represents dex's RETURN opcodes. A HReturn is a control flow @@ -2577,8 +2639,8 @@ class HReturn FINAL : public HTemplateInstruction<1> { DECLARE_INSTRUCTION(Return); - private: - DISALLOW_COPY_AND_ASSIGN(HReturn); + protected: + DEFAULT_COPY_CONSTRUCTOR(Return); }; class HPhi FINAL : public HVariableInputSizeInstruction { @@ -2604,6 +2666,8 @@ class HPhi FINAL : public HVariableInputSizeInstruction { SetPackedFlag<kFlagCanBeNull>(true); } + bool IsClonable() const OVERRIDE { return true; } + // Returns a type equivalent to the given `type`, but that a `HPhi` can hold. static DataType::Type ToPhiType(DataType::Type type) { return DataType::Kind(type); @@ -2666,6 +2730,9 @@ class HPhi FINAL : public HVariableInputSizeInstruction { DECLARE_INSTRUCTION(Phi); + protected: + DEFAULT_COPY_CONSTRUCTOR(Phi); + private: static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldTypeSize = @@ -2677,8 +2744,6 @@ class HPhi FINAL : public HVariableInputSizeInstruction { using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>; const uint32_t reg_number_; - - DISALLOW_COPY_AND_ASSIGN(HPhi); }; // The exit instruction is the only instruction of the exit block. 
@@ -2692,8 +2757,8 @@ class HExit FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(Exit); - private: - DISALLOW_COPY_AND_ASSIGN(HExit); + protected: + DEFAULT_COPY_CONSTRUCTOR(Exit); }; // Jumps from one block to another. @@ -2701,6 +2766,7 @@ class HGoto FINAL : public HTemplateInstruction<0> { public: explicit HGoto(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {} + bool IsClonable() const OVERRIDE { return true; } bool IsControlFlow() const OVERRIDE { return true; } HBasicBlock* GetSuccessor() const { @@ -2709,8 +2775,8 @@ class HGoto FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(Goto); - private: - DISALLOW_COPY_AND_ASSIGN(HGoto); + protected: + DEFAULT_COPY_CONSTRUCTOR(Goto); }; class HConstant : public HExpression<0> { @@ -2733,8 +2799,8 @@ class HConstant : public HExpression<0> { DECLARE_ABSTRACT_INSTRUCTION(Constant); - private: - DISALLOW_COPY_AND_ASSIGN(HConstant); + protected: + DEFAULT_COPY_CONSTRUCTOR(Constant); }; class HNullConstant FINAL : public HConstant { @@ -2752,12 +2818,14 @@ class HNullConstant FINAL : public HConstant { DECLARE_INSTRUCTION(NullConstant); + protected: + DEFAULT_COPY_CONSTRUCTOR(NullConstant); + private: explicit HNullConstant(uint32_t dex_pc = kNoDexPc) : HConstant(DataType::Type::kReference, dex_pc) {} friend class HGraph; - DISALLOW_COPY_AND_ASSIGN(HNullConstant); }; // Constants of the type int. 
Those can be from Dex instructions, or @@ -2789,6 +2857,9 @@ class HIntConstant FINAL : public HConstant { DECLARE_INSTRUCTION(IntConstant); + protected: + DEFAULT_COPY_CONSTRUCTOR(IntConstant); + private: explicit HIntConstant(int32_t value, uint32_t dex_pc = kNoDexPc) : HConstant(DataType::Type::kInt32, dex_pc), value_(value) {} @@ -2800,7 +2871,6 @@ class HIntConstant FINAL : public HConstant { friend class HGraph; ART_FRIEND_TEST(GraphTest, InsertInstructionBefore); ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast); - DISALLOW_COPY_AND_ASSIGN(HIntConstant); }; class HLongConstant FINAL : public HConstant { @@ -2823,6 +2893,9 @@ class HLongConstant FINAL : public HConstant { DECLARE_INSTRUCTION(LongConstant); + protected: + DEFAULT_COPY_CONSTRUCTOR(LongConstant); + private: explicit HLongConstant(int64_t value, uint32_t dex_pc = kNoDexPc) : HConstant(DataType::Type::kInt64, dex_pc), value_(value) {} @@ -2830,7 +2903,6 @@ class HLongConstant FINAL : public HConstant { const int64_t value_; friend class HGraph; - DISALLOW_COPY_AND_ASSIGN(HLongConstant); }; class HFloatConstant FINAL : public HConstant { @@ -2872,6 +2944,9 @@ class HFloatConstant FINAL : public HConstant { DECLARE_INSTRUCTION(FloatConstant); + protected: + DEFAULT_COPY_CONSTRUCTOR(FloatConstant); + private: explicit HFloatConstant(float value, uint32_t dex_pc = kNoDexPc) : HConstant(DataType::Type::kFloat32, dex_pc), value_(value) {} @@ -2883,7 +2958,6 @@ class HFloatConstant FINAL : public HConstant { // Only the SsaBuilder and HGraph can create floating-point constants. 
friend class SsaBuilder; friend class HGraph; - DISALLOW_COPY_AND_ASSIGN(HFloatConstant); }; class HDoubleConstant FINAL : public HConstant { @@ -2923,6 +2997,9 @@ class HDoubleConstant FINAL : public HConstant { DECLARE_INSTRUCTION(DoubleConstant); + protected: + DEFAULT_COPY_CONSTRUCTOR(DoubleConstant); + private: explicit HDoubleConstant(double value, uint32_t dex_pc = kNoDexPc) : HConstant(DataType::Type::kFloat64, dex_pc), value_(value) {} @@ -2934,7 +3011,6 @@ class HDoubleConstant FINAL : public HConstant { // Only the SsaBuilder and HGraph can create floating-point constants. friend class SsaBuilder; friend class HGraph; - DISALLOW_COPY_AND_ASSIGN(HDoubleConstant); }; // Conditional branch. A block ending with an HIf instruction must have @@ -2946,6 +3022,7 @@ class HIf FINAL : public HTemplateInstruction<1> { SetRawInputAt(0, input); } + bool IsClonable() const OVERRIDE { return true; } bool IsControlFlow() const OVERRIDE { return true; } HBasicBlock* IfTrueSuccessor() const { @@ -2958,8 +3035,8 @@ class HIf FINAL : public HTemplateInstruction<1> { DECLARE_INSTRUCTION(If); - private: - DISALLOW_COPY_AND_ASSIGN(HIf); + protected: + DEFAULT_COPY_CONSTRUCTOR(If); }; @@ -3012,6 +3089,9 @@ class HTryBoundary FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(TryBoundary); + protected: + DEFAULT_COPY_CONSTRUCTOR(TryBoundary); + private: static constexpr size_t kFieldBoundaryKind = kNumberOfGenericPackedBits; static constexpr size_t kFieldBoundaryKindSize = @@ -3021,8 +3101,6 @@ class HTryBoundary FINAL : public HTemplateInstruction<0> { static_assert(kNumberOfTryBoundaryPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using BoundaryKindField = BitField<BoundaryKind, kFieldBoundaryKind, kFieldBoundaryKindSize>; - - DISALLOW_COPY_AND_ASSIGN(HTryBoundary); }; // Deoptimize to interpreter, upon checking a condition. 
@@ -3045,6 +3123,8 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { SetRawInputAt(0, cond); } + bool IsClonable() const OVERRIDE { return true; } + // Use this constructor when the `HDeoptimize` guards an instruction, and any user // that relies on the deoptimization to pass should have its input be the `HDeoptimize` // instead of `guard`. @@ -3098,6 +3178,9 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { DECLARE_INSTRUCTION(Deoptimize); + protected: + DEFAULT_COPY_CONSTRUCTOR(Deoptimize); + private: static constexpr size_t kFieldCanBeMoved = kNumberOfGenericPackedBits; static constexpr size_t kFieldDeoptimizeKind = kNumberOfGenericPackedBits + 1; @@ -3109,8 +3192,6 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { "Too many packed fields."); using DeoptimizeKindField = BitField<DeoptimizationKind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>; - - DISALLOW_COPY_AND_ASSIGN(HDeoptimize); }; // Represents a should_deoptimize flag. Currently used for CHA-based devirtualization. 
@@ -3136,8 +3217,8 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction { DECLARE_INSTRUCTION(ShouldDeoptimizeFlag); - private: - DISALLOW_COPY_AND_ASSIGN(HShouldDeoptimizeFlag); + protected: + DEFAULT_COPY_CONSTRUCTOR(ShouldDeoptimizeFlag); }; // Represents the ArtMethod that was passed as a first argument to @@ -3150,8 +3231,8 @@ class HCurrentMethod FINAL : public HExpression<0> { DECLARE_INSTRUCTION(CurrentMethod); - private: - DISALLOW_COPY_AND_ASSIGN(HCurrentMethod); + protected: + DEFAULT_COPY_CONSTRUCTOR(CurrentMethod); }; // Fetches an ArtMethod from the virtual table or the interface method table @@ -3174,6 +3255,7 @@ class HClassTableGet FINAL : public HExpression<1> { SetRawInputAt(0, cls); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { return other->AsClassTableGet()->GetIndex() == index_ && @@ -3185,6 +3267,9 @@ class HClassTableGet FINAL : public HExpression<1> { DECLARE_INSTRUCTION(ClassTableGet); + protected: + DEFAULT_COPY_CONSTRUCTOR(ClassTableGet); + private: static constexpr size_t kFieldTableKind = kNumberOfExpressionPackedBits; static constexpr size_t kFieldTableKindSize = @@ -3196,8 +3281,6 @@ class HClassTableGet FINAL : public HExpression<1> { // The index of the ArtMethod in the table. const size_t index_; - - DISALLOW_COPY_AND_ASSIGN(HClassTableGet); }; // PackedSwitch (jump table). 
A block ending with a PackedSwitch instruction will @@ -3215,6 +3298,8 @@ class HPackedSwitch FINAL : public HTemplateInstruction<1> { SetRawInputAt(0, input); } + bool IsClonable() const OVERRIDE { return true; } + bool IsControlFlow() const OVERRIDE { return true; } int32_t GetStartValue() const { return start_value_; } @@ -3227,11 +3312,12 @@ class HPackedSwitch FINAL : public HTemplateInstruction<1> { } DECLARE_INSTRUCTION(PackedSwitch); + protected: + DEFAULT_COPY_CONSTRUCTOR(PackedSwitch); + private: const int32_t start_value_; const uint32_t num_entries_; - - DISALLOW_COPY_AND_ASSIGN(HPackedSwitch); }; class HUnaryOperation : public HExpression<1> { @@ -3241,6 +3327,9 @@ class HUnaryOperation : public HExpression<1> { SetRawInputAt(0, input); } + // All of the UnaryOperation instructions are clonable. + bool IsClonable() const OVERRIDE { return true; } + HInstruction* GetInput() const { return InputAt(0); } DataType::Type GetResultType() const { return GetType(); } @@ -3262,8 +3351,8 @@ class HUnaryOperation : public HExpression<1> { DECLARE_ABSTRACT_INSTRUCTION(UnaryOperation); - private: - DISALLOW_COPY_AND_ASSIGN(HUnaryOperation); + protected: + DEFAULT_COPY_CONSTRUCTOR(UnaryOperation); }; class HBinaryOperation : public HExpression<2> { @@ -3278,6 +3367,9 @@ class HBinaryOperation : public HExpression<2> { SetRawInputAt(1, right); } + // All of the BinaryOperation instructions are clonable. 
+ bool IsClonable() const OVERRIDE { return true; } + HInstruction* GetLeft() const { return InputAt(0); } HInstruction* GetRight() const { return InputAt(1); } DataType::Type GetResultType() const { return GetType(); } @@ -3352,8 +3444,8 @@ class HBinaryOperation : public HExpression<2> { DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation); - private: - DISALLOW_COPY_AND_ASSIGN(HBinaryOperation); + protected: + DEFAULT_COPY_CONSTRUCTOR(BinaryOperation); }; // The comparison bias applies for floating point operations and indicates how NaN @@ -3443,8 +3535,7 @@ class HCondition : public HBinaryOperation { return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc); } - private: - DISALLOW_COPY_AND_ASSIGN(HCondition); + DEFAULT_COPY_CONSTRUCTOR(Condition); }; // Instruction to check if two inputs are equal to each other. @@ -3486,10 +3577,11 @@ class HEqual FINAL : public HCondition { return kCondNE; } + protected: + DEFAULT_COPY_CONSTRUCTOR(Equal); + private: template <typename T> static bool Compute(T x, T y) { return x == y; } - - DISALLOW_COPY_AND_ASSIGN(HEqual); }; class HNotEqual FINAL : public HCondition { @@ -3529,10 +3621,11 @@ class HNotEqual FINAL : public HCondition { return kCondEQ; } + protected: + DEFAULT_COPY_CONSTRUCTOR(NotEqual); + private: template <typename T> static bool Compute(T x, T y) { return x != y; } - - DISALLOW_COPY_AND_ASSIGN(HNotEqual); }; class HLessThan FINAL : public HCondition { @@ -3566,10 +3659,11 @@ class HLessThan FINAL : public HCondition { return kCondGE; } + protected: + DEFAULT_COPY_CONSTRUCTOR(LessThan); + private: template <typename T> static bool Compute(T x, T y) { return x < y; } - - DISALLOW_COPY_AND_ASSIGN(HLessThan); }; class HLessThanOrEqual FINAL : public HCondition { @@ -3603,10 +3697,11 @@ class HLessThanOrEqual FINAL : public HCondition { return kCondGT; } + protected: + DEFAULT_COPY_CONSTRUCTOR(LessThanOrEqual); + private: template <typename T> static bool Compute(T x, T y) { return x <= y; } - - 
DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual); }; class HGreaterThan FINAL : public HCondition { @@ -3640,10 +3735,11 @@ class HGreaterThan FINAL : public HCondition { return kCondLE; } + protected: + DEFAULT_COPY_CONSTRUCTOR(GreaterThan); + private: template <typename T> static bool Compute(T x, T y) { return x > y; } - - DISALLOW_COPY_AND_ASSIGN(HGreaterThan); }; class HGreaterThanOrEqual FINAL : public HCondition { @@ -3677,10 +3773,11 @@ class HGreaterThanOrEqual FINAL : public HCondition { return kCondLT; } + protected: + DEFAULT_COPY_CONSTRUCTOR(GreaterThanOrEqual); + private: template <typename T> static bool Compute(T x, T y) { return x >= y; } - - DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual); }; class HBelow FINAL : public HCondition { @@ -3715,12 +3812,13 @@ class HBelow FINAL : public HCondition { return kCondAE; } + protected: + DEFAULT_COPY_CONSTRUCTOR(Below); + private: template <typename T> static bool Compute(T x, T y) { return MakeUnsigned(x) < MakeUnsigned(y); } - - DISALLOW_COPY_AND_ASSIGN(HBelow); }; class HBelowOrEqual FINAL : public HCondition { @@ -3755,12 +3853,13 @@ class HBelowOrEqual FINAL : public HCondition { return kCondA; } + protected: + DEFAULT_COPY_CONSTRUCTOR(BelowOrEqual); + private: template <typename T> static bool Compute(T x, T y) { return MakeUnsigned(x) <= MakeUnsigned(y); } - - DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual); }; class HAbove FINAL : public HCondition { @@ -3795,12 +3894,13 @@ class HAbove FINAL : public HCondition { return kCondBE; } + protected: + DEFAULT_COPY_CONSTRUCTOR(Above); + private: template <typename T> static bool Compute(T x, T y) { return MakeUnsigned(x) > MakeUnsigned(y); } - - DISALLOW_COPY_AND_ASSIGN(HAbove); }; class HAboveOrEqual FINAL : public HCondition { @@ -3835,12 +3935,13 @@ class HAboveOrEqual FINAL : public HCondition { return kCondB; } + protected: + DEFAULT_COPY_CONSTRUCTOR(AboveOrEqual); + private: template <typename T> static bool Compute(T x, T y) { return MakeUnsigned(x) >= 
MakeUnsigned(y); } - - DISALLOW_COPY_AND_ASSIGN(HAboveOrEqual); }; // Instruction to check how two inputs compare to each other. @@ -3930,8 +4031,7 @@ class HCompare FINAL : public HBinaryOperation { return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc); } - private: - DISALLOW_COPY_AND_ASSIGN(HCompare); + DEFAULT_COPY_CONSTRUCTOR(Compare); }; class HNewInstance FINAL : public HExpression<1> { @@ -3950,6 +4050,8 @@ class HNewInstance FINAL : public HExpression<1> { SetRawInputAt(0, cls); } + bool IsClonable() const OVERRIDE { return true; } + dex::TypeIndex GetTypeIndex() const { return type_index_; } const DexFile& GetDexFile() const { return dex_file_; } @@ -3986,6 +4088,9 @@ class HNewInstance FINAL : public HExpression<1> { DECLARE_INSTRUCTION(NewInstance); + protected: + DEFAULT_COPY_CONSTRUCTOR(NewInstance); + private: static constexpr size_t kFlagFinalizable = kNumberOfExpressionPackedBits; static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1; @@ -3995,8 +4100,6 @@ class HNewInstance FINAL : public HExpression<1> { const dex::TypeIndex type_index_; const DexFile& dex_file_; QuickEntrypointEnum entrypoint_; - - DISALLOW_COPY_AND_ASSIGN(HNewInstance); }; enum IntrinsicNeedsEnvironmentOrCache { @@ -4114,6 +4217,8 @@ class HInvoke : public HVariableInputSizeInstruction { SetPackedFlag<kFlagCanThrow>(true); } + DEFAULT_COPY_CONSTRUCTOR(Invoke); + uint32_t number_of_arguments_; ArtMethod* resolved_method_; const uint32_t dex_method_index_; @@ -4121,9 +4226,6 @@ class HInvoke : public HVariableInputSizeInstruction { // A magic word holding optimizations for intrinsics. See intrinsics.h. 
uint32_t intrinsic_optimizations_; - - private: - DISALLOW_COPY_AND_ASSIGN(HInvoke); }; class HInvokeUnresolved FINAL : public HInvoke { @@ -4144,10 +4246,12 @@ class HInvokeUnresolved FINAL : public HInvoke { invoke_type) { } + bool IsClonable() const OVERRIDE { return true; } + DECLARE_INSTRUCTION(InvokeUnresolved); - private: - DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved); + protected: + DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved); }; class HInvokePolymorphic FINAL : public HInvoke { @@ -4166,10 +4270,12 @@ class HInvokePolymorphic FINAL : public HInvoke { nullptr, kVirtual) {} + bool IsClonable() const OVERRIDE { return true; } + DECLARE_INSTRUCTION(InvokePolymorphic); - private: - DISALLOW_COPY_AND_ASSIGN(HInvokePolymorphic); + protected: + DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic); }; class HInvokeStaticOrDirect FINAL : public HInvoke { @@ -4256,6 +4362,8 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement); } + bool IsClonable() const OVERRIDE { return true; } + void SetDispatchInfo(const DispatchInfo& dispatch_info) { bool had_current_method_input = HasCurrentMethodInput(); bool needs_current_method_input = NeedsCurrentMethodInput(dispatch_info.method_load_kind); @@ -4401,6 +4509,9 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { DECLARE_INSTRUCTION(InvokeStaticOrDirect); + protected: + DEFAULT_COPY_CONSTRUCTOR(InvokeStaticOrDirect); + private: static constexpr size_t kFieldClinitCheckRequirement = kNumberOfInvokePackedBits; static constexpr size_t kFieldClinitCheckRequirementSize = @@ -4416,8 +4527,6 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { // Cached values of the resolved method, to avoid needing the mutator lock. 
MethodReference target_method_; DispatchInfo dispatch_info_; - - DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect); }; std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs); std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs); @@ -4441,6 +4550,8 @@ class HInvokeVirtual FINAL : public HInvoke { kVirtual), vtable_index_(vtable_index) {} + bool IsClonable() const OVERRIDE { return true; } + bool CanBeNull() const OVERRIDE { switch (GetIntrinsic()) { case Intrinsics::kThreadCurrentThread: @@ -4463,11 +4574,12 @@ class HInvokeVirtual FINAL : public HInvoke { DECLARE_INSTRUCTION(InvokeVirtual); + protected: + DEFAULT_COPY_CONSTRUCTOR(InvokeVirtual); + private: // Cached value of the resolved method, to avoid needing the mutator lock. const uint32_t vtable_index_; - - DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual); }; class HInvokeInterface FINAL : public HInvoke { @@ -4489,6 +4601,8 @@ class HInvokeInterface FINAL : public HInvoke { kInterface), imt_index_(imt_index) {} + bool IsClonable() const OVERRIDE { return true; } + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { // TODO: Add implicit null checks in intrinsics. return (obj == InputAt(0)) && !GetLocations()->Intrinsified(); @@ -4504,11 +4618,12 @@ class HInvokeInterface FINAL : public HInvoke { DECLARE_INSTRUCTION(InvokeInterface); + protected: + DEFAULT_COPY_CONSTRUCTOR(InvokeInterface); + private: // Cached value of the resolved method, to avoid needing the mutator lock. 
const uint32_t imt_index_; - - DISALLOW_COPY_AND_ASSIGN(HInvokeInterface); }; class HNeg FINAL : public HUnaryOperation { @@ -4535,8 +4650,8 @@ class HNeg FINAL : public HUnaryOperation { DECLARE_INSTRUCTION(Neg); - private: - DISALLOW_COPY_AND_ASSIGN(HNeg); + protected: + DEFAULT_COPY_CONSTRUCTOR(Neg); }; class HNewArray FINAL : public HExpression<2> { @@ -4547,6 +4662,8 @@ class HNewArray FINAL : public HExpression<2> { SetRawInputAt(1, length); } + bool IsClonable() const OVERRIDE { return true; } + // Calls runtime so needs an environment. bool NeedsEnvironment() const OVERRIDE { return true; } @@ -4566,8 +4683,8 @@ class HNewArray FINAL : public HExpression<2> { DECLARE_INSTRUCTION(NewArray); - private: - DISALLOW_COPY_AND_ASSIGN(HNewArray); + protected: + DEFAULT_COPY_CONSTRUCTOR(NewArray); }; class HAdd FINAL : public HBinaryOperation { @@ -4601,8 +4718,8 @@ class HAdd FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Add); - private: - DISALLOW_COPY_AND_ASSIGN(HAdd); + protected: + DEFAULT_COPY_CONSTRUCTOR(Add); }; class HSub FINAL : public HBinaryOperation { @@ -4634,8 +4751,8 @@ class HSub FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Sub); - private: - DISALLOW_COPY_AND_ASSIGN(HSub); + protected: + DEFAULT_COPY_CONSTRUCTOR(Sub); }; class HMul FINAL : public HBinaryOperation { @@ -4669,8 +4786,8 @@ class HMul FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Mul); - private: - DISALLOW_COPY_AND_ASSIGN(HMul); + protected: + DEFAULT_COPY_CONSTRUCTOR(Mul); }; class HDiv FINAL : public HBinaryOperation { @@ -4716,8 +4833,8 @@ class HDiv FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Div); - private: - DISALLOW_COPY_AND_ASSIGN(HDiv); + protected: + DEFAULT_COPY_CONSTRUCTOR(Div); }; class HRem FINAL : public HBinaryOperation { @@ -4763,8 +4880,8 @@ class HRem FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Rem); - private: - DISALLOW_COPY_AND_ASSIGN(HRem); + protected: + DEFAULT_COPY_CONSTRUCTOR(Rem); }; class HDivZeroCheck FINAL : 
public HExpression<1> { @@ -4789,8 +4906,8 @@ class HDivZeroCheck FINAL : public HExpression<1> { DECLARE_INSTRUCTION(DivZeroCheck); - private: - DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck); + protected: + DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck); }; class HShl FINAL : public HBinaryOperation { @@ -4835,8 +4952,8 @@ class HShl FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Shl); - private: - DISALLOW_COPY_AND_ASSIGN(HShl); + protected: + DEFAULT_COPY_CONSTRUCTOR(Shl); }; class HShr FINAL : public HBinaryOperation { @@ -4881,8 +4998,8 @@ class HShr FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Shr); - private: - DISALLOW_COPY_AND_ASSIGN(HShr); + protected: + DEFAULT_COPY_CONSTRUCTOR(Shr); }; class HUShr FINAL : public HBinaryOperation { @@ -4929,8 +5046,8 @@ class HUShr FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(UShr); - private: - DISALLOW_COPY_AND_ASSIGN(HUShr); + protected: + DEFAULT_COPY_CONSTRUCTOR(UShr); }; class HAnd FINAL : public HBinaryOperation { @@ -4966,8 +5083,8 @@ class HAnd FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(And); - private: - DISALLOW_COPY_AND_ASSIGN(HAnd); + protected: + DEFAULT_COPY_CONSTRUCTOR(And); }; class HOr FINAL : public HBinaryOperation { @@ -5003,8 +5120,8 @@ class HOr FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Or); - private: - DISALLOW_COPY_AND_ASSIGN(HOr); + protected: + DEFAULT_COPY_CONSTRUCTOR(Or); }; class HXor FINAL : public HBinaryOperation { @@ -5040,8 +5157,8 @@ class HXor FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Xor); - private: - DISALLOW_COPY_AND_ASSIGN(HXor); + protected: + DEFAULT_COPY_CONSTRUCTOR(Xor); }; class HRor FINAL : public HBinaryOperation { @@ -5091,8 +5208,8 @@ class HRor FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(Ror); - private: - DISALLOW_COPY_AND_ASSIGN(HRor); + protected: + DEFAULT_COPY_CONSTRUCTOR(Ror); }; // The value of a parameter in this method. 
Its location depends on @@ -5122,6 +5239,9 @@ class HParameterValue FINAL : public HExpression<0> { DECLARE_INSTRUCTION(ParameterValue); + protected: + DEFAULT_COPY_CONSTRUCTOR(ParameterValue); + private: // Whether or not the parameter value corresponds to 'this' argument. static constexpr size_t kFlagIsThis = kNumberOfExpressionPackedBits; @@ -5135,8 +5255,6 @@ class HParameterValue FINAL : public HExpression<0> { // The index of this parameter in the parameters list. Must be less // than HGraph::number_of_in_vregs_. const uint8_t index_; - - DISALLOW_COPY_AND_ASSIGN(HParameterValue); }; class HNot FINAL : public HUnaryOperation { @@ -5168,8 +5286,8 @@ class HNot FINAL : public HUnaryOperation { DECLARE_INSTRUCTION(Not); - private: - DISALLOW_COPY_AND_ASSIGN(HNot); + protected: + DEFAULT_COPY_CONSTRUCTOR(Not); }; class HBooleanNot FINAL : public HUnaryOperation { @@ -5205,8 +5323,8 @@ class HBooleanNot FINAL : public HUnaryOperation { DECLARE_INSTRUCTION(BooleanNot); - private: - DISALLOW_COPY_AND_ASSIGN(HBooleanNot); + protected: + DEFAULT_COPY_CONSTRUCTOR(BooleanNot); }; class HTypeConversion FINAL : public HExpression<1> { @@ -5234,8 +5352,8 @@ class HTypeConversion FINAL : public HExpression<1> { DECLARE_INSTRUCTION(TypeConversion); - private: - DISALLOW_COPY_AND_ASSIGN(HTypeConversion); + protected: + DEFAULT_COPY_CONSTRUCTOR(TypeConversion); }; static constexpr uint32_t kNoRegNumber = -1; @@ -5249,6 +5367,7 @@ class HNullCheck FINAL : public HExpression<1> { SetRawInputAt(0, value); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -5260,11 +5379,10 @@ class HNullCheck FINAL : public HExpression<1> { bool CanBeNull() const OVERRIDE { return false; } - DECLARE_INSTRUCTION(NullCheck); - private: - DISALLOW_COPY_AND_ASSIGN(HNullCheck); + protected: + DEFAULT_COPY_CONSTRUCTOR(NullCheck); }; // Embeds an 
ArtField and all the information required by the compiler. We cache @@ -5326,6 +5444,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> { SetRawInputAt(0, value); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return !IsVolatile(); } bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { @@ -5355,10 +5474,11 @@ class HInstanceFieldGet FINAL : public HExpression<1> { DECLARE_INSTRUCTION(InstanceFieldGet); + protected: + DEFAULT_COPY_CONSTRUCTOR(InstanceFieldGet); + private: const FieldInfo field_info_; - - DISALLOW_COPY_AND_ASSIGN(HInstanceFieldGet); }; class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { @@ -5386,6 +5506,8 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { SetRawInputAt(1, value); } + bool IsClonable() const OVERRIDE { return true; } + bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE { return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value()); } @@ -5400,6 +5522,9 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { DECLARE_INSTRUCTION(InstanceFieldSet); + protected: + DEFAULT_COPY_CONSTRUCTOR(InstanceFieldSet); + private: static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits; static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagValueCanBeNull + 1; @@ -5407,8 +5532,6 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { "Too many packed fields."); const FieldInfo field_info_; - - DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet); }; class HArrayGet FINAL : public HExpression<2> { @@ -5436,6 +5559,7 @@ class HArrayGet FINAL : public HExpression<2> { SetRawInputAt(1, index); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -5485,6 +5609,9 @@ class HArrayGet FINAL : public HExpression<2> { 
DECLARE_INSTRUCTION(ArrayGet); + protected: + DEFAULT_COPY_CONSTRUCTOR(ArrayGet); + private: // We treat a String as an array, creating the HArrayGet from String.charAt() // intrinsic in the instruction simplifier. We can always determine whether @@ -5495,8 +5622,6 @@ class HArrayGet FINAL : public HExpression<2> { static constexpr size_t kNumberOfArrayGetPackedBits = kFlagIsStringCharAt + 1; static_assert(kNumberOfArrayGetPackedBits <= HInstruction::kMaxNumberOfPackedBits, "Too many packed fields."); - - DISALLOW_COPY_AND_ASSIGN(HArrayGet); }; class HArraySet FINAL : public HTemplateInstruction<3> { @@ -5530,6 +5655,8 @@ class HArraySet FINAL : public HTemplateInstruction<3> { SetRawInputAt(2, value); } + bool IsClonable() const OVERRIDE { return true; } + bool NeedsEnvironment() const OVERRIDE { // We call a runtime method to throw ArrayStoreException. return NeedsTypeCheck(); @@ -5595,6 +5722,9 @@ class HArraySet FINAL : public HTemplateInstruction<3> { DECLARE_INSTRUCTION(ArraySet); + protected: + DEFAULT_COPY_CONSTRUCTOR(ArraySet); + private: static constexpr size_t kFieldExpectedComponentType = kNumberOfGenericPackedBits; static constexpr size_t kFieldExpectedComponentTypeSize = @@ -5610,8 +5740,6 @@ class HArraySet FINAL : public HTemplateInstruction<3> { static_assert(kNumberOfArraySetPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using ExpectedComponentTypeField = BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>; - - DISALLOW_COPY_AND_ASSIGN(HArraySet); }; class HArrayLength FINAL : public HExpression<1> { @@ -5624,6 +5752,7 @@ class HArrayLength FINAL : public HExpression<1> { SetRawInputAt(0, array); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -5636,6 +5765,9 @@ class HArrayLength FINAL : public HExpression<1> { 
DECLARE_INSTRUCTION(ArrayLength); + protected: + DEFAULT_COPY_CONSTRUCTOR(ArrayLength); + private: // We treat a String as an array, creating the HArrayLength from String.length() // or String.isEmpty() intrinsic in the instruction simplifier. We can always @@ -5646,8 +5778,6 @@ class HArrayLength FINAL : public HExpression<1> { static constexpr size_t kNumberOfArrayLengthPackedBits = kFlagIsStringLength + 1; static_assert(kNumberOfArrayLengthPackedBits <= HInstruction::kMaxNumberOfPackedBits, "Too many packed fields."); - - DISALLOW_COPY_AND_ASSIGN(HArrayLength); }; class HBoundsCheck FINAL : public HExpression<2> { @@ -5665,6 +5795,7 @@ class HBoundsCheck FINAL : public HExpression<2> { SetRawInputAt(1, length); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -5680,10 +5811,11 @@ class HBoundsCheck FINAL : public HExpression<2> { DECLARE_INSTRUCTION(BoundsCheck); + protected: + DEFAULT_COPY_CONSTRUCTOR(BoundsCheck); + private: static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits; - - DISALLOW_COPY_AND_ASSIGN(HBoundsCheck); }; class HSuspendCheck FINAL : public HTemplateInstruction<0> { @@ -5691,6 +5823,8 @@ class HSuspendCheck FINAL : public HTemplateInstruction<0> { explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc), slow_path_(nullptr) {} + bool IsClonable() const OVERRIDE { return true; } + bool NeedsEnvironment() const OVERRIDE { return true; } @@ -5700,12 +5834,13 @@ class HSuspendCheck FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(SuspendCheck); + protected: + DEFAULT_COPY_CONSTRUCTOR(SuspendCheck); + private: // Only used for code generation, in order to share the same slow path between back edges // of a same loop. 
SlowPathCode* slow_path_; - - DISALLOW_COPY_AND_ASSIGN(HSuspendCheck); }; // Pseudo-instruction which provides the native debugger with mapping information. @@ -5721,8 +5856,8 @@ class HNativeDebugInfo : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(NativeDebugInfo); - private: - DISALLOW_COPY_AND_ASSIGN(HNativeDebugInfo); + protected: + DEFAULT_COPY_CONSTRUCTOR(NativeDebugInfo); }; /** @@ -5788,6 +5923,8 @@ class HLoadClass FINAL : public HInstruction { SetPackedFlag<kFlagGenerateClInitCheck>(false); } + bool IsClonable() const OVERRIDE { return true; } + void SetLoadKind(LoadKind load_kind); LoadKind GetLoadKind() const { @@ -5879,6 +6016,9 @@ class HLoadClass FINAL : public HInstruction { DECLARE_INSTRUCTION(LoadClass); + protected: + DEFAULT_COPY_CONSTRUCTOR(LoadClass); + private: static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits; static constexpr size_t kFlagIsInBootImage = kFlagNeedsAccessCheck + 1; @@ -5918,8 +6058,6 @@ class HLoadClass FINAL : public HInstruction { Handle<mirror::Class> klass_; ReferenceTypeInfo loaded_class_rti_; - - DISALLOW_COPY_AND_ASSIGN(HLoadClass); }; std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs); @@ -5977,6 +6115,8 @@ class HLoadString FINAL : public HInstruction { SetPackedField<LoadKindField>(LoadKind::kRuntimeCall); } + bool IsClonable() const OVERRIDE { return true; } + void SetLoadKind(LoadKind load_kind); LoadKind GetLoadKind() const { @@ -6043,6 +6183,9 @@ class HLoadString FINAL : public HInstruction { DECLARE_INSTRUCTION(LoadString); + protected: + DEFAULT_COPY_CONSTRUCTOR(LoadString); + private: static constexpr size_t kFieldLoadKind = kNumberOfGenericPackedBits; static constexpr size_t kFieldLoadKindSize = @@ -6062,8 +6205,6 @@ class HLoadString FINAL : public HInstruction { const DexFile& dex_file_; Handle<mirror::String> string_; - - DISALLOW_COPY_AND_ASSIGN(HLoadString); }; std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs); @@ -6095,6 +6236,7 @@ 
class HClinitCheck FINAL : public HExpression<1> { SetRawInputAt(0, constant); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -6114,8 +6256,9 @@ class HClinitCheck FINAL : public HExpression<1> { DECLARE_INSTRUCTION(ClinitCheck); - private: - DISALLOW_COPY_AND_ASSIGN(HClinitCheck); + + protected: + DEFAULT_COPY_CONSTRUCTOR(ClinitCheck); }; class HStaticFieldGet FINAL : public HExpression<1> { @@ -6141,6 +6284,7 @@ class HStaticFieldGet FINAL : public HExpression<1> { } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return !IsVolatile(); } bool InstructionDataEquals(const HInstruction* other) const OVERRIDE { @@ -6166,10 +6310,11 @@ class HStaticFieldGet FINAL : public HExpression<1> { DECLARE_INSTRUCTION(StaticFieldGet); + protected: + DEFAULT_COPY_CONSTRUCTOR(StaticFieldGet); + private: const FieldInfo field_info_; - - DISALLOW_COPY_AND_ASSIGN(HStaticFieldGet); }; class HStaticFieldSet FINAL : public HTemplateInstruction<2> { @@ -6197,6 +6342,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { SetRawInputAt(1, value); } + bool IsClonable() const OVERRIDE { return true; } const FieldInfo& GetFieldInfo() const { return field_info_; } MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); } DataType::Type GetFieldType() const { return field_info_.GetFieldType(); } @@ -6208,6 +6354,9 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { DECLARE_INSTRUCTION(StaticFieldSet); + protected: + DEFAULT_COPY_CONSTRUCTOR(StaticFieldSet); + private: static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits; static constexpr size_t kNumberOfStaticFieldSetPackedBits = kFlagValueCanBeNull + 1; @@ -6215,8 +6364,6 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { "Too many packed fields."); const FieldInfo 
field_info_; - - DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet); }; class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { @@ -6230,6 +6377,7 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { SetRawInputAt(0, obj); } + bool IsClonable() const OVERRIDE { return true; } bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() const OVERRIDE { return true; } @@ -6238,10 +6386,11 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet); + protected: + DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldGet); + private: const uint32_t field_index_; - - DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet); }; class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { @@ -6259,6 +6408,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { SetRawInputAt(1, value); } + bool IsClonable() const OVERRIDE { return true; } bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() const OVERRIDE { return true; } @@ -6267,6 +6417,9 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet); + protected: + DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldSet); + private: static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldFieldTypeSize = @@ -6278,8 +6431,6 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>; const uint32_t field_index_; - - DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet); }; class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { @@ -6291,6 +6442,7 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { field_index_(field_index) { } + bool IsClonable() const OVERRIDE { return true; } bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() 
const OVERRIDE { return true; } @@ -6299,10 +6451,11 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { DECLARE_INSTRUCTION(UnresolvedStaticFieldGet); + protected: + DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldGet); + private: const uint32_t field_index_; - - DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet); }; class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { @@ -6318,6 +6471,7 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { SetRawInputAt(0, value); } + bool IsClonable() const OVERRIDE { return true; } bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() const OVERRIDE { return true; } @@ -6326,6 +6480,9 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { DECLARE_INSTRUCTION(UnresolvedStaticFieldSet); + protected: + DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldSet); + private: static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldFieldTypeSize = @@ -6337,8 +6494,6 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>; const uint32_t field_index_; - - DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet); }; // Implement the move-exception DEX instruction. @@ -6351,8 +6506,8 @@ class HLoadException FINAL : public HExpression<0> { DECLARE_INSTRUCTION(LoadException); - private: - DISALLOW_COPY_AND_ASSIGN(HLoadException); + protected: + DEFAULT_COPY_CONSTRUCTOR(LoadException); }; // Implicit part of move-exception which clears thread-local exception storage. 
@@ -6364,8 +6519,8 @@ class HClearException FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(ClearException); - private: - DISALLOW_COPY_AND_ASSIGN(HClearException); + protected: + DEFAULT_COPY_CONSTRUCTOR(ClearException); }; class HThrow FINAL : public HTemplateInstruction<1> { @@ -6381,11 +6536,10 @@ class HThrow FINAL : public HTemplateInstruction<1> { bool CanThrow() const OVERRIDE { return true; } - DECLARE_INSTRUCTION(Throw); - private: - DISALLOW_COPY_AND_ASSIGN(HThrow); + protected: + DEFAULT_COPY_CONSTRUCTOR(Throw); }; /** @@ -6420,6 +6574,7 @@ class HInstanceOf FINAL : public HExpression<2> { SetRawInputAt(1, constant); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { @@ -6447,6 +6602,9 @@ class HInstanceOf FINAL : public HExpression<2> { DECLARE_INSTRUCTION(InstanceOf); + protected: + DEFAULT_COPY_CONSTRUCTOR(InstanceOf); + private: static constexpr size_t kFieldTypeCheckKind = kNumberOfExpressionPackedBits; static constexpr size_t kFieldTypeCheckKindSize = @@ -6455,8 +6613,6 @@ class HInstanceOf FINAL : public HExpression<2> { static constexpr size_t kNumberOfInstanceOfPackedBits = kFlagMustDoNullCheck + 1; static_assert(kNumberOfInstanceOfPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>; - - DISALLOW_COPY_AND_ASSIGN(HInstanceOf); }; class HBoundType FINAL : public HExpression<1> { @@ -6470,6 +6626,8 @@ class HBoundType FINAL : public HExpression<1> { SetRawInputAt(0, input); } + bool IsClonable() const OVERRIDE { return true; } + // {Get,Set}Upper* should only be used in reference type propagation. 
const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; } bool GetUpperCanBeNull() const { return GetPackedFlag<kFlagUpperCanBeNull>(); } @@ -6484,6 +6642,9 @@ class HBoundType FINAL : public HExpression<1> { DECLARE_INSTRUCTION(BoundType); + protected: + DEFAULT_COPY_CONSTRUCTOR(BoundType); + private: // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this // is false then CanBeNull() cannot be true). @@ -6499,8 +6660,6 @@ class HBoundType FINAL : public HExpression<1> { // // uper_bound_ will be ClassX // } ReferenceTypeInfo upper_bound_; - - DISALLOW_COPY_AND_ASSIGN(HBoundType); }; class HCheckCast FINAL : public HTemplateInstruction<2> { @@ -6516,6 +6675,7 @@ class HCheckCast FINAL : public HTemplateInstruction<2> { SetRawInputAt(1, constant); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { @@ -6536,6 +6696,9 @@ class HCheckCast FINAL : public HTemplateInstruction<2> { DECLARE_INSTRUCTION(CheckCast); + protected: + DEFAULT_COPY_CONSTRUCTOR(CheckCast); + private: static constexpr size_t kFieldTypeCheckKind = kNumberOfGenericPackedBits; static constexpr size_t kFieldTypeCheckKindSize = @@ -6544,8 +6707,6 @@ class HCheckCast FINAL : public HTemplateInstruction<2> { static constexpr size_t kNumberOfCheckCastPackedBits = kFlagMustDoNullCheck + 1; static_assert(kNumberOfCheckCastPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>; - - DISALLOW_COPY_AND_ASSIGN(HCheckCast); }; /** @@ -6582,10 +6743,15 @@ class HMemoryBarrier FINAL : public HTemplateInstruction<0> { SetPackedField<BarrierKindField>(barrier_kind); } + bool IsClonable() const OVERRIDE { return true; } + MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); } DECLARE_INSTRUCTION(MemoryBarrier); + 
protected: + DEFAULT_COPY_CONSTRUCTOR(MemoryBarrier); + private: static constexpr size_t kFieldBarrierKind = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldBarrierKindSize = @@ -6595,8 +6761,6 @@ class HMemoryBarrier FINAL : public HTemplateInstruction<0> { static_assert(kNumberOfMemoryBarrierPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using BarrierKindField = BitField<MemBarrierKind, kFieldBarrierKind, kFieldBarrierKindSize>; - - DISALLOW_COPY_AND_ASSIGN(HMemoryBarrier); }; // A constructor fence orders all prior stores to fields that could be accessed via a final field of @@ -6747,8 +6911,8 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction { DECLARE_INSTRUCTION(ConstructorFence); - private: - DISALLOW_COPY_AND_ASSIGN(HConstructorFence); + protected: + DEFAULT_COPY_CONSTRUCTOR(ConstructorFence); }; class HMonitorOperation FINAL : public HTemplateInstruction<1> { @@ -6782,6 +6946,9 @@ class HMonitorOperation FINAL : public HTemplateInstruction<1> { DECLARE_INSTRUCTION(MonitorOperation); + protected: + DEFAULT_COPY_CONSTRUCTOR(MonitorOperation); + private: static constexpr size_t kFieldOperationKind = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldOperationKindSize = @@ -6791,9 +6958,6 @@ class HMonitorOperation FINAL : public HTemplateInstruction<1> { static_assert(kNumberOfMonitorOperationPackedBits <= HInstruction::kMaxNumberOfPackedBits, "Too many packed fields."); using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>; - - private: - DISALLOW_COPY_AND_ASSIGN(HMonitorOperation); }; class HSelect FINAL : public HExpression<3> { @@ -6814,6 +6978,7 @@ class HSelect FINAL : public HExpression<3> { SetRawInputAt(2, condition); } + bool IsClonable() const OVERRIDE { return true; } HInstruction* GetFalseValue() const { return InputAt(0); } HInstruction* GetTrueValue() const { return InputAt(1); } HInstruction* GetCondition() const { 
return InputAt(2); } @@ -6829,8 +6994,8 @@ class HSelect FINAL : public HExpression<3> { DECLARE_INSTRUCTION(Select); - private: - DISALLOW_COPY_AND_ASSIGN(HSelect); + protected: + DEFAULT_COPY_CONSTRUCTOR(Select); }; class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> { @@ -6961,10 +7126,11 @@ class HParallelMove FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(ParallelMove); + protected: + DEFAULT_COPY_CONSTRUCTOR(ParallelMove); + private: ArenaVector<MoveOperands> moves_; - - DISALLOW_COPY_AND_ASSIGN(HParallelMove); }; // This instruction computes an intermediate address pointing in the 'middle' of an object. The @@ -6983,6 +7149,7 @@ class HIntermediateAddress FINAL : public HExpression<2> { SetRawInputAt(1, offset); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -6994,8 +7161,8 @@ class HIntermediateAddress FINAL : public HExpression<2> { DECLARE_INSTRUCTION(IntermediateAddress); - private: - DISALLOW_COPY_AND_ASSIGN(HIntermediateAddress); + protected: + DEFAULT_COPY_CONSTRUCTOR(IntermediateAddress); }; @@ -7070,6 +7237,33 @@ class HGraphDelegateVisitor : public HGraphVisitor { DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor); }; +// Create a clone of the instruction, insert it into the graph; replace the old one with a new +// and remove the old instruction. +HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr); + +// Create a clone for each clonable instructions/phis and replace the original with the clone. +// +// Used for testing individual instruction cloner. 
+class CloneAndReplaceInstructionVisitor : public HGraphDelegateVisitor { + public: + explicit CloneAndReplaceInstructionVisitor(HGraph* graph) + : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count(0) {} + + void VisitInstruction(HInstruction* instruction) OVERRIDE { + if (instruction->IsClonable()) { + ReplaceInstrOrPhiByClone(instruction); + instr_replaced_by_clones_count++; + } + } + + size_t GetInstrReplacedByClonesCount() const { return instr_replaced_by_clones_count; } + + private: + size_t instr_replaced_by_clones_count; + + DISALLOW_COPY_AND_ASSIGN(CloneAndReplaceInstructionVisitor); +}; + // Iterator over the blocks that art part of the loop. Includes blocks part // of an inner loop. The order in which the blocks are iterated is on their // block id. diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h index ef388c30d5..2c0595e3d8 100644 --- a/compiler/optimizing/nodes_mips.h +++ b/compiler/optimizing/nodes_mips.h @@ -30,8 +30,8 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> { DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress); - private: - DISALLOW_COPY_AND_ASSIGN(HMipsComputeBaseMethodAddress); + protected: + DEFAULT_COPY_CONSTRUCTOR(MipsComputeBaseMethodAddress); }; // Mips version of HPackedSwitch that holds a pointer to the base method address. @@ -62,11 +62,12 @@ class HMipsPackedSwitch FINAL : public HTemplateInstruction<2> { DECLARE_INSTRUCTION(MipsPackedSwitch); + protected: + DEFAULT_COPY_CONSTRUCTOR(MipsPackedSwitch); + private: const int32_t start_value_; const int32_t num_entries_; - - DISALLOW_COPY_AND_ASSIGN(HMipsPackedSwitch); }; // This instruction computes part of the array access offset (index offset). 
@@ -105,8 +106,8 @@ class HIntermediateArrayAddressIndex FINAL : public HExpression<2> { DECLARE_INSTRUCTION(IntermediateArrayAddressIndex); - private: - DISALLOW_COPY_AND_ASSIGN(HIntermediateArrayAddressIndex); + protected: + DEFAULT_COPY_CONSTRUCTOR(IntermediateArrayAddressIndex); }; } // namespace art diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h index 7b4f5f7cbb..e837f1e7e0 100644 --- a/compiler/optimizing/nodes_shared.h +++ b/compiler/optimizing/nodes_shared.h @@ -38,6 +38,8 @@ class HMultiplyAccumulate FINAL : public HExpression<3> { SetRawInputAt(kInputMulRightIndex, mul_right); } + bool IsClonable() const OVERRIDE { return true; } + static constexpr int kInputAccumulatorIndex = 0; static constexpr int kInputMulLeftIndex = 1; static constexpr int kInputMulRightIndex = 2; @@ -51,11 +53,12 @@ class HMultiplyAccumulate FINAL : public HExpression<3> { DECLARE_INSTRUCTION(MultiplyAccumulate); + protected: + DEFAULT_COPY_CONSTRUCTOR(MultiplyAccumulate); + private: // Indicates if this is a MADD or MSUB. const InstructionKind op_kind_; - - DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate); }; class HBitwiseNegatedRight FINAL : public HBinaryOperation { @@ -111,11 +114,12 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation { DECLARE_INSTRUCTION(BitwiseNegatedRight); + protected: + DEFAULT_COPY_CONSTRUCTOR(BitwiseNegatedRight); + private: // Specifies the bitwise operation, which will be then negated. const InstructionKind op_kind_; - - DISALLOW_COPY_AND_ASSIGN(HBitwiseNegatedRight); }; // This instruction computes part of the array access offset (data and index offset). 
@@ -145,6 +149,7 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> { SetRawInputAt(2, shift); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -157,8 +162,8 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> { DECLARE_INSTRUCTION(IntermediateAddressIndex); - private: - DISALLOW_COPY_AND_ASSIGN(HIntermediateAddressIndex); + protected: + DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex); }; class HDataProcWithShifterOp FINAL : public HExpression<2> { @@ -198,6 +203,7 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> { SetRawInputAt(1, right); } + bool IsClonable() const OVERRIDE { return true; } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE { const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp(); @@ -225,14 +231,15 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> { DECLARE_INSTRUCTION(DataProcWithShifterOp); + protected: + DEFAULT_COPY_CONSTRUCTOR(DataProcWithShifterOp); + private: InstructionKind instr_kind_; OpKind op_kind_; int shift_amount_; friend std::ostream& operator<<(std::ostream& os, OpKind op); - - DISALLOW_COPY_AND_ASSIGN(HDataProcWithShifterOp); }; std::ostream& operator<<(std::ostream& os, const HDataProcWithShifterOp::OpKind op); diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h index 17540b9770..59d5b9f847 100644 --- a/compiler/optimizing/nodes_vector.h +++ b/compiler/optimizing/nodes_vector.h @@ -161,10 +161,10 @@ class HVecOperation : public HVariableInputSizeInstruction { static_assert(kNumberOfVectorOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>; + DEFAULT_COPY_CONSTRUCTOR(VecOperation); + private: const size_t 
vector_length_; - - DISALLOW_COPY_AND_ASSIGN(HVecOperation); }; // Abstraction of a unary vector operation. @@ -188,8 +188,8 @@ class HVecUnaryOperation : public HVecOperation { DECLARE_ABSTRACT_INSTRUCTION(VecUnaryOperation); - private: - DISALLOW_COPY_AND_ASSIGN(HVecUnaryOperation); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecUnaryOperation); }; // Abstraction of a binary vector operation. @@ -216,8 +216,8 @@ class HVecBinaryOperation : public HVecOperation { DECLARE_ABSTRACT_INSTRUCTION(VecBinaryOperation); - private: - DISALLOW_COPY_AND_ASSIGN(HVecBinaryOperation); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecBinaryOperation); }; // Abstraction of a vector operation that references memory, with an alignment. @@ -255,10 +255,11 @@ class HVecMemoryOperation : public HVecOperation { DECLARE_ABSTRACT_INSTRUCTION(VecMemoryOperation); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecMemoryOperation); + private: Alignment alignment_; - - DISALLOW_COPY_AND_ASSIGN(HVecMemoryOperation); }; // Packed type consistency checker ("same vector length" integral types may mix freely). 
@@ -296,8 +297,8 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation { DECLARE_INSTRUCTION(VecReplicateScalar); - private: - DISALLOW_COPY_AND_ASSIGN(HVecReplicateScalar); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecReplicateScalar); }; // Extracts a particular scalar from the given vector, @@ -329,8 +330,8 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation { DECLARE_INSTRUCTION(VecExtractScalar); - private: - DISALLOW_COPY_AND_ASSIGN(HVecExtractScalar); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecExtractScalar); }; // Reduces the given vector into the first element as sum/min/max, @@ -367,10 +368,11 @@ class HVecReduce FINAL : public HVecUnaryOperation { DECLARE_INSTRUCTION(VecReduce); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecReduce); + private: const ReductionKind kind_; - - DISALLOW_COPY_AND_ASSIGN(HVecReduce); }; // Converts every component in the vector, @@ -394,8 +396,8 @@ class HVecCnv FINAL : public HVecUnaryOperation { DECLARE_INSTRUCTION(VecCnv); - private: - DISALLOW_COPY_AND_ASSIGN(HVecCnv); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecCnv); }; // Negates every component in the vector, @@ -415,8 +417,8 @@ class HVecNeg FINAL : public HVecUnaryOperation { DECLARE_INSTRUCTION(VecNeg); - private: - DISALLOW_COPY_AND_ASSIGN(HVecNeg); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecNeg); }; // Takes absolute value of every component in the vector, @@ -437,8 +439,8 @@ class HVecAbs FINAL : public HVecUnaryOperation { DECLARE_INSTRUCTION(VecAbs); - private: - DISALLOW_COPY_AND_ASSIGN(HVecAbs); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecAbs); }; // Bitwise- or boolean-nots every component in the vector, @@ -459,8 +461,8 @@ class HVecNot FINAL : public HVecUnaryOperation { DECLARE_INSTRUCTION(VecNot); - private: - DISALLOW_COPY_AND_ASSIGN(HVecNot); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecNot); }; // @@ -486,8 +488,8 @@ class HVecAdd FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecAdd); - private: - DISALLOW_COPY_AND_ASSIGN(HVecAdd); 
+ protected: + DEFAULT_COPY_CONSTRUCTOR(VecAdd); }; // Performs halving add on every component in the two vectors, viz. @@ -531,14 +533,15 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecHalvingAdd); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecHalvingAdd); + private: // Additional packed bits. static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits; static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1; static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1; static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); - - DISALLOW_COPY_AND_ASSIGN(HVecHalvingAdd); }; // Subtracts every component in the two vectors, @@ -560,8 +563,8 @@ class HVecSub FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecSub); - private: - DISALLOW_COPY_AND_ASSIGN(HVecSub); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecSub); }; // Multiplies every component in the two vectors, @@ -583,8 +586,8 @@ class HVecMul FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecMul); - private: - DISALLOW_COPY_AND_ASSIGN(HVecMul); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecMul); }; // Divides every component in the two vectors, @@ -606,8 +609,8 @@ class HVecDiv FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecDiv); - private: - DISALLOW_COPY_AND_ASSIGN(HVecDiv); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecDiv); }; // Takes minimum of every component in the two vectors, @@ -645,13 +648,14 @@ class HVecMin FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecMin); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecMin); + private: // Additional packed bits. 
static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits; static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1; static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); - - DISALLOW_COPY_AND_ASSIGN(HVecMin); }; // Takes maximum of every component in the two vectors, @@ -689,13 +693,14 @@ class HVecMax FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecMax); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecMax); + private: // Additional packed bits. static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits; static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1; static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); - - DISALLOW_COPY_AND_ASSIGN(HVecMax); }; // Bitwise-ands every component in the two vectors, @@ -716,8 +721,8 @@ class HVecAnd FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecAnd); - private: - DISALLOW_COPY_AND_ASSIGN(HVecAnd); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecAnd); }; // Bitwise-and-nots every component in the two vectors, @@ -738,8 +743,8 @@ class HVecAndNot FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecAndNot); - private: - DISALLOW_COPY_AND_ASSIGN(HVecAndNot); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecAndNot); }; // Bitwise-ors every component in the two vectors, @@ -760,8 +765,8 @@ class HVecOr FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecOr); - private: - DISALLOW_COPY_AND_ASSIGN(HVecOr); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecOr); }; // Bitwise-xors every component in the two vectors, @@ -782,8 +787,8 @@ class HVecXor FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecXor); - private: - DISALLOW_COPY_AND_ASSIGN(HVecXor); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecXor); }; // Logically shifts every component in the vector left by the given distance, @@ -804,8 +809,8 @@ class HVecShl FINAL : 
public HVecBinaryOperation { DECLARE_INSTRUCTION(VecShl); - private: - DISALLOW_COPY_AND_ASSIGN(HVecShl); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecShl); }; // Arithmetically shifts every component in the vector right by the given distance, @@ -826,8 +831,8 @@ class HVecShr FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecShr); - private: - DISALLOW_COPY_AND_ASSIGN(HVecShr); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecShr); }; // Logically shifts every component in the vector right by the given distance, @@ -848,8 +853,8 @@ class HVecUShr FINAL : public HVecBinaryOperation { DECLARE_INSTRUCTION(VecUShr); - private: - DISALLOW_COPY_AND_ASSIGN(HVecUShr); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecUShr); }; // @@ -885,8 +890,8 @@ class HVecSetScalars FINAL : public HVecOperation { DECLARE_INSTRUCTION(VecSetScalars); - private: - DISALLOW_COPY_AND_ASSIGN(HVecSetScalars); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecSetScalars); }; // Multiplies every component in the two vectors, adds the result vector to the accumulator vector, @@ -929,11 +934,12 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation { DECLARE_INSTRUCTION(VecMultiplyAccumulate); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecMultiplyAccumulate); + private: // Indicates if this is a MADD or MSUB. const InstructionKind op_kind_; - - DISALLOW_COPY_AND_ASSIGN(HVecMultiplyAccumulate); }; // Takes the absolute difference of two vectors, and adds the results to @@ -968,8 +974,8 @@ class HVecSADAccumulate FINAL : public HVecOperation { DECLARE_INSTRUCTION(VecSADAccumulate); - private: - DISALLOW_COPY_AND_ASSIGN(HVecSADAccumulate); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate); }; // Loads a vector from memory, viz. load(mem, 1) @@ -1007,13 +1013,14 @@ class HVecLoad FINAL : public HVecMemoryOperation { DECLARE_INSTRUCTION(VecLoad); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecLoad); + private: // Additional packed bits. 
static constexpr size_t kFieldIsStringCharAt = HVecOperation::kNumberOfVectorOpPackedBits; static constexpr size_t kNumberOfVecLoadPackedBits = kFieldIsStringCharAt + 1; static_assert(kNumberOfVecLoadPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); - - DISALLOW_COPY_AND_ASSIGN(HVecLoad); }; // Stores a vector to memory, viz. store(m, 1, [x1, .. , xn] ) @@ -1045,8 +1052,8 @@ class HVecStore FINAL : public HVecMemoryOperation { DECLARE_INSTRUCTION(VecStore); - private: - DISALLOW_COPY_AND_ASSIGN(HVecStore); + protected: + DEFAULT_COPY_CONSTRUCTOR(VecStore); }; } // namespace art diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h index 22e92eab31..6326065fe2 100644 --- a/compiler/optimizing/nodes_x86.h +++ b/compiler/optimizing/nodes_x86.h @@ -30,8 +30,8 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> { DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress); - private: - DISALLOW_COPY_AND_ASSIGN(HX86ComputeBaseMethodAddress); + protected: + DEFAULT_COPY_CONSTRUCTOR(X86ComputeBaseMethodAddress); }; // Load a constant value from the constant table. @@ -54,8 +54,8 @@ class HX86LoadFromConstantTable FINAL : public HExpression<2> { DECLARE_INSTRUCTION(X86LoadFromConstantTable); - private: - DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable); + protected: + DEFAULT_COPY_CONSTRUCTOR(X86LoadFromConstantTable); }; // Version of HNeg with access to the constant table for FP types. @@ -77,8 +77,8 @@ class HX86FPNeg FINAL : public HExpression<2> { DECLARE_INSTRUCTION(X86FPNeg); - private: - DISALLOW_COPY_AND_ASSIGN(HX86FPNeg); + protected: + DEFAULT_COPY_CONSTRUCTOR(X86FPNeg); }; // X86 version of HPackedSwitch that holds a pointer to the base method address. 
@@ -113,11 +113,12 @@ class HX86PackedSwitch FINAL : public HTemplateInstruction<2> { DECLARE_INSTRUCTION(X86PackedSwitch); + protected: + DEFAULT_COPY_CONSTRUCTOR(X86PackedSwitch); + private: const int32_t start_value_; const int32_t num_entries_; - - DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch); }; } // namespace art diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h index 682b82b5cd..e8e62c2b40 100644 --- a/openjdkjvmti/art_jvmti.h +++ b/openjdkjvmti/art_jvmti.h @@ -94,6 +94,10 @@ struct ArtJvmTiEnv : public jvmtiEnv { static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) { return art::down_cast<ArtJvmTiEnv*>(env); } + + // Top level lock. Nothing can be held when we get this except for mutator lock for full + // thread-suspension. + static art::Mutex *gEnvMutex ACQUIRED_AFTER(art::Locks::mutator_lock_); }; // Macro and constexpr to make error values less annoying to write. diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h index 5344e0fbde..007669b50f 100644 --- a/openjdkjvmti/events-inl.h +++ b/openjdkjvmti/events-inl.h @@ -46,6 +46,45 @@ static inline ArtJvmtiEvent GetArtJvmtiEvent(ArtJvmTiEnv* env, jvmtiEvent e) { namespace impl { +// Helper for ensuring that the dispatch environment is sane. Events with JNIEnvs need to stash +// pending exceptions since they can cause new ones to be thrown. In accordance with the JVMTI +// specification we allow exceptions originating from events to overwrite the current exception, +// including exceptions originating from earlier events. 
+class ScopedEventDispatchEnvironment FINAL : public art::ValueObject { + public: + ScopedEventDispatchEnvironment() : env_(nullptr), throw_(nullptr, nullptr) { + DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative); + } + + explicit ScopedEventDispatchEnvironment(JNIEnv* env) + : env_(env), + throw_(env_, env_->ExceptionOccurred()) { + DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative); + // The spec doesn't say how much local data should be there, so we just give 128 which seems + // likely to be enough for most cases. + env_->PushLocalFrame(128); + env_->ExceptionClear(); + } + + ~ScopedEventDispatchEnvironment() { + if (env_ != nullptr) { + if (throw_.get() != nullptr && !env_->ExceptionCheck()) { + // TODO It would be nice to add the overwritten exceptions to the suppressed exceptions list + // of the newest exception. + env_->Throw(throw_.get()); + } + env_->PopLocalFrame(nullptr); + } + DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative); + } + + private: + JNIEnv* env_; + ScopedLocalRef<jthrowable> throw_; + + DISALLOW_COPY_AND_ASSIGN(ScopedEventDispatchEnvironment); +}; + // Infrastructure to achieve type safety for event dispatch. 
#define FORALL_EVENT_TYPES(fn) \ @@ -97,27 +136,68 @@ FORALL_EVENT_TYPES(EVENT_FN_TYPE) #undef EVENT_FN_TYPE -template <ArtJvmtiEvent kEvent> -ALWAYS_INLINE inline typename EventFnType<kEvent>::type GetCallback(ArtJvmTiEnv* env); - -#define GET_CALLBACK(name, enum_name) \ -template <> \ -ALWAYS_INLINE inline EventFnType<enum_name>::type GetCallback<enum_name>( \ - ArtJvmTiEnv* env) { \ - if (env->event_callbacks == nullptr) { \ - return nullptr; \ - } \ - return env->event_callbacks->name; \ -} +#define MAKE_EVENT_HANDLER_FUNC(name, enum_name) \ +template<> \ +struct EventHandlerFunc<enum_name> { \ + using EventFnType = typename impl::EventFnType<enum_name>::type; \ + explicit EventHandlerFunc(ArtJvmTiEnv* env) \ + : env_(env), \ + fn_(env_->event_callbacks == nullptr ? nullptr : env_->event_callbacks->name) { } \ + \ + template <typename ...Args> \ + ALWAYS_INLINE \ + void ExecuteCallback(JNIEnv* jnienv, Args... args) const { \ + if (fn_ != nullptr) { \ + ScopedEventDispatchEnvironment sede(jnienv); \ + DoExecute(jnienv, args...); \ + } \ + } \ + \ + template <typename ...Args> \ + ALWAYS_INLINE \ + void ExecuteCallback(Args... args) const { \ + if (fn_ != nullptr) { \ + ScopedEventDispatchEnvironment sede; \ + DoExecute(args...); \ + } \ + } \ + \ + private: \ + template <typename ...Args> \ + ALWAYS_INLINE \ + inline void DoExecute(Args... args) const { \ + static_assert(std::is_same<EventFnType, void(*)(jvmtiEnv*, Args...)>::value, \ + "Unexpected different type of ExecuteCallback"); \ + fn_(env_, args...); \ + } \ + \ + public: \ + ArtJvmTiEnv* env_; \ + EventFnType fn_; \ +}; -FORALL_EVENT_TYPES(GET_CALLBACK) +FORALL_EVENT_TYPES(MAKE_EVENT_HANDLER_FUNC) -#undef GET_CALLBACK +#undef MAKE_EVENT_HANDLER_FUNC #undef FORALL_EVENT_TYPES } // namespace impl +template <ArtJvmtiEvent kEvent, typename ...Args> +inline std::vector<impl::EventHandlerFunc<kEvent>> EventHandler::CollectEvents(art::Thread* thread, + Args... 
args) const { + art::MutexLock mu(thread, envs_lock_); + std::vector<impl::EventHandlerFunc<kEvent>> handlers; + for (ArtJvmTiEnv* env : envs) { + if (ShouldDispatch<kEvent>(env, thread, args...)) { + impl::EventHandlerFunc<kEvent> h(env); + handlers.push_back(h); + } + } + return handlers; +} + // C++ does not allow partial template function specialization. The dispatch for our separated // ClassFileLoadHook event types is the same, so use this helper for code deduplication. template <ArtJvmtiEvent kEvent> @@ -131,29 +211,37 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread, const unsigned char* class_data, jint* new_class_data_len, unsigned char** new_class_data) const { + art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative); static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable || kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable, "Unsupported event"); DCHECK(*new_class_data == nullptr); jint current_len = class_data_len; unsigned char* current_class_data = const_cast<unsigned char*>(class_data); + std::vector<impl::EventHandlerFunc<kEvent>> handlers = + CollectEvents<kEvent>(thread, + jnienv, + class_being_redefined, + loader, + name, + protection_domain, + class_data_len, + class_data, + new_class_data_len, + new_class_data); ArtJvmTiEnv* last_env = nullptr; - for (ArtJvmTiEnv* env : envs) { - if (env == nullptr) { - continue; - } + for (const impl::EventHandlerFunc<kEvent>& event : handlers) { jint new_len = 0; unsigned char* new_data = nullptr; - DispatchEventOnEnv<kEvent>(env, - thread, - jnienv, - class_being_redefined, - loader, - name, - protection_domain, - current_len, - static_cast<const unsigned char*>(current_class_data), - &new_len, - &new_data); + ExecuteCallback<kEvent>(event, + jnienv, + class_being_redefined, + loader, + name, + protection_domain, + current_len, + static_cast<const unsigned char*>(current_class_data), + &new_len, + &new_data); if (new_data != nullptr 
&& new_data != current_class_data) { // Destroy the data the last transformer made. We skip this if the previous state was the // initial one since we don't know here which jvmtiEnv allocated it. @@ -162,7 +250,7 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread, if (last_env != nullptr) { last_env->Deallocate(current_class_data); } - last_env = env; + last_env = event.env_; current_class_data = new_data; current_len = new_len; } @@ -177,69 +265,27 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread, // exactly the argument types of the corresponding Jvmti kEvent function pointer. template <ArtJvmtiEvent kEvent, typename ...Args> -inline void EventHandler::ExecuteCallback(ArtJvmTiEnv* env, Args... args) { - using FnType = typename impl::EventFnType<kEvent>::type; - FnType callback = impl::GetCallback<kEvent>(env); - if (callback != nullptr) { - (*callback)(env, args...); - } -} - -template <ArtJvmtiEvent kEvent, typename ...Args> inline void EventHandler::DispatchEvent(art::Thread* thread, Args... args) const { + art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative); static_assert(!std::is_same<JNIEnv*, typename std::decay_t< std::tuple_element_t<0, std::tuple<Args..., nullptr_t>>>>::value, "Should be calling DispatchEvent with explicit JNIEnv* argument!"); DCHECK(thread == nullptr || !thread->IsExceptionPending()); - for (ArtJvmTiEnv* env : envs) { - if (env != nullptr) { - DispatchEventOnEnv<kEvent, Args...>(env, thread, args...); - } + std::vector<impl::EventHandlerFunc<kEvent>> events = CollectEvents<kEvent>(thread, args...); + for (auto event : events) { + ExecuteCallback<kEvent>(event, args...); } } -// Helper for ensuring that the dispatch environment is sane. Events with JNIEnvs need to stash -// pending exceptions since they can cause new ones to be thrown. 
In accordance with the JVMTI -// specification we allow exceptions originating from events to overwrite the current exception, -// including exceptions originating from earlier events. -class ScopedEventDispatchEnvironment FINAL : public art::ValueObject { - public: - explicit ScopedEventDispatchEnvironment(JNIEnv* env) - : env_(env), - thr_(env_, env_->ExceptionOccurred()), - suspend_(art::Thread::Current(), art::kNative) { - // The spec doesn't say how much local data should be there, so we just give 128 which seems - // likely to be enough for most cases. - env_->PushLocalFrame(128); - env_->ExceptionClear(); - UNUSED(suspend_); - } - - ~ScopedEventDispatchEnvironment() { - if (thr_.get() != nullptr && !env_->ExceptionCheck()) { - // TODO It would be nice to add the overwritten exceptions to the suppressed exceptions list - // of the newest exception. - env_->Throw(thr_.get()); - } - env_->PopLocalFrame(nullptr); - } - - private: - JNIEnv* env_; - ScopedLocalRef<jthrowable> thr_; - // Not actually unused. The destructor/constructor does important work. - art::ScopedThreadStateChange suspend_; - - DISALLOW_COPY_AND_ASSIGN(ScopedEventDispatchEnvironment); -}; - template <ArtJvmtiEvent kEvent, typename ...Args> inline void EventHandler::DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const { - for (ArtJvmTiEnv* env : envs) { - if (env != nullptr) { - DispatchEventOnEnv<kEvent, Args...>(env, thread, jnienv, args...); - } + art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative); + std::vector<impl::EventHandlerFunc<kEvent>> events = CollectEvents<kEvent>(thread, + jnienv, + args...); + for (auto event : events) { + ExecuteCallback<kEvent>(event, jnienv, args...); } } @@ -248,8 +294,9 @@ inline void EventHandler::DispatchEventOnEnv( ArtJvmTiEnv* env, art::Thread* thread, JNIEnv* jnienv, Args... 
args) const { DCHECK(env != nullptr); if (ShouldDispatch<kEvent, JNIEnv*, Args...>(env, thread, jnienv, args...)) { - ScopedEventDispatchEnvironment sede(jnienv); - ExecuteCallback<kEvent, JNIEnv*, Args...>(env, jnienv, args...); + art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative); + impl::EventHandlerFunc<kEvent> func(env); + ExecuteCallback<kEvent>(func, jnienv, args...); } } @@ -260,11 +307,26 @@ inline void EventHandler::DispatchEventOnEnv( typename std::decay_t< std::tuple_element_t<0, std::tuple<Args..., nullptr_t>>>>::value, "Should be calling DispatchEventOnEnv with explicit JNIEnv* argument!"); - if (ShouldDispatch<kEvent>(env, thread, args...)) { - ExecuteCallback<kEvent, Args...>(env, args...); + DCHECK(env != nullptr); + if (ShouldDispatch<kEvent, Args...>(env, thread, args...)) { + art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative); + impl::EventHandlerFunc<kEvent> func(env); + ExecuteCallback<kEvent>(func, args...); } } +template <ArtJvmtiEvent kEvent, typename ...Args> +inline void EventHandler::ExecuteCallback(impl::EventHandlerFunc<kEvent> handler, Args... args) { + handler.ExecuteCallback(args...); +} + +template <ArtJvmtiEvent kEvent, typename ...Args> +inline void EventHandler::ExecuteCallback(impl::EventHandlerFunc<kEvent> handler, + JNIEnv* jnienv, + Args... args) { + handler.ExecuteCallback(jnienv, args...); +} + // Events that need custom logic for if we send the event but are otherwise normal. This includes // the kBreakpoint, kFramePop, kFieldAccess, and kFieldModification events. @@ -347,14 +409,13 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFieldAccess>( // something. 
template <> inline void EventHandler::ExecuteCallback<ArtJvmtiEvent::kFramePop>( - ArtJvmTiEnv* env, + impl::EventHandlerFunc<ArtJvmtiEvent::kFramePop> event, JNIEnv* jnienv, jthread jni_thread, jmethodID jmethod, jboolean is_exception, const art::ShadowFrame* frame ATTRIBUTE_UNUSED) { - ExecuteCallback<ArtJvmtiEvent::kFramePop>( - env, jnienv, jni_thread, jmethod, is_exception); + ExecuteCallback<ArtJvmtiEvent::kFramePop>(event, jnienv, jni_thread, jmethod, is_exception); } // Need to give a custom specialization for NativeMethodBind since it has to deal with an out @@ -366,20 +427,25 @@ inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kNativeMethodBind>(art::T jmethodID method, void* cur_method, void** new_method) const { + art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative); + std::vector<impl::EventHandlerFunc<ArtJvmtiEvent::kNativeMethodBind>> events = + CollectEvents<ArtJvmtiEvent::kNativeMethodBind>(thread, + jnienv, + jni_thread, + method, + cur_method, + new_method); *new_method = cur_method; - for (ArtJvmTiEnv* env : envs) { - if (env != nullptr) { - *new_method = cur_method; - DispatchEventOnEnv<ArtJvmtiEvent::kNativeMethodBind>(env, - thread, - jnienv, - jni_thread, - method, - cur_method, - new_method); - if (*new_method != nullptr) { - cur_method = *new_method; - } + for (auto event : events) { + *new_method = cur_method; + ExecuteCallback<ArtJvmtiEvent::kNativeMethodBind>(event, + jnienv, + jni_thread, + method, + cur_method, + new_method); + if (*new_method != nullptr) { + cur_method = *new_method; } } *new_method = cur_method; @@ -439,7 +505,7 @@ inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetr } template <ArtJvmtiEvent kEvent> -inline bool EventHandler::ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread) { +inline bool EventHandler::ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread) const { bool dispatch = env->event_masks.global_event_mask.Test(kEvent); if (!dispatch && 
thread != nullptr && env->event_masks.unioned_thread_event_mask.Test(kEvent)) { @@ -461,6 +527,11 @@ inline bool EventHandler::ShouldDispatch(ArtJvmTiEnv* env, } inline void EventHandler::RecalculateGlobalEventMask(ArtJvmtiEvent event) { + art::MutexLock mu(art::Thread::Current(), envs_lock_); + RecalculateGlobalEventMaskLocked(event); +} + +inline void EventHandler::RecalculateGlobalEventMaskLocked(ArtJvmtiEvent event) { bool union_value = false; for (const ArtJvmTiEnv* stored_env : envs) { if (stored_env == nullptr) { diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc index d1d606de48..be4ebbc85e 100644 --- a/openjdkjvmti/events.cc +++ b/openjdkjvmti/events.cc @@ -193,25 +193,21 @@ void EventMasks::HandleChangedCapabilities(const jvmtiCapabilities& caps, bool c } void EventHandler::RegisterArtJvmTiEnv(ArtJvmTiEnv* env) { - // Since we never shrink this array we might as well try to fill gaps. - auto it = std::find(envs.begin(), envs.end(), nullptr); - if (it != envs.end()) { - *it = env; - } else { - envs.push_back(env); - } + art::MutexLock mu(art::Thread::Current(), envs_lock_); + envs.push_back(env); } void EventHandler::RemoveArtJvmTiEnv(ArtJvmTiEnv* env) { + art::MutexLock mu(art::Thread::Current(), envs_lock_); // Since we might be currently iterating over the envs list we cannot actually erase elements. // Instead we will simply replace them with 'nullptr' and skip them manually. 
auto it = std::find(envs.begin(), envs.end(), env); if (it != envs.end()) { - *it = nullptr; + envs.erase(it); for (size_t i = static_cast<size_t>(ArtJvmtiEvent::kMinEventTypeVal); i <= static_cast<size_t>(ArtJvmtiEvent::kMaxEventTypeVal); ++i) { - RecalculateGlobalEventMask(static_cast<ArtJvmtiEvent>(i)); + RecalculateGlobalEventMaskLocked(static_cast<ArtJvmtiEvent>(i)); } } } @@ -431,11 +427,11 @@ class JvmtiGcPauseListener : public art::gc::GcPauseListener { finish_enabled_(false) {} void StartPause() OVERRIDE { - handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(nullptr); + handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(art::Thread::Current()); } void EndPause() OVERRIDE { - handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(nullptr); + handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(art::Thread::Current()); } bool IsEnabled() { @@ -1176,7 +1172,8 @@ void EventHandler::Shutdown() { art::Runtime::Current()->GetInstrumentation()->RemoveListener(method_trace_listener_.get(), ~0); } -EventHandler::EventHandler() { +EventHandler::EventHandler() : envs_lock_("JVMTI Environment List Lock", + art::LockLevel::kTopLockLevel) { alloc_listener_.reset(new JvmtiAllocationListener(this)); ddm_listener_.reset(new JvmtiDdmChunkListener(this)); gc_pause_listener_.reset(new JvmtiGcPauseListener(this)); diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h index a99ed7b212..c73215f07b 100644 --- a/openjdkjvmti/events.h +++ b/openjdkjvmti/events.h @@ -158,6 +158,10 @@ struct EventMasks { void HandleChangedCapabilities(const jvmtiCapabilities& caps, bool caps_added); }; +namespace impl { +template <ArtJvmtiEvent kEvent> struct EventHandlerFunc { }; +} // namespace impl + // Helper class for event handling. class EventHandler { public: @@ -169,10 +173,10 @@ class EventHandler { // Register an env. It is assumed that this happens on env creation, that is, no events are // enabled, yet. 
- void RegisterArtJvmTiEnv(ArtJvmTiEnv* env); + void RegisterArtJvmTiEnv(ArtJvmTiEnv* env) REQUIRES(!envs_lock_); // Remove an env. - void RemoveArtJvmTiEnv(ArtJvmTiEnv* env); + void RemoveArtJvmTiEnv(ArtJvmTiEnv* env) REQUIRES(!envs_lock_); bool IsEventEnabledAnywhere(ArtJvmtiEvent event) const { if (!EventMask::EventIsInRange(event)) { @@ -184,13 +188,15 @@ class EventHandler { jvmtiError SetEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event, - jvmtiEventMode mode); + jvmtiEventMode mode) + REQUIRES(!envs_lock_); // Dispatch event to all registered environments. Since this one doesn't have a JNIEnv* it doesn't // matter if it has the mutator_lock. template <ArtJvmtiEvent kEvent, typename ...Args> ALWAYS_INLINE - inline void DispatchEvent(art::Thread* thread, Args... args) const; + inline void DispatchEvent(art::Thread* thread, Args... args) const + REQUIRES(!envs_lock_); // Dispatch event to all registered environments stashing exceptions as needed. This works since // JNIEnv* is always the second argument if it is passed to an event. Needed since C++ does not @@ -200,7 +206,8 @@ class EventHandler { // the event to allocate local references. template <ArtJvmtiEvent kEvent, typename ...Args> ALWAYS_INLINE - inline void DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const; + inline void DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const + REQUIRES(!envs_lock_); // Tell the event handler capabilities were added/lost so it can adjust the sent events.If // caps_added is true then caps is all the newly set capabilities of the jvmtiEnv. If it is false @@ -208,30 +215,50 @@ class EventHandler { ALWAYS_INLINE inline void HandleChangedCapabilities(ArtJvmTiEnv* env, const jvmtiCapabilities& caps, - bool added); + bool added) + REQUIRES(!envs_lock_); // Dispatch event to the given environment, only. 
template <ArtJvmtiEvent kEvent, typename ...Args> ALWAYS_INLINE - inline void DispatchEventOnEnv( - ArtJvmTiEnv* env, art::Thread* thread, JNIEnv* jnienv, Args... args) const; + inline void DispatchEventOnEnv(ArtJvmTiEnv* env, + art::Thread* thread, + JNIEnv* jnienv, + Args... args) const + REQUIRES(!envs_lock_); // Dispatch event to the given environment, only. template <ArtJvmtiEvent kEvent, typename ...Args> ALWAYS_INLINE - inline void DispatchEventOnEnv(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const; + inline void DispatchEventOnEnv(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const + REQUIRES(!envs_lock_); private: + template <ArtJvmtiEvent kEvent, typename ...Args> + ALWAYS_INLINE + inline std::vector<impl::EventHandlerFunc<kEvent>> CollectEvents(art::Thread* thread, + Args... args) const + REQUIRES(!envs_lock_); + template <ArtJvmtiEvent kEvent> ALWAYS_INLINE - static inline bool ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread); + inline bool ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread) const; template <ArtJvmtiEvent kEvent, typename ...Args> ALWAYS_INLINE - static inline void ExecuteCallback(ArtJvmTiEnv* env, Args... args); + static inline void ExecuteCallback(impl::EventHandlerFunc<kEvent> handler, + JNIEnv* env, + Args... args) + REQUIRES(!envs_lock_); template <ArtJvmtiEvent kEvent, typename ...Args> ALWAYS_INLINE + static inline void ExecuteCallback(impl::EventHandlerFunc<kEvent> handler, Args... args) + REQUIRES(!envs_lock_); + + // Public for use to collect dispatches + template <ArtJvmtiEvent kEvent, typename ...Args> + ALWAYS_INLINE inline bool ShouldDispatch(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const; ALWAYS_INLINE @@ -241,7 +268,9 @@ class EventHandler { // Recalculates the event mask for the given event. 
ALWAYS_INLINE - inline void RecalculateGlobalEventMask(ArtJvmtiEvent event); + inline void RecalculateGlobalEventMask(ArtJvmtiEvent event) REQUIRES(!envs_lock_); + ALWAYS_INLINE + inline void RecalculateGlobalEventMaskLocked(ArtJvmtiEvent event) REQUIRES(envs_lock_); template <ArtJvmtiEvent kEvent> ALWAYS_INLINE inline void DispatchClassFileLoadHookEvent(art::Thread* thread, @@ -253,7 +282,8 @@ class EventHandler { jint class_data_len, const unsigned char* class_data, jint* new_class_data_len, - unsigned char** new_class_data) const; + unsigned char** new_class_data) const + REQUIRES(!envs_lock_); void HandleEventType(ArtJvmtiEvent event, bool enable); void HandleLocalAccessCapabilityAdded(); @@ -261,10 +291,13 @@ class EventHandler { bool OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event); - // List of all JvmTiEnv objects that have been created, in their creation order. - // NB Some elements might be null representing envs that have been deleted. They should be skipped - // anytime this list is used. - std::vector<ArtJvmTiEnv*> envs; + // List of all JvmTiEnv objects that have been created, in their creation order. It is a std::list + // since we mostly access it by iterating over the entire thing, only ever append to the end, and + // need to be able to remove arbitrary elements from it. + std::list<ArtJvmTiEnv*> envs GUARDED_BY(envs_lock_); + + // Top level lock. Nothing at all should be held when we lock this. + mutable art::Mutex envs_lock_ ACQUIRED_BEFORE(art::Locks::instrument_entrypoints_lock_); // A union of all enabled events, anywhere. 
EventMask global_mask; diff --git a/openjdkjvmti/object_tagging.cc b/openjdkjvmti/object_tagging.cc index 6ba7165577..ba242ef1e8 100644 --- a/openjdkjvmti/object_tagging.cc +++ b/openjdkjvmti/object_tagging.cc @@ -61,7 +61,8 @@ bool ObjectTagTable::DoesHandleNullOnSweep() { return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree); } void ObjectTagTable::HandleNullSweep(jlong tag) { - event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>(jvmti_env_, nullptr, tag); + event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>( + jvmti_env_, art::Thread::Current(), tag); } } // namespace openjdkjvmti diff --git a/openjdkjvmti/ti_dump.cc b/openjdkjvmti/ti_dump.cc index 809a5e47bb..253580e0e1 100644 --- a/openjdkjvmti/ti_dump.cc +++ b/openjdkjvmti/ti_dump.cc @@ -47,7 +47,7 @@ struct DumpCallback : public art::RuntimeSigQuitCallback { void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { art::Thread* thread = art::Thread::Current(); art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative); - event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(nullptr); + event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(art::Thread::Current()); } EventHandler* event_handler = nullptr; diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc index 23df27fbda..7157974c13 100644 --- a/openjdkjvmti/ti_phase.cc +++ b/openjdkjvmti/ti_phase.cc @@ -57,6 +57,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback { } void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE { + art::Thread* self = art::Thread::Current(); switch (phase) { case RuntimePhase::kInitialAgents: PhaseUtil::current_phase_ = JVMTI_PHASE_PRIMORDIAL; @@ -64,8 +65,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback { case RuntimePhase::kStart: { PhaseUtil::current_phase_ = JVMTI_PHASE_START; - art::ScopedThreadSuspension sts(art::Thread::Current(), 
art::ThreadState::kNative); - event_handler->DispatchEvent<ArtJvmtiEvent::kVmStart>(nullptr, GetJniEnv()); + event_handler->DispatchEvent<ArtJvmtiEvent::kVmStart>(self, GetJniEnv()); } break; case RuntimePhase::kInit: @@ -74,9 +74,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback { PhaseUtil::current_phase_ = JVMTI_PHASE_LIVE; { ScopedLocalRef<jthread> thread(GetJniEnv(), GetCurrentJThread()); - art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative); - event_handler->DispatchEvent<ArtJvmtiEvent::kVmInit>( - nullptr, GetJniEnv(), thread.get()); + event_handler->DispatchEvent<ArtJvmtiEvent::kVmInit>(self, GetJniEnv(), thread.get()); } // We need to have these events be ordered to match behavior expected by some real-world // agents. The spec does not really require this but compatibility is a useful property to @@ -86,8 +84,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback { break; case RuntimePhase::kDeath: { - art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative); - event_handler->DispatchEvent<ArtJvmtiEvent::kVmDeath>(nullptr, GetJniEnv()); + event_handler->DispatchEvent<ArtJvmtiEvent::kVmDeath>(self, GetJniEnv()); PhaseUtil::current_phase_ = JVMTI_PHASE_DEAD; } // TODO: Block events now. diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h index c9d48ff7f7..587b092ab7 100644 --- a/runtime/base/mutex-inl.h +++ b/runtime/base/mutex-inl.h @@ -80,7 +80,9 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY // (see Thread::TransitionFromSuspendedToRunnable). level == kThreadSuspendCountLock || // Avoid recursive death. - level == kAbortLock) << level; + level == kAbortLock || + // Locks at the absolute top of the stack can be locked at any time. 
+ level == kTopLockLevel) << level; } } @@ -92,10 +94,34 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) { if (kDebugLocking) { // Check if a bad Mutex of this level or lower is held. bool bad_mutexes_held = false; + // Specifically allow a kTopLockLevel lock to be gained when the current thread holds the + // mutator_lock_ exclusive. This is because suspending while holding locks at this level is + // not allowed and if we hold the mutator_lock_ exclusive we must unsuspend stuff eventually + // so there are no deadlocks. + if (level_ == kTopLockLevel && + Locks::mutator_lock_->IsSharedHeld(self) && + !Locks::mutator_lock_->IsExclusiveHeld(self)) { + LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" " + << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock) + << ") non-exclusive while locking \"" << name_ << "\" " + << "(level " << level_ << " - " << static_cast<int>(level_) << ") a top level " + << "mutex. This is not allowed."; + bad_mutexes_held = true; + } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) { + LOG(ERROR) << "Lock level violation. Locking mutator_lock_ while already having a " + << "kTopLockLevel (" << self->GetHeldMutex(kTopLockLevel)->name_ << ") held is " + << "not allowed."; + bad_mutexes_held = true; + } for (int i = level_; i >= 0; --i) { LockLevel lock_level_i = static_cast<LockLevel>(i); BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i); - if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) { + if (level_ == kTopLockLevel && + lock_level_i == kMutatorLock && + Locks::mutator_lock_->IsExclusiveHeld(self)) { + // This is checked above.
+ continue; + } else if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) { LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" " << "(level " << lock_level_i << " - " << i << ") while locking \"" << name_ << "\" " diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 87c4afe96f..c0cf4872de 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -122,6 +122,16 @@ enum LockLevel { kInstrumentEntrypointsLock, kZygoteCreationLock, + // The highest valid lock level. Use this if there is code that should only be called with no + // other locks held. Since this is the highest lock level we also allow it to be held even if the + // runtime or current thread is not fully set-up yet (for example during thread attach). Note that + // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not + // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusive. + // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is + // held. Since the mutator_lock_ being held strong means that all other threads are suspended this + // will prevent deadlocks while still allowing this lock level to function as a "highest" level. + kTopLockLevel, + kLockLevelCount // Must come last. 
}; std::ostream& operator<<(std::ostream& os, const LockLevel& rhs); diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc index 3bf169ad40..548752e980 100644 --- a/runtime/ti/agent.cc +++ b/runtime/ti/agent.cc @@ -21,6 +21,8 @@ #include "base/strlcpy.h" #include "java_vm_ext.h" #include "runtime.h" +#include "thread-current-inl.h" +#include "scoped_thread_state_change-inl.h" namespace art { namespace ti { @@ -35,6 +37,7 @@ const char* AGENT_ON_UNLOAD_FUNCTION_NAME = "Agent_OnUnload"; Agent::LoadError Agent::DoLoadHelper(bool attaching, /*out*/jint* call_res, /*out*/std::string* error_msg) { + ScopedThreadStateChange stsc(Thread::Current(), ThreadState::kNative); DCHECK(call_res != nullptr); DCHECK(error_msg != nullptr); diff --git a/test/1941-dispose-stress/dispose_stress.cc b/test/1941-dispose-stress/dispose_stress.cc new file mode 100644 index 0000000000..e8fcc775e9 --- /dev/null +++ b/test/1941-dispose-stress/dispose_stress.cc @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <atomic> + +#include "android-base/logging.h" +#include "jni.h" +#include "scoped_local_ref.h" +#include "scoped_primitive_array.h" + +#include "jvmti.h" + +// Test infrastructure +#include "jvmti_helper.h" +#include "test_env.h" + +namespace art { +namespace Test1941DisposeStress { + +extern "C" JNIEXPORT jlong JNICALL Java_art_Test1941_AllocEnv(JNIEnv* env, jclass) { + JavaVM* vm = nullptr; + if (env->GetJavaVM(&vm) != 0) { + ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException")); + env->ThrowNew(rt_exception.get(), "Unable to get JavaVM"); + return -1; + } + jvmtiEnv* new_env = nullptr; + if (vm->GetEnv(reinterpret_cast<void**>(&new_env), JVMTI_VERSION_1_0) != 0) { + ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException")); + env->ThrowNew(rt_exception.get(), "Unable to create new jvmtiEnv"); + return -1; + } + return static_cast<jlong>(reinterpret_cast<intptr_t>(new_env)); +} + +extern "C" JNIEXPORT void JNICALL Java_art_Test1941_FreeEnv(JNIEnv* env, + jclass, + jlong jvmti_env_ptr) { + JvmtiErrorToException(env, + jvmti_env, + reinterpret_cast<jvmtiEnv*>(jvmti_env_ptr)->DisposeEnvironment()); +} + +} // namespace Test1941DisposeStress +} // namespace art + diff --git a/test/1941-dispose-stress/expected.txt b/test/1941-dispose-stress/expected.txt new file mode 100644 index 0000000000..ca2eddc7b8 --- /dev/null +++ b/test/1941-dispose-stress/expected.txt @@ -0,0 +1 @@ +fib(20) is 6765 diff --git a/test/1941-dispose-stress/info.txt b/test/1941-dispose-stress/info.txt new file mode 100644 index 0000000000..e4a584e46f --- /dev/null +++ b/test/1941-dispose-stress/info.txt @@ -0,0 +1,3 @@ +Test stressing JVMTI environment creation and disposal. + +Ensures that we can repeatedly allocate and dispose jvmtiEnvs on one thread while events are being dispatched on another.
diff --git a/test/1941-dispose-stress/run b/test/1941-dispose-stress/run new file mode 100755 index 0000000000..51875a7e86 --- /dev/null +++ b/test/1941-dispose-stress/run @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Run the test with the jvmti agent attached. +./default-run "$@" --jvmti diff --git a/test/1941-dispose-stress/src/Main.java b/test/1941-dispose-stress/src/Main.java new file mode 100644 index 0000000000..2fe6b818a0 --- /dev/null +++ b/test/1941-dispose-stress/src/Main.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +public class Main { + public static void main(String[] args) throws Exception { + art.Test1941.run(); + } +} diff --git a/test/1941-dispose-stress/src/art/Breakpoint.java b/test/1941-dispose-stress/src/art/Breakpoint.java new file mode 100644 index 0000000000..bbb89f707f --- /dev/null +++ b/test/1941-dispose-stress/src/art/Breakpoint.java @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.lang.reflect.Executable; +import java.util.HashSet; +import java.util.Set; +import java.util.Objects; + +public class Breakpoint { + public static class Manager { + public static class BP { + public final Executable method; + public final long location; + + public BP(Executable method) { + this(method, getStartLocation(method)); + } + + public BP(Executable method, long location) { + this.method = method; + this.location = location; + } + + @Override + public boolean equals(Object other) { + return (other instanceof BP) && + method.equals(((BP)other).method) && + location == ((BP)other).location; + } + + @Override + public String toString() { + return method.toString() + " @ " + getLine(); + } + + @Override + public int hashCode() { + return Objects.hash(method, location); + } + + public int getLine() { + try { + LineNumber[] lines = getLineNumberTable(method); + int best = -1; + for (LineNumber l : lines) { + if (l.location > location) { + break; + } else 
{ + best = l.line; + } + } + return best; + } catch (Exception e) { + return -1; + } + } + } + + private Set<BP> breaks = new HashSet<>(); + + public void setBreakpoints(BP... bs) { + for (BP b : bs) { + if (breaks.add(b)) { + Breakpoint.setBreakpoint(b.method, b.location); + } + } + } + public void setBreakpoint(Executable method, long location) { + setBreakpoints(new BP(method, location)); + } + + public void clearBreakpoints(BP... bs) { + for (BP b : bs) { + if (breaks.remove(b)) { + Breakpoint.clearBreakpoint(b.method, b.location); + } + } + } + public void clearBreakpoint(Executable method, long location) { + clearBreakpoints(new BP(method, location)); + } + + public void clearAllBreakpoints() { + clearBreakpoints(breaks.toArray(new BP[0])); + } + } + + public static void startBreakpointWatch(Class<?> methodClass, + Executable breakpointReached, + Thread thr) { + startBreakpointWatch(methodClass, breakpointReached, false, thr); + } + + /** + * Enables the trapping of breakpoint events. + * + * If allowRecursive == true then breakpoints will be sent even if one is currently being handled. 
+ */ + public static native void startBreakpointWatch(Class<?> methodClass, + Executable breakpointReached, + boolean allowRecursive, + Thread thr); + public static native void stopBreakpointWatch(Thread thr); + + public static final class LineNumber implements Comparable<LineNumber> { + public final long location; + public final int line; + + private LineNumber(long loc, int line) { + this.location = loc; + this.line = line; + } + + public boolean equals(Object other) { + return other instanceof LineNumber && ((LineNumber)other).line == line && + ((LineNumber)other).location == location; + } + + public int compareTo(LineNumber other) { + int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line)); + if (v != 0) { + return v; + } else { + return Long.valueOf(location).compareTo(Long.valueOf(other.location)); + } + } + } + + public static native void setBreakpoint(Executable m, long loc); + public static void setBreakpoint(Executable m, LineNumber l) { + setBreakpoint(m, l.location); + } + + public static native void clearBreakpoint(Executable m, long loc); + public static void clearBreakpoint(Executable m, LineNumber l) { + clearBreakpoint(m, l.location); + } + + private static native Object[] getLineNumberTableNative(Executable m); + public static LineNumber[] getLineNumberTable(Executable m) { + Object[] nativeTable = getLineNumberTableNative(m); + long[] location = (long[])(nativeTable[0]); + int[] lines = (int[])(nativeTable[1]); + if (lines.length != location.length) { + throw new Error("Lines and locations have different lengths!"); + } + LineNumber[] out = new LineNumber[lines.length]; + for (int i = 0; i < lines.length; i++) { + out[i] = new LineNumber(location[i], lines[i]); + } + return out; + } + + public static native long getStartLocation(Executable m); + + public static int locationToLine(Executable m, long location) { + try { + Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m); + int best = -1; + for (Breakpoint.LineNumber l : 
lines) { + if (l.location > location) { + break; + } else { + best = l.line; + } + } + return best; + } catch (Exception e) { + return -1; + } + } + + public static long lineToLocation(Executable m, int line) throws Exception { + try { + Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m); + for (Breakpoint.LineNumber l : lines) { + if (l.line == line) { + return l.location; + } + } + throw new Exception("Unable to find line " + line + " in " + m); + } catch (Exception e) { + throw new Exception("Unable to get line number info for " + m, e); + } + } +} + diff --git a/test/1941-dispose-stress/src/art/Test1941.java b/test/1941-dispose-stress/src/art/Test1941.java new file mode 100644 index 0000000000..d5a9de6cab --- /dev/null +++ b/test/1941-dispose-stress/src/art/Test1941.java @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.util.Arrays; +import java.lang.reflect.Executable; +import java.lang.reflect.Method; + +public class Test1941 { + public static final boolean PRINT_CNT = false; + public static long CNT = 0; + + // Method with multiple paths we can break on. 
+ public static long fib(long f) { + if (f < 0) { + throw new IllegalArgumentException("Bad argument f < 0: f = " + f); + } else if (f == 0) { + return 0; + } else if (f == 1) { + return 1; + } else { + return fib(f - 1) + fib(f - 2); + } + } + + public static void notifySingleStep(Thread thr, Executable e, long loc) { + // Don't bother actually doing anything. + } + + public static void LoopAllocFreeEnv() { + while (!Thread.interrupted()) { + CNT++; + long env = AllocEnv(); + FreeEnv(env); + } + } + + public static native long AllocEnv(); + public static native void FreeEnv(long env); + + public static void run() throws Exception { + Thread thr = new Thread(Test1941::LoopAllocFreeEnv, "LoopNative"); + thr.start(); + Trace.enableSingleStepTracing(Test1941.class, + Test1941.class.getDeclaredMethod( + "notifySingleStep", Thread.class, Executable.class, Long.TYPE), + null); + + System.out.println("fib(20) is " + fib(20)); + + thr.interrupt(); + thr.join(); + Trace.disableTracing(null); + if (PRINT_CNT) { + System.out.println("Number of envs created/destroyed: " + CNT); + } + } +} diff --git a/test/1941-dispose-stress/src/art/Trace.java b/test/1941-dispose-stress/src/art/Trace.java new file mode 100644 index 0000000000..8999bb1368 --- /dev/null +++ b/test/1941-dispose-stress/src/art/Trace.java @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package art; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; + +public class Trace { + public static native void enableTracing(Class<?> methodClass, + Method entryMethod, + Method exitMethod, + Method fieldAccess, + Method fieldModify, + Method singleStep, + Thread thr); + public static native void disableTracing(Thread thr); + + public static void enableFieldTracing(Class<?> methodClass, + Method fieldAccess, + Method fieldModify, + Thread thr) { + enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr); + } + + public static void enableMethodTracing(Class<?> methodClass, + Method entryMethod, + Method exitMethod, + Thread thr) { + enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr); + } + + public static void enableSingleStepTracing(Class<?> methodClass, + Method singleStep, + Thread thr) { + enableTracing(methodClass, null, null, null, null, singleStep, thr); + } + + public static native void watchFieldAccess(Field f); + public static native void watchFieldModification(Field f); + public static native void watchAllFieldAccesses(); + public static native void watchAllFieldModifications(); + + // the names, arguments, and even line numbers of these functions are embedded in the tests so we + // need to add to the bottom and not modify old ones to maintain compat. + public static native void enableTracing2(Class<?> methodClass, + Method entryMethod, + Method exitMethod, + Method fieldAccess, + Method fieldModify, + Method singleStep, + Method ThreadStart, + Method ThreadEnd, + Thread thr); +} diff --git a/test/Android.bp b/test/Android.bp index ba24119e9c..8f29251907 100644 --- a/test/Android.bp +++ b/test/Android.bp @@ -259,6 +259,7 @@ art_cc_defaults { "1932-monitor-events-misc/monitor_misc.cc", "1934-jvmti-signal-thread/signal_threads.cc", "1939-proxy-frames/local_instance.cc", + "1941-dispose-stress/dispose_stress.cc", ], shared_libs: [ "libbase", |