summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/dex/verified_method.cc43
-rw-r--r--compiler/dex/verified_method.h10
-rw-r--r--compiler/image_writer.h1
-rw-r--r--compiler/optimizing/code_generator_arm64.cc13
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc12
-rw-r--r--compiler/optimizing/code_generator_mips.cc29
-rw-r--r--compiler/optimizing/code_generator_mips.h7
-rw-r--r--compiler/optimizing/code_generator_mips64.cc130
-rw-r--r--compiler/optimizing/code_generator_mips64.h18
-rw-r--r--compiler/optimizing/codegen_test.cc25
-rw-r--r--compiler/optimizing/inliner.cc50
-rw-r--r--compiler/optimizing/inliner.h3
-rw-r--r--compiler/optimizing/intrinsics_mips.cc95
-rw-r--r--compiler/optimizing/reference_type_propagation.cc47
-rw-r--r--compiler/utils/mips/assembler_mips.cc7
-rw-r--r--compiler/utils/mips/assembler_mips.h1
-rw-r--r--compiler/utils/mips/assembler_mips32r6_test.cc8
-rw-r--r--compiler/utils/mips64/assembler_mips64.cc81
-rw-r--r--compiler/utils/mips64/assembler_mips64.h44
-rw-r--r--compiler/utils/mips64/assembler_mips64_test.cc31
-rw-r--r--compiler/verifier_deps_test.cc43
-rw-r--r--runtime/art_method-inl.h19
-rw-r--r--runtime/art_method.h8
-rw-r--r--runtime/base/hash_set.h2
-rw-r--r--runtime/base/mutex.h1
-rw-r--r--runtime/class_linker-inl.h12
-rw-r--r--runtime/class_linker.h2
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h11
-rw-r--r--runtime/instrumentation.cc28
-rw-r--r--runtime/instrumentation.h16
-rw-r--r--runtime/intern_table.cc4
-rw-r--r--runtime/intern_table.h9
-rw-r--r--runtime/intern_table_test.cc20
-rw-r--r--runtime/interpreter/interpreter_common.h7
-rw-r--r--runtime/jdwp/jdwp_adb.cc101
-rw-r--r--runtime/jit/jit_code_cache.cc66
-rw-r--r--runtime/jit/jit_code_cache.h9
-rw-r--r--runtime/jit/profiling_info.h4
-rw-r--r--runtime/mirror/dex_cache.h2
-rw-r--r--runtime/mirror/object-inl.h24
-rw-r--r--runtime/mirror/object.h8
-rw-r--r--runtime/monitor.cc35
-rw-r--r--runtime/openjdkjvmti/ti_redefine.cc307
-rw-r--r--runtime/openjdkjvmti/ti_redefine.h17
-rw-r--r--runtime/runtime.h2
-rw-r--r--runtime/stack.cc11
-rw-r--r--runtime/stack.h10
-rw-r--r--runtime/stack_map.h16
-rw-r--r--runtime/verifier/verifier_deps.cc17
-rw-r--r--test/626-checker-arm64-scratch-register/expected.txt1
-rw-r--r--test/626-checker-arm64-scratch-register/info.txt2
-rw-r--r--test/626-checker-arm64-scratch-register/src/Main.java298
-rw-r--r--test/631-checker-fp-abs/expected.txt1
-rw-r--r--test/631-checker-fp-abs/info.txt1
-rw-r--r--test/631-checker-fp-abs/src/Main.java176
-rwxr-xr-xtest/901-hello-ti-agent/run10
-rwxr-xr-xtest/902-hello-transformation/run26
-rwxr-xr-xtest/903-hello-tagging/run26
-rwxr-xr-xtest/904-object-allocation/run26
-rwxr-xr-xtest/905-object-free/run26
-rwxr-xr-xtest/906-iterate-heap/run26
-rwxr-xr-xtest/907-get-loaded-classes/run26
-rwxr-xr-xtest/908-gc-start-finish/run26
-rwxr-xr-xtest/910-methods/run26
-rwxr-xr-xtest/911-get-stack-trace/run26
-rwxr-xr-xtest/912-classes/run26
-rwxr-xr-xtest/913-heaps/run26
-rwxr-xr-xtest/914-hello-obsolescence/build17
-rw-r--r--test/914-hello-obsolescence/expected.txt9
-rw-r--r--test/914-hello-obsolescence/info.txt1
-rwxr-xr-xtest/914-hello-obsolescence/run19
-rw-r--r--test/914-hello-obsolescence/src/Main.java73
-rw-r--r--test/914-hello-obsolescence/src/Transform.java30
-rwxr-xr-xtest/915-obsolete-2/build17
-rw-r--r--test/915-obsolete-2/expected.txt21
-rw-r--r--test/915-obsolete-2/info.txt1
-rwxr-xr-xtest/915-obsolete-2/run19
-rw-r--r--test/915-obsolete-2/src/Main.java99
-rw-r--r--test/915-obsolete-2/src/Transform.java35
-rwxr-xr-xtest/916-obsolete-jit/build17
-rw-r--r--test/916-obsolete-jit/expected.txt21
-rw-r--r--test/916-obsolete-jit/info.txt1
-rwxr-xr-xtest/916-obsolete-jit/run27
-rw-r--r--test/916-obsolete-jit/src/Main.java210
-rw-r--r--test/916-obsolete-jit/src/Transform.java43
-rwxr-xr-xtest/917-fields-transformation/run26
-rw-r--r--test/Android.bp3
-rw-r--r--test/Android.run-test.mk5
-rw-r--r--test/common/runtime_state.cc11
-rw-r--r--test/common/stack_inspect.cc84
-rwxr-xr-xtest/etc/default-build13
-rwxr-xr-xtest/etc/run-test-jar29
-rw-r--r--test/ti-agent/common_helper.cc3
-rw-r--r--test/ti-agent/common_load.cc3
94 files changed, 2486 insertions, 592 deletions
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 188209bfb8..cbca33320d 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -59,49 +59,6 @@ bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc);
}
-bool VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
- if (method_verifier->HasFailures()) {
- return false;
- }
- const DexFile::CodeItem* code_item = method_verifier->CodeItem();
- const uint16_t* insns = code_item->insns_;
- const Instruction* inst = Instruction::At(insns);
- const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_);
- for (; inst < end; inst = inst->Next()) {
- const bool is_virtual_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK;
- const bool is_range_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK;
- if (is_virtual_quick || is_range_quick) {
- uint32_t dex_pc = inst->GetDexPc(insns);
- verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- ArtMethod* method =
- method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick, true);
- if (method == nullptr) {
- // It can be null if the line wasn't verified since it was unreachable.
- return false;
- }
- // The verifier must know what the type of the object was or else we would have gotten a
- // failure. Put the dex method index in the dequicken map since we need this to get number of
- // arguments in the compiler.
- dequicken_map_.Put(dex_pc, DexFileReference(method->GetDexFile(),
- method->GetDexMethodIndex()));
- } else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) {
- uint32_t dex_pc = inst->GetDexPc(insns);
- verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
- if (field == nullptr) {
- // It can be null if the line wasn't verified since it was unreachable.
- return false;
- }
- // The verifier must know what the type of the field was or else we would have gotten a
- // failure. Put the dex field index in the dequicken map since we need this for lowering
- // in the compiler.
- // TODO: Putting a field index in a method reference is gross.
- dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex()));
- }
- }
- return true;
-}
-
void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) {
/*
* Walks over the method code and adds any cast instructions in which
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 0530a8cc18..439e69ece9 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -39,9 +39,6 @@ class VerifiedMethod {
// is better for performance (not just memory usage), especially for large sets.
typedef std::vector<uint32_t> SafeCastSet;
- // Devirtualization map type maps dex offset to field / method idx.
- typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
-
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier)
REQUIRES_SHARED(Locks::mutator_lock_);
~VerifiedMethod() = default;
@@ -68,17 +65,10 @@ class VerifiedMethod {
}
private:
- // Generate dequickening map into dequicken_map_. Returns false if there is an error.
- bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Generate safe case set into safe_cast_set_.
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Dequicken map is required for compiling quickened byte codes. The quicken maps from
- // dex PC to dex method index or dex field index based on the instruction.
- DequickenMap dequicken_map_;
SafeCastSet safe_cast_set_;
const uint32_t encountered_error_types_;
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index c5374838f6..cc7df1ce21 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -27,6 +27,7 @@
#include <string>
#include <ostream>
+#include "art_method.h"
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/enums.h"
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 13616db535..5c33fe1a7d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1533,8 +1533,17 @@ void CodeGeneratorARM64::MoveLocation(Location destination,
DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
UseScratchRegisterScope temps(GetVIXLAssembler());
- // There is generally less pressure on FP registers.
- FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
+ // Use any scratch register (a core or a floating-point one)
+ // from VIXL scratch register pools as a temporary.
+ //
+ // We used to only use the FP scratch register pool, but in some
+ // rare cases the only register from this pool (D31) would
+ // already be used (e.g. within a ParallelMove instruction, when
+ // a move is blocked by a another move requiring a scratch FP
+ // register, which would reserve D31). To prevent this issue, we
+ // ask for a scratch register of any type (core or FP).
+ CPURegister temp =
+ temps.AcquireCPURegisterOfSize(destination.IsDoubleStackSlot() ? kXRegSize : kWRegSize);
__ Ldr(temp, StackOperandFrom(source));
__ Str(temp, StackOperandFrom(destination));
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f108595a00..00ad3e34b7 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5681,13 +5681,13 @@ void ParallelMoveResolverARMVIXL::Exchange(vixl32::Register reg, int mem) {
void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) {
// TODO(VIXL32): Double check the performance of this implementation.
UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- vixl32::SRegister temp_s = temps.AcquireS();
+ vixl32::SRegister temp_1 = temps.AcquireS();
+ vixl32::SRegister temp_2 = temps.AcquireS();
- __ Ldr(temp, MemOperand(sp, mem1));
- __ Vldr(temp_s, MemOperand(sp, mem2));
- __ Str(temp, MemOperand(sp, mem2));
- __ Vstr(temp_s, MemOperand(sp, mem1));
+ __ Vldr(temp_1, MemOperand(sp, mem1));
+ __ Vldr(temp_2, MemOperand(sp, mem2));
+ __ Vstr(temp_1, MemOperand(sp, mem2));
+ __ Vstr(temp_2, MemOperand(sp, mem1));
}
void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) {
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c5029b3902..01e0dac33e 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1133,11 +1133,15 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholder(
__ SetReorder(reordering);
}
-void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
+void CodeGeneratorMIPS::MarkGCCard(Register object,
+ Register value,
+ bool value_can_be_null) {
MipsLabel done;
Register card = AT;
Register temp = TMP;
- __ Beqz(value, &done);
+ if (value_can_be_null) {
+ __ Beqz(value, &done);
+ }
__ LoadFromOffset(kLoadWord,
card,
TR,
@@ -1145,7 +1149,9 @@ void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
__ Srl(temp, object, gc::accounting::CardTable::kCardShift);
__ Addu(temp, card, temp);
__ Sb(card, temp, 0);
- __ Bind(&done);
+ if (value_can_be_null) {
+ __ Bind(&done);
+ }
}
void CodeGeneratorMIPS::SetupBlockedRegisters() const {
@@ -2064,7 +2070,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
__ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->MarkGCCard(obj, value);
+ codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
}
}
} else {
@@ -4868,7 +4874,8 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field
void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info,
- uint32_t dex_pc) {
+ uint32_t dex_pc,
+ bool value_can_be_null) {
Primitive::Type type = field_info.GetFieldType();
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
@@ -4963,7 +4970,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
// TODO: memory barriers?
if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
Register src = value_location.AsRegister<Register>();
- codegen_->MarkGCCard(obj, src);
+ codegen_->MarkGCCard(obj, src, value_can_be_null);
}
if (is_volatile) {
@@ -4984,7 +4991,10 @@ void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction)
}
void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+ HandleFieldSet(instruction,
+ instruction->GetFieldInfo(),
+ instruction->GetDexPc(),
+ instruction->GetValueCanBeNull());
}
void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(
@@ -6175,7 +6185,10 @@ void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+ HandleFieldSet(instruction,
+ instruction->GetFieldInfo(),
+ instruction->GetDexPc(),
+ instruction->GetValueCanBeNull());
}
void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 9a9a8380cb..7b0812cb7b 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -236,7 +236,10 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
void HandleBinaryOp(HBinaryOperation* operation);
void HandleCondition(HCondition* instruction);
void HandleShift(HBinaryOperation* operation);
- void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ void HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ uint32_t dex_pc,
+ bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
// Generate a GC root reference load:
//
@@ -350,7 +353,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
// Emit linker patches.
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
- void MarkGCCard(Register object, Register value);
+ void MarkGCCard(Register object, Register value, bool value_can_be_null);
// Register allocation.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 799360ebdc..36690c0569 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2356,19 +2356,40 @@ void InstructionCodeGeneratorMIPS64::GenerateIntLongCompare(IfCondition cond,
switch (cond) {
case kCondEQ:
case kCondNE:
- if (use_imm && IsUint<16>(rhs_imm)) {
- __ Xori(dst, lhs, rhs_imm);
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst64(rhs_reg, rhs_imm);
+ if (use_imm && IsInt<16>(-rhs_imm)) {
+ if (rhs_imm == 0) {
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, lhs, 1);
+ } else {
+ __ Sltu(dst, ZERO, lhs);
+ }
+ } else {
+ if (is64bit) {
+ __ Daddiu(dst, lhs, -rhs_imm);
+ } else {
+ __ Addiu(dst, lhs, -rhs_imm);
+ }
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
- __ Xor(dst, lhs, rhs_reg);
- }
- if (cond == kCondEQ) {
- __ Sltiu(dst, dst, 1);
} else {
- __ Sltu(dst, ZERO, dst);
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
break;
@@ -4254,9 +4275,12 @@ void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conver
break;
case Primitive::kPrimInt:
case Primitive::kPrimLong:
- // Sign-extend 32-bit int into bits 32 through 63 for
- // int-to-long and long-to-int conversions
- __ Sll(dst, src, 0);
+ // Sign-extend 32-bit int into bits 32 through 63 for int-to-long and long-to-int
+ // conversions, except when the input and output registers are the same and we are not
+ // converting longs to shorter types. In these cases, do nothing.
+ if ((input_type == Primitive::kPrimLong) || (dst != src)) {
+ __ Sll(dst, src, 0);
+ }
break;
default:
@@ -4493,27 +4517,20 @@ void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
locations->SetInAt(0, Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
- int32_t lower_bound = switch_instr->GetStartValue();
- int32_t num_entries = switch_instr->GetNumEntries();
- LocationSummary* locations = switch_instr->GetLocations();
- GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
-
+void InstructionCodeGeneratorMIPS64::GenPackedSwitchWithCompares(GpuRegister value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block) {
// Create a set of compare/jumps.
GpuRegister temp_reg = TMP;
- if (IsInt<16>(-lower_bound)) {
- __ Addiu(temp_reg, value_reg, -lower_bound);
- } else {
- __ LoadConst32(AT, -lower_bound);
- __ Addu(temp_reg, value_reg, AT);
- }
+ __ Addiu32(temp_reg, value_reg, -lower_bound);
// Jump to default if index is negative
// Note: We don't check the case that index is positive while value < lower_bound, because in
// this case, index >= num_entries must be true. So that we can save one branch instruction.
__ Bltzc(temp_reg, codegen_->GetLabelOf(default_block));
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+ const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
// Jump to successors[0] if value == lower_bound.
__ Beqzc(temp_reg, codegen_->GetLabelOf(successors[0]));
int32_t last_index = 0;
@@ -4531,11 +4548,66 @@ void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_ins
}
// And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+ if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
__ Bc(codegen_->GetLabelOf(default_block));
}
}
+void InstructionCodeGeneratorMIPS64::GenTableBasedPackedSwitch(GpuRegister value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block) {
+ // Create a jump table.
+ std::vector<Mips64Label*> labels(num_entries);
+ const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
+ for (uint32_t i = 0; i < num_entries; i++) {
+ labels[i] = codegen_->GetLabelOf(successors[i]);
+ }
+ JumpTable* table = __ CreateJumpTable(std::move(labels));
+
+ // Is the value in range?
+ __ Addiu32(TMP, value_reg, -lower_bound);
+ __ LoadConst32(AT, num_entries);
+ __ Bgeuc(TMP, AT, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load the target address from the jump table, indexing by the value.
+ __ LoadLabelAddress(AT, table->GetLabel());
+ __ Sll(TMP, TMP, 2);
+ __ Daddu(TMP, TMP, AT);
+ __ Lw(TMP, TMP, 0);
+ // Compute the absolute target address by adding the table start address
+ // (the table contains offsets to targets relative to its start).
+ __ Daddu(TMP, TMP, AT);
+ // And jump.
+ __ Jr(TMP);
+ __ Nop();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ uint32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
+ HBasicBlock* switch_block = switch_instr->GetBlock();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ if (num_entries > kPackedSwitchJumpTableThreshold) {
+ GenTableBasedPackedSwitch(value_reg,
+ lower_bound,
+ num_entries,
+ switch_block,
+ default_block);
+ } else {
+ GenPackedSwitchWithCompares(value_reg,
+ lower_bound,
+ num_entries,
+ switch_block,
+ default_block);
+ }
+}
+
void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet*) {
UNIMPLEMENTED(FATAL) << "ClassTableGet is unimplemented on mips64";
}
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index d5811c20e3..8ac919f47e 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -217,6 +217,14 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
Mips64Assembler* GetAssembler() const { return assembler_; }
+ // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
+ // instructions for N cases.
+ // Table-based packed switch generates approx. 11 32-bit instructions
+ // and N 32-bit data words for N cases.
+ // At N = 6 they come out as 18 and 17 32-bit words respectively.
+ // We switch to the table-based method starting with 7 cases.
+ static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+
private:
void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
void GenerateMemoryBarrier(MemBarrierKind kind);
@@ -260,6 +268,16 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
LocationSummary* locations,
Mips64Label* label);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
+ void GenPackedSwitchWithCompares(GpuRegister value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block);
+ void GenTableBasedPackedSwitch(GpuRegister value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block);
Mips64Assembler* const assembler_;
CodeGeneratorMIPS64* const codegen_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index ac83bd9b0c..879b4ce59e 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -1041,6 +1041,31 @@ TEST_F(CodegenTest, ComparisonsLong) {
}
}
+#ifdef ART_ENABLE_CODEGEN_arm
+TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
+ std::unique_ptr<const ArmInstructionSetFeatures> features(
+ ArmInstructionSetFeatures::FromCppDefines());
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+ arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions());
+
+ codegen.Initialize();
+
+ // This will result in calling EmitSwap -> void ParallelMoveResolverARMVIXL::Exchange(int mem1,
+ // int mem2) which was faulty (before the fix). So previously GPR and FP scratch registers were
+ // used as temps; however GPR scratch register is required for big stack offsets which don't fit
+ // LDR encoding. So the following code is a regression test for that situation.
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), Primitive::kPrimInt, nullptr);
+ move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), Primitive::kPrimInt, nullptr);
+ codegen.GetMoveResolver()->EmitNativeCode(move);
+
+ InternalCodeAllocator code_allocator;
+ codegen.Finalize(&code_allocator);
+}
+#endif
+
#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(CodegenTest, MipsClobberRA) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d84787984d..3b83e95071 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -344,6 +344,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (actual_method != nullptr) {
bool result = TryInlineAndReplace(invoke_instruction,
actual_method,
+ ReferenceTypeInfo::CreateInvalid(),
/* do_rtp */ true,
cha_devirtualize);
if (result && !invoke_instruction->IsInvokeStaticOrDirect()) {
@@ -471,9 +472,10 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
HInstruction* receiver = invoke_instruction->InputAt(0);
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
-
+ Handle<mirror::Class> handle = handles_->NewHandle(GetMonomorphicType(classes));
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
+ ReferenceTypeInfo::Create(handle, /* is_exact */ true),
/* do_rtp */ false,
/* cha_devirtualize */ false)) {
return false;
@@ -591,13 +593,13 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
break;
}
ArtMethod* method = nullptr;
+
+ Handle<mirror::Class> handle = handles_->NewHandle(classes->Get(i));
if (invoke_instruction->IsInvokeInterface()) {
- method = classes->Get(i)->FindVirtualMethodForInterface(
- resolved_method, pointer_size);
+ method = handle->FindVirtualMethodForInterface(resolved_method, pointer_size);
} else {
DCHECK(invoke_instruction->IsInvokeVirtual());
- method = classes->Get(i)->FindVirtualMethodForVirtual(
- resolved_method, pointer_size);
+ method = handle->FindVirtualMethodForVirtual(resolved_method, pointer_size);
}
HInstruction* receiver = invoke_instruction->InputAt(0);
@@ -605,10 +607,13 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
dex::TypeIndex class_index = FindClassIndexIn(
- classes->Get(i), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ handle.Get(), caller_dex_file, caller_compilation_unit_.GetDexCache());
HInstruction* return_replacement = nullptr;
if (!class_index.IsValid() ||
- !TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
+ !TryBuildAndInline(invoke_instruction,
+ method,
+ ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+ &return_replacement)) {
all_targets_inlined = false;
} else {
one_target_inlined = true;
@@ -627,7 +632,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
cursor,
bb_cursor,
class_index,
- classes->Get(i),
+ handle.Get(),
invoke_instruction,
deoptimize);
if (deoptimize) {
@@ -792,7 +797,10 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
HInstruction* return_replacement = nullptr;
- if (!TryBuildAndInline(invoke_instruction, actual_method, &return_replacement)) {
+ if (!TryBuildAndInline(invoke_instruction,
+ actual_method,
+ ReferenceTypeInfo::CreateInvalid(),
+ &return_replacement)) {
return false;
}
@@ -857,13 +865,14 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
ArtMethod* method,
+ ReferenceTypeInfo receiver_type,
bool do_rtp,
bool cha_devirtualize) {
HInstruction* return_replacement = nullptr;
uint32_t dex_pc = invoke_instruction->GetDexPc();
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- if (!TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
+ if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
// Turn an invoke-interface into an invoke-virtual. An invoke-virtual is always
// better than an invoke-interface because:
@@ -921,6 +930,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* method,
+ ReferenceTypeInfo receiver_type,
HInstruction** return_replacement) {
if (method->IsProxyMethod()) {
VLOG(compiler) << "Method " << method->PrettyMethod()
@@ -997,7 +1007,8 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
- if (!TryBuildAndInlineHelper(invoke_instruction, method, same_dex_file, return_replacement)) {
+ if (!TryBuildAndInlineHelper(
+ invoke_instruction, method, receiver_type, same_dex_file, return_replacement)) {
return false;
}
@@ -1194,8 +1205,10 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex
bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
+ ReferenceTypeInfo receiver_type,
bool same_dex_file,
HInstruction** return_replacement) {
+ DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
ScopedObjectAccess soa(Thread::Current());
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
@@ -1286,12 +1299,13 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
size_t parameter_index = 0;
+ bool run_rtp = false;
for (HInstructionIterator instructions(callee_graph->GetEntryBlock()->GetInstructions());
!instructions.Done();
instructions.Advance()) {
HInstruction* current = instructions.Current();
if (current->IsParameterValue()) {
- HInstruction* argument = invoke_instruction->InputAt(parameter_index++);
+ HInstruction* argument = invoke_instruction->InputAt(parameter_index);
if (argument->IsNullConstant()) {
current->ReplaceWith(callee_graph->GetNullConstant());
} else if (argument->IsIntConstant()) {
@@ -1305,15 +1319,21 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
current->ReplaceWith(
callee_graph->GetDoubleConstant(argument->AsDoubleConstant()->GetValue()));
} else if (argument->GetType() == Primitive::kPrimNot) {
- current->SetReferenceTypeInfo(argument->GetReferenceTypeInfo());
+ if (!resolved_method->IsStatic() && parameter_index == 0 && receiver_type.IsValid()) {
+ run_rtp = true;
+ current->SetReferenceTypeInfo(receiver_type);
+ } else {
+ current->SetReferenceTypeInfo(argument->GetReferenceTypeInfo());
+ }
current->AsParameterValue()->SetCanBeNull(argument->CanBeNull());
}
+ ++parameter_index;
}
}
// We have replaced formal arguments with actual arguments. If actual types
// are more specific than the declared ones, run RTP again on the inner graph.
- if (ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
+ if (run_rtp || ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
ReferenceTypePropagation(callee_graph,
dex_compilation_unit.GetDexCache(),
handles_,
@@ -1502,7 +1522,7 @@ static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
ReferenceTypeInfo actual_rti = actual_obj->GetReferenceTypeInfo();
return (actual_rti.IsExact() && !declared_rti.IsExact()) ||
- declared_rti.IsStrictSupertypeOf(actual_rti);
+ declared_rti.IsStrictSupertypeOf(actual_rti);
}
ReferenceTypeInfo HInliner::GetClassRTI(mirror::Class* klass) {
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 0c6436235f..4c0b990f26 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -66,17 +66,20 @@ class HInliner : public HOptimization {
// a CHA guard needs to be added for the inlining.
bool TryInlineAndReplace(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
+ ReferenceTypeInfo receiver_type,
bool do_rtp,
bool cha_devirtualize)
REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
+ ReferenceTypeInfo receiver_type,
HInstruction** return_replacement)
REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
+ ReferenceTypeInfo receiver_type,
bool same_dex_file,
HInstruction** return_replacement);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 9b5d7a02dd..e9c6615870 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1648,7 +1648,8 @@ static void GenUnsafePut(LocationSummary* locations,
}
if (type == Primitive::kPrimNot) {
- codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>());
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>(), value_can_be_null);
}
}
@@ -1806,7 +1807,8 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
if (type == Primitive::kPrimNot) {
// Mark card for object assuming new value is stored.
- codegen->MarkGCCard(base, value);
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(base, value, value_can_be_null);
}
// do {
@@ -2464,6 +2466,94 @@ void IntrinsicCodeGeneratorMIPS::VisitMathRoundFloat(HInvoke* invoke) {
__ Bind(&done);
}
+// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
+void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnMainOnly,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->SetInAt(4, Location::RequiresRegister());
+
+ // We will call memcpy() to do the actual work. Allocate the temporary
+ // registers to use the correct input registers, and output register.
+ // memcpy() uses the normal MIPS calling convention.
+ InvokeRuntimeCallingConvention calling_convention;
+
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Check assumption that sizeof(Char) is 2 (used in scaling below).
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+ const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar);
+
+ Register srcObj = locations->InAt(0).AsRegister<Register>();
+ Register srcBegin = locations->InAt(1).AsRegister<Register>();
+ Register srcEnd = locations->InAt(2).AsRegister<Register>();
+ Register dstObj = locations->InAt(3).AsRegister<Register>();
+ Register dstBegin = locations->InAt(4).AsRegister<Register>();
+
+ Register dstPtr = locations->GetTemp(0).AsRegister<Register>();
+ DCHECK_EQ(dstPtr, A0);
+ Register srcPtr = locations->GetTemp(1).AsRegister<Register>();
+ DCHECK_EQ(srcPtr, A1);
+ Register numChrs = locations->GetTemp(2).AsRegister<Register>();
+ DCHECK_EQ(numChrs, A2);
+
+ Register dstReturn = locations->GetTemp(3).AsRegister<Register>();
+ DCHECK_EQ(dstReturn, V0);
+
+ MipsLabel done;
+
+ // Location of data in char array buffer.
+ const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+ // Get offset of value field within a string object.
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+
+ __ Beq(srcEnd, srcBegin, &done); // No characters to move.
+
+ // Calculate number of characters to be copied.
+ __ Subu(numChrs, srcEnd, srcBegin);
+
+ // Calculate destination address.
+ __ Addiu(dstPtr, dstObj, data_offset);
+ if (IsR6()) {
+ __ Lsa(dstPtr, dstBegin, dstPtr, char_shift);
+ } else {
+ __ Sll(AT, dstBegin, char_shift);
+ __ Addu(dstPtr, dstPtr, AT);
+ }
+
+ // Calculate source address.
+ __ Addiu(srcPtr, srcObj, value_offset);
+ if (IsR6()) {
+ __ Lsa(srcPtr, srcBegin, srcPtr, char_shift);
+ } else {
+ __ Sll(AT, srcBegin, char_shift);
+ __ Addu(srcPtr, srcPtr, AT);
+ }
+
+ // Calculate number of bytes to copy from number of characters.
+ __ Sll(numChrs, numChrs, char_shift);
+
+ codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+
+ __ Bind(&done);
+}
+
// Unimplemented intrinsics.
UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
@@ -2473,7 +2563,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index c191c6651f..33b3875e3b 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -270,7 +270,7 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
ScopedObjectAccess soa(Thread::Current());
HInstruction* insert_point = notNullBlock->GetFirstInstruction();
ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_.GetObjectClassHandle(), /* is_exact */ true);
+ handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
bound_type = new (graph_->GetArena()) HBoundType(obj);
bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false);
@@ -411,7 +411,9 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
bound_type = new (graph_->GetArena()) HBoundType(obj);
- bound_type->SetUpperBound(class_rti, /* InstanceOf fails for null. */ false);
+ bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
+ bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact),
+ /* InstanceOf fails for null. */ false);
instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
} else {
// We already have a bound type on the position we would need to insert
@@ -605,15 +607,17 @@ void ReferenceTypePropagation::RTPVisitor::VisitBoundType(HBoundType* instr) {
// Narrow the type as much as possible.
HInstruction* obj = instr->InputAt(0);
ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
- if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
+ if (class_rti.IsExact()) {
+ instr->SetReferenceTypeInfo(class_rti);
} else if (obj_rti.IsValid()) {
if (class_rti.IsSupertypeOf(obj_rti)) {
// Object type is more specific.
instr->SetReferenceTypeInfo(obj_rti);
} else {
- // Upper bound is more specific.
+ // Upper bound is more specific, or unrelated to the object's type.
+ // Note that the object might then be exact, and we know the code dominated by this
+ // bound type is dead. To not confuse potential other optimizations, we mark
+ // the bound as non-exact.
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
}
@@ -644,8 +648,11 @@ void ReferenceTypePropagation::RTPVisitor::VisitCheckCast(HCheckCast* check_cast
if (class_rti.IsValid()) {
DCHECK(is_first_run_);
+ ScopedObjectAccess soa(Thread::Current());
// This is the first run of RTP and class is resolved.
- bound_type->SetUpperBound(class_rti, /* CheckCast succeeds for nulls. */ true);
+ bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
+ bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact),
+ /* CheckCast succeeds for nulls. */ true);
} else {
// This is the first run of RTP and class is unresolved. Remove the binding.
// The instruction itself is removed in VisitBoundType so as to not
@@ -795,21 +802,25 @@ void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) {
}
void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) {
- ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- if (!new_rti.IsValid()) {
+ ReferenceTypeInfo input_rti = instr->InputAt(0)->GetReferenceTypeInfo();
+ if (!input_rti.IsValid()) {
return; // No new info yet.
}
- // Make sure that we don't go over the bounded type.
ReferenceTypeInfo upper_bound_rti = instr->GetUpperBound();
- if (!upper_bound_rti.IsSupertypeOf(new_rti)) {
- // Note that the input might be exact, in which case we know the branch leading
- // to the bound type is dead. We play it safe by not marking the bound type as
- // exact.
- bool is_exact = upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
- new_rti = ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), is_exact);
- }
- instr->SetReferenceTypeInfo(new_rti);
+ if (upper_bound_rti.IsExact()) {
+ instr->SetReferenceTypeInfo(upper_bound_rti);
+ } else if (upper_bound_rti.IsSupertypeOf(input_rti)) {
+ // Input is more specific.
+ instr->SetReferenceTypeInfo(input_rti);
+ } else {
+ // upper_bound is more specific or unrelated.
+ // Note that the object might then be exact, and we know the code dominated by this
+ // bound type is dead. To not confuse potential other optimizations, we mark
+ // the bound as non-exact.
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
+ }
}
// NullConstant inputs are ignored during merging as they do not provide any useful information.
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 3dcad6a6b9..5e83e825ed 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -635,6 +635,13 @@ void MipsAssembler::Ins(Register rd, Register rt, int pos, int size) {
DsFsmInstrRrr(EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04), rd, rd, rt);
}
+void MipsAssembler::Lsa(Register rd, Register rs, Register rt, int saPlusOne) {
+ CHECK(IsR6());
+ CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
+ int sa = saPlusOne - 1;
+ DsFsmInstrRrr(EmitR(0x0, rs, rt, rd, sa, 0x05), rd, rs, rt);
+}
+
void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
DsFsmInstrRrr(EmitI(0x20, rs, rt, imm16), rt, rs, rs);
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 800dc5f9a1..2fca185ec3 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -262,6 +262,7 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void Srav(Register rd, Register rt, Register rs);
void Ext(Register rd, Register rt, int pos, int size); // R2+
void Ins(Register rd, Register rt, int pos, int size); // R2+
+ void Lsa(Register rd, Register rs, Register rt, int saPlusOne); // R6
void Lb(Register rt, Register rs, uint16_t imm16);
void Lh(Register rt, Register rs, uint16_t imm16);
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index a52f519439..30667efa38 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -319,6 +319,14 @@ TEST_F(AssemblerMIPS32r6Test, Bitswap) {
DriverStr(RepeatRR(&mips::MipsAssembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
}
+TEST_F(AssemblerMIPS32r6Test, Lsa) {
+ DriverStr(RepeatRRRIb(&mips::MipsAssembler::Lsa,
+ 2,
+ "lsa ${reg1}, ${reg2}, ${reg3}, {imm}",
+ 1),
+ "lsa");
+}
+
TEST_F(AssemblerMIPS32r6Test, Seleqz) {
DriverStr(RepeatRRR(&mips::MipsAssembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
"seleqz");
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 5906a71b38..998f2c709b 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -35,12 +35,14 @@ void Mips64Assembler::FinalizeCode() {
for (auto& exception_block : exception_blocks_) {
EmitExceptionPoll(&exception_block);
}
+ ReserveJumpTableSpace();
EmitLiterals();
PromoteBranches();
}
void Mips64Assembler::FinalizeInstructions(const MemoryRegion& region) {
EmitBranches();
+ EmitJumpTables();
Assembler::FinalizeInstructions(region);
PatchCFI();
}
@@ -482,6 +484,10 @@ void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}
+void Mips64Assembler::Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0xf, rs, rt, imm16);
+}
+
void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
}
@@ -1081,6 +1087,20 @@ void Mips64Assembler::LoadConst64(GpuRegister rd, int64_t value) {
TemplateLoadConst64(this, rd, value);
}
+void Mips64Assembler::Addiu32(GpuRegister rt, GpuRegister rs, int32_t value) {
+ if (IsInt<16>(value)) {
+ Addiu(rt, rs, value);
+ } else {
+ int16_t high = High16Bits(value);
+ int16_t low = Low16Bits(value);
+ high += (low < 0) ? 1 : 0; // Account for sign extension in addiu.
+ Aui(rt, rs, high);
+ if (low != 0) {
+ Addiu(rt, rt, low);
+ }
+ }
+}
+
void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
if (IsInt<16>(value)) {
Daddiu(rt, rs, value);
@@ -1653,6 +1673,67 @@ void Mips64Assembler::LoadLiteral(GpuRegister dest_reg,
FinalizeLabeledBranch(label);
}
+JumpTable* Mips64Assembler::CreateJumpTable(std::vector<Mips64Label*>&& labels) {
+ jump_tables_.emplace_back(std::move(labels));
+ JumpTable* table = &jump_tables_.back();
+ DCHECK(!table->GetLabel()->IsBound());
+ return table;
+}
+
+void Mips64Assembler::ReserveJumpTableSpace() {
+ if (!jump_tables_.empty()) {
+ for (JumpTable& table : jump_tables_) {
+ Mips64Label* label = table.GetLabel();
+ Bind(label);
+
+ // Bulk ensure capacity, as this may be large.
+ size_t orig_size = buffer_.Size();
+ size_t required_capacity = orig_size + table.GetSize();
+ if (required_capacity > buffer_.Capacity()) {
+ buffer_.ExtendCapacity(required_capacity);
+ }
+#ifndef NDEBUG
+ buffer_.has_ensured_capacity_ = true;
+#endif
+
+ // Fill the space with dummy data as the data is not final
+ // until the branches have been promoted. And we shouldn't
+ // be moving uninitialized data during branch promotion.
+ for (size_t cnt = table.GetData().size(), i = 0; i < cnt; i++) {
+ buffer_.Emit<uint32_t>(0x1abe1234u);
+ }
+
+#ifndef NDEBUG
+ buffer_.has_ensured_capacity_ = false;
+#endif
+ }
+ }
+}
+
+void Mips64Assembler::EmitJumpTables() {
+ if (!jump_tables_.empty()) {
+ CHECK(!overwriting_);
+ // Switch from appending instructions at the end of the buffer to overwriting
+ // existing instructions (here, jump tables) in the buffer.
+ overwriting_ = true;
+
+ for (JumpTable& table : jump_tables_) {
+ Mips64Label* table_label = table.GetLabel();
+ uint32_t start = GetLabelLocation(table_label);
+ overwrite_location_ = start;
+
+ for (Mips64Label* target : table.GetData()) {
+ CHECK_EQ(buffer_.Load<uint32_t>(overwrite_location_), 0x1abe1234u);
+ // The table will contain target addresses relative to the table start.
+ uint32_t offset = GetLabelLocation(target) - start;
+ Emit(offset);
+ }
+ }
+
+ overwriting_ = false;
+ }
+}
+
void Mips64Assembler::EmitLiterals() {
if (!literals_.empty()) {
for (Literal& literal : literals_) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 7ef5ab0d39..a0a1db634d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -357,6 +357,36 @@ class Literal {
DISALLOW_COPY_AND_ASSIGN(Literal);
};
+// Jump table: table of labels emitted after the code and before the literals. Similar to literals.
+class JumpTable {
+ public:
+ explicit JumpTable(std::vector<Mips64Label*>&& labels)
+ : label_(), labels_(std::move(labels)) {
+ }
+
+ size_t GetSize() const {
+ return labels_.size() * sizeof(uint32_t);
+ }
+
+ const std::vector<Mips64Label*>& GetData() const {
+ return labels_;
+ }
+
+ Mips64Label* GetLabel() {
+ return &label_;
+ }
+
+ const Mips64Label* GetLabel() const {
+ return &label_;
+ }
+
+ private:
+ Mips64Label label_;
+ std::vector<Mips64Label*> labels_;
+
+ DISALLOW_COPY_AND_ASSIGN(JumpTable);
+};
+
// Slowpath entered when Thread::Current()->_exception is non-null.
class Mips64ExceptionSlowPath {
public:
@@ -388,6 +418,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
overwrite_location_(0),
literals_(arena->Adapter(kArenaAllocAssembler)),
long_literals_(arena->Adapter(kArenaAllocAssembler)),
+ jump_tables_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0) {
@@ -480,6 +511,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Lwupc(GpuRegister rs, uint32_t imm19); // MIPS64
void Ldpc(GpuRegister rs, uint32_t imm18); // MIPS64
void Lui(GpuRegister rt, uint16_t imm16);
+ void Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64
void Dati(GpuRegister rs, uint16_t imm16); // MIPS64
void Sync(uint32_t stype);
@@ -619,6 +651,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
// This function is only used for testing purposes.
void RecordLoadConst64Path(int value);
+ void Addiu32(GpuRegister rt, GpuRegister rs, int32_t value);
void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT); // MIPS64
void Bind(Label* label) OVERRIDE {
@@ -676,6 +709,12 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
// Load literal using PC-relative loads.
void LoadLiteral(GpuRegister dest_reg, LoadOperandType load_type, Literal* literal);
+ // Create a jump table for the given labels that will be emitted when finalizing.
+ // When the table is emitted, offsets will be relative to the location of the table.
+ // The table location is determined by the location of its label (the label precedes
+ // the table data) and should be loaded using LoadLabelAddress().
+ JumpTable* CreateJumpTable(std::vector<Mips64Label*>&& labels);
+
void Bc(Mips64Label* label);
void Balc(Mips64Label* label);
void Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
@@ -1050,6 +1089,8 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
const Branch* GetBranch(uint32_t branch_id) const;
void EmitLiterals();
+ void ReserveJumpTableSpace();
+ void EmitJumpTables();
void PromoteBranches();
void EmitBranch(Branch* branch);
void EmitBranches();
@@ -1073,6 +1114,9 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
ArenaDeque<Literal> literals_;
ArenaDeque<Literal> long_literals_; // 64-bit literals separated for alignment reasons.
+ // Jump table list.
+ ArenaDeque<JumpTable> jump_tables_;
+
// Data for AdjustedPosition(), see the description there.
uint32_t last_position_adjustment_;
uint32_t last_old_position_;
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 564559f92c..f2cbebbfd7 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -1904,9 +1904,9 @@ TEST_F(AssemblerMIPS64Test, StoreFpuToOffset) {
DriverStr(expected, "StoreFpuToOffset");
}
-///////////////////////
-// Loading Constants //
-///////////////////////
+//////////////////////////////
+// Loading/adding Constants //
+//////////////////////////////
TEST_F(AssemblerMIPS64Test, LoadConst32) {
// IsUint<16>(value)
@@ -1949,6 +1949,31 @@ TEST_F(AssemblerMIPS64Test, LoadConst32) {
DriverStr(expected, "LoadConst32");
}
+TEST_F(AssemblerMIPS64Test, Addiu32) {
+ __ Addiu32(mips64::A1, mips64::A2, -0x8000);
+ __ Addiu32(mips64::A1, mips64::A2, +0);
+ __ Addiu32(mips64::A1, mips64::A2, +0x7FFF);
+ __ Addiu32(mips64::A1, mips64::A2, -0x8001);
+ __ Addiu32(mips64::A1, mips64::A2, +0x8000);
+ __ Addiu32(mips64::A1, mips64::A2, -0x10000);
+ __ Addiu32(mips64::A1, mips64::A2, +0x10000);
+ __ Addiu32(mips64::A1, mips64::A2, +0x12345678);
+
+ const char* expected =
+ "addiu $a1, $a2, -0x8000\n"
+ "addiu $a1, $a2, 0\n"
+ "addiu $a1, $a2, 0x7FFF\n"
+ "aui $a1, $a2, 0xFFFF\n"
+ "addiu $a1, $a1, 0x7FFF\n"
+ "aui $a1, $a2, 1\n"
+ "addiu $a1, $a1, -0x8000\n"
+ "aui $a1, $a2, 0xFFFF\n"
+ "aui $a1, $a2, 1\n"
+ "aui $a1, $a2, 0x1234\n"
+ "addiu $a1, $a1, 0x5678\n";
+ DriverStr(expected, "Addiu32");
+}
+
static uint64_t SignExtend16To64(uint16_t n) {
return static_cast<int16_t>(n);
}
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 23f54d75bd..e716cdbed8 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -539,21 +539,9 @@ TEST_F(VerifierDepsTest, Assignable_BothArrays_Resolved) {
ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
}
-TEST_F(VerifierDepsTest, Assignable_BothArrays_Erroneous) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;",
- /* src */ "[[LMyErroneousTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ true));
- // If the component type of an array is erroneous, we record the dependency on
- // the array type.
- ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[LMyErroneousTimeZone;", true));
- ASSERT_TRUE(HasAssignable("[Ljava/util/TimeZone;", "[LMyErroneousTimeZone;", true));
- ASSERT_FALSE(HasAssignable("Ljava/util/TimeZone;", "LMyErroneousTimeZone;", true));
-}
-
- // We test that VerifierDeps does not try to optimize by storing assignability
- // of the component types. This is due to the fact that the component type may
- // be an erroneous class, even though the array type has resolved status.
+// We test that VerifierDeps does not try to optimize by storing assignability
+// of the component types. This is due to the fact that the component type may
+// be an erroneous class, even though the array type has resolved status.
TEST_F(VerifierDepsTest, Assignable_ArrayToInterface1) {
ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/io/Serializable;",
@@ -608,16 +596,6 @@ TEST_F(VerifierDepsTest, ArgumentType_ResolvedClass) {
ASSERT_TRUE(HasClass("Ljava/lang/Thread;", true, "public"));
}
-TEST_F(VerifierDepsTest, ArgumentType_ResolvedReferenceArray) {
- ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedReferenceArray"));
- ASSERT_TRUE(HasClass("[Ljava/lang/Thread;", true, "public final abstract"));
-}
-
-TEST_F(VerifierDepsTest, ArgumentType_ResolvedPrimitiveArray) {
- ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedPrimitiveArray"));
- ASSERT_TRUE(HasClass("[B", true, "public final abstract"));
-}
-
TEST_F(VerifierDepsTest, ArgumentType_UnresolvedClass) {
ASSERT_TRUE(VerifyMethod("ArgumentType_UnresolvedClass"));
ASSERT_TRUE(HasClass("LUnresolvedClass;", false));
@@ -714,11 +692,6 @@ TEST_F(VerifierDepsTest, NewInstance_Unresolved) {
ASSERT_TRUE(HasClass("LUnresolvedClass;", false));
}
-TEST_F(VerifierDepsTest, NewArray_Resolved) {
- ASSERT_TRUE(VerifyMethod("NewArray_Resolved"));
- ASSERT_TRUE(HasClass("[Ljava/lang/IllegalStateException;", true, "public final abstract"));
-}
-
TEST_F(VerifierDepsTest, NewArray_Unresolved) {
ASSERT_TRUE(VerifyMethod("NewArray_Unresolved"));
ASSERT_TRUE(HasClass("[LUnresolvedClass;", false));
@@ -1128,6 +1101,16 @@ TEST_F(VerifierDepsTest, InvokeSuper_ThisNotAssignable) {
"virtual", "Ljava/lang/Integer;", "intValue", "()I", true, "public", "Ljava/lang/Integer;"));
}
+TEST_F(VerifierDepsTest, ArgumentType_ResolvedReferenceArray) {
+ ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedReferenceArray"));
+ ASSERT_TRUE(HasClass("[Ljava/lang/Thread;", true, "public final abstract"));
+}
+
+TEST_F(VerifierDepsTest, NewArray_Resolved) {
+ ASSERT_TRUE(VerifyMethod("NewArray_Resolved"));
+ ASSERT_TRUE(HasClass("[Ljava/lang/IllegalStateException;", true, "public final abstract"));
+}
+
TEST_F(VerifierDepsTest, EncodeDecode) {
VerifyDexFile();
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index ef03bb3dd4..96976d9bce 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -134,8 +134,7 @@ inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index,
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
- ->GetDexCache()->NumResolvedMethods());
+ GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
pointer_size);
@@ -154,8 +153,7 @@ inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_index,
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
- ->GetDexCache()->NumResolvedMethods());
+ GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr);
mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
@@ -186,8 +184,7 @@ template <bool kWithCheck>
inline mirror::Class* ArtMethod::GetDexCacheResolvedType(dex::TypeIndex type_index,
PointerSize pointer_size) {
if (kWithCheck) {
- mirror::DexCache* dex_cache =
- GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()->GetDexCache();
+ mirror::DexCache* dex_cache = GetInterfaceMethodIfProxy(pointer_size)->GetDexCache();
if (UNLIKELY(type_index.index_ >= dex_cache->NumResolvedTypes())) {
ThrowArrayIndexOutOfBoundsException(type_index.index_, dex_cache->NumResolvedTypes());
return nullptr;
@@ -333,7 +330,7 @@ inline const char* ArtMethod::GetName() {
}
inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
- return GetDeclaringClass()->GetDexFile().GetCodeItem(GetCodeItemOffset());
+ return GetDexFile()->GetCodeItem(GetCodeItemOffset());
}
inline bool ArtMethod::IsResolvedTypeIdx(dex::TypeIndex type_idx, PointerSize pointer_size) {
@@ -398,11 +395,11 @@ inline mirror::ClassLoader* ArtMethod::GetClassLoader() {
}
inline mirror::DexCache* ArtMethod::GetDexCache() {
- DCHECK(!IsProxyMethod());
- if (UNLIKELY(IsObsolete())) {
- return GetObsoleteDexCache();
- } else {
+ if (LIKELY(!IsObsolete())) {
return GetDeclaringClass()->GetDexCache();
+ } else {
+ DCHECK(!IsProxyMethod());
+ return GetObsoleteDexCache();
}
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index abc304de13..b38508b757 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -27,6 +27,7 @@
#include "invoke_type.h"
#include "method_reference.h"
#include "modifiers.h"
+#include "mirror/dex_cache.h"
#include "mirror/object.h"
#include "obj_ptr.h"
#include "read_barrier_option.h"
@@ -220,6 +221,12 @@ class ArtMethod FINAL {
return !IsIntrinsic() && (GetAccessFlags() & kAccObsoleteMethod) != 0;
}
+ void SetIsObsolete() {
+ // TODO We should really support redefining intrinsics if possible.
+ DCHECK(!IsIntrinsic());
+ SetAccessFlags(GetAccessFlags() | kAccObsoleteMethod);
+ }
+
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsNative() {
return (GetAccessFlags<kReadBarrierOption>() & kAccNative) != 0;
@@ -325,6 +332,7 @@ class ArtMethod FINAL {
ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_index,
ArtMethod* new_method,
PointerSize pointer_size)
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index f24a8625b4..a22efcfe32 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -672,6 +672,8 @@ class HashSet {
T* data_; // Backing storage.
double min_load_factor_;
double max_load_factor_;
+
+ ART_FRIEND_TEST(InternTableTest, CrossHash);
};
template <class T, class EmptyFn, class HashFn, class Pred, class Alloc>
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 255ad714f2..2adeb8cc97 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,6 +60,7 @@ enum LockLevel {
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
+ kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
kRosAllocGlobalLock,
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index a11257f21b..5fc5f1a2f5 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -81,9 +81,6 @@ inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx,
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
string = ResolveString(dex_file, string_idx, dex_cache);
- if (string != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedString(string_idx), string);
- }
}
return string.Ptr();
}
@@ -192,20 +189,15 @@ inline ArtField* ClassLinker::GetResolvedField(uint32_t field_idx,
return dex_cache->GetResolvedField(field_idx, image_pointer_size_);
}
-inline ArtField* ClassLinker::GetResolvedField(uint32_t field_idx,
- ObjPtr<mirror::Class> field_declaring_class) {
- return GetResolvedField(field_idx, MakeObjPtr(field_declaring_class->GetDexCache()));
-}
-
inline ArtField* ClassLinker::ResolveField(uint32_t field_idx,
ArtMethod* referrer,
bool is_static) {
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
+ ArtField* resolved_field = GetResolvedField(field_idx, referrer->GetDexCache());
if (UNLIKELY(resolved_field == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 9b25303b65..6ef882a66a 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -327,8 +327,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- ArtField* GetResolvedField(uint32_t field_idx, ObjPtr<mirror::Class> field_declaring_class)
- REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* GetResolvedField(uint32_t field_idx, ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index f6eeffca73..14c9c21356 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -563,7 +563,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
HandleWrapperObjPtr<mirror::Object> h_this(hs2.NewHandleWrapper(this_object));
Handle<mirror::Class> h_referring_class(hs2.NewHandle(referrer->GetDeclaringClass()));
const dex::TypeIndex method_type_idx =
- h_referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
+ referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
mirror::Class* method_reference_class = class_linker->ResolveType(method_type_idx, referrer);
if (UNLIKELY(method_reference_class == nullptr)) {
// Bad type idx.
@@ -673,8 +673,7 @@ inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFiel
size_t expected_size) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
ArtField* resolved_field =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx,
- kRuntimePointerSize);
+ referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (UNLIKELY(resolved_field == nullptr)) {
return nullptr;
}
@@ -733,7 +732,7 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx,
}
mirror::Class* referring_class = referrer->GetDeclaringClass();
ArtMethod* resolved_method =
- referring_class->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
+ referrer->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (UNLIKELY(resolved_method == nullptr)) {
return nullptr;
}
@@ -759,9 +758,9 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx,
} else if (type == kSuper) {
// TODO This lookup is rather slow.
dex::TypeIndex method_type_idx =
- referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
+ referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
mirror::Class* method_reference_class =
- referring_class->GetDexCache()->GetResolvedType(method_type_idx);
+ referrer->GetDexCache()->GetResolvedType(method_type_idx);
if (method_reference_class == nullptr) {
// Need to do full type resolution...
return nullptr;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 03ef962f05..4ea1130947 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -557,8 +557,10 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
}
Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
- if (interpreter_stubs_installed_) {
+ if (interpreter_stubs_installed_ && interpret_only_) {
return InstrumentationLevel::kInstrumentWithInterpreter;
+ } else if (interpreter_stubs_installed_) {
+ return InstrumentationLevel::kInstrumentWithInterpreterAndJit;
} else if (entry_exit_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInstrumentationStubs;
} else {
@@ -566,6 +568,14 @@ Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentation
}
}
+bool Instrumentation::RequiresInstrumentationInstallation(InstrumentationLevel new_level) const {
+ // We need to reinstall instrumentation if we go to a different level or if the current level is
+ // kInstrumentWithInterpreterAndJit since that level does not force all code to always use the
+ // interpreter and so we might have started running optimized code again.
+ return new_level == InstrumentationLevel::kInstrumentWithInterpreterAndJit ||
+ GetCurrentInstrumentationLevel() != new_level;
+}
+
void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desired_level) {
// Store the instrumentation level for this key or remove it.
if (desired_level == InstrumentationLevel::kInstrumentNothing) {
@@ -585,8 +595,7 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir
interpret_only_ = (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) ||
forced_interpret_only_;
- InstrumentationLevel current_level = GetCurrentInstrumentationLevel();
- if (requested_level == current_level) {
+ if (!RequiresInstrumentationInstallation(requested_level)) {
// We're already set.
return;
}
@@ -595,7 +604,7 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir
Locks::mutator_lock_->AssertExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
if (requested_level > InstrumentationLevel::kInstrumentNothing) {
- if (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) {
+ if (requested_level >= InstrumentationLevel::kInstrumentWithInterpreterAndJit) {
interpreter_stubs_installed_ = true;
entry_exit_stubs_installed_ = true;
} else {
@@ -842,7 +851,8 @@ void Instrumentation::EnableDeoptimization() {
void Instrumentation::DisableDeoptimization(const char* key) {
CHECK_EQ(deoptimization_enabled_, true);
// If we deoptimized everything, undo it.
- if (interpreter_stubs_installed_) {
+ InstrumentationLevel level = GetCurrentInstrumentationLevel();
+ if (level == InstrumentationLevel::kInstrumentWithInterpreter) {
UndeoptimizeEverything(key);
}
// Undeoptimized selected methods.
@@ -869,6 +879,14 @@ bool Instrumentation::ShouldNotifyMethodEnterExitEvents() const {
return !deoptimization_enabled_ && !interpreter_stubs_installed_;
}
+// TODO we don't check deoptimization_enabled_ because currently there isn't really any support for
+// multiple users of instrumentation. Since this is just a temporary state anyway pending work to
+// ensure that the current_method doesn't get kept across suspend points this should be okay.
+// TODO Remove once b/33630159 is resolved.
+void Instrumentation::ReJitEverything(const char* key) {
+ ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreterAndJit);
+}
+
void Instrumentation::DeoptimizeEverything(const char* key) {
CHECK(deoptimization_enabled_);
ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreter);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 1e5fcf2c04..05c0aaa081 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -133,6 +133,9 @@ class Instrumentation {
enum class InstrumentationLevel {
kInstrumentNothing, // execute without instrumentation
kInstrumentWithInstrumentationStubs, // execute with instrumentation entry/exit stubs
+ kInstrumentWithInterpreterAndJit, // execute with interpreter initially and later the JIT
+ // (if it is enabled). This level is special in that it
+ // always requires re-instrumentation.
kInstrumentWithInterpreter // execute with interpreter
};
@@ -163,6 +166,13 @@ class Instrumentation {
}
bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Executes everything with the interpreter/jit (if available).
+ void ReJitEverything(const char* key)
+ REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
+ REQUIRES(!Locks::thread_list_lock_,
+ !Locks::classlinker_classes_lock_,
+ !deoptimized_methods_lock_);
+
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
@@ -432,9 +442,13 @@ class Instrumentation {
return alloc_entrypoints_instrumented_;
}
- private:
InstrumentationLevel GetCurrentInstrumentationLevel() const;
+ private:
+ // Returns true if moving to the given instrumentation level requires the installation of stubs.
+ // False otherwise.
+ bool RequiresInstrumentationInstallation(InstrumentationLevel new_level) const;
+
// Does the job of installing or removing instrumentation code within methods.
// In order to support multiple clients using instrumentation at the same time,
// the caller must pass a unique key (a string) identifying it so we remind which
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 9c05d3c574..3e1914604d 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -319,7 +319,9 @@ std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::Strin
if (kIsDebugBuild) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
}
- return static_cast<size_t>(root.Read<kWithoutReadBarrier>()->GetHashCode());
+ // An additional cast to prevent undesired sign extension.
+ return static_cast<size_t>(
+ static_cast<uint32_t>(root.Read<kWithoutReadBarrier>()->GetHashCode()));
}
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index f661d9ffe5..68454fbfd4 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -163,7 +163,11 @@ class InternTable {
NO_THREAD_SAFETY_ANALYSIS;
// Utf8String can be used for lookup.
- std::size_t operator()(const Utf8String& key) const { return key.GetHash(); }
+ std::size_t operator()(const Utf8String& key) const {
+ // A cast to prevent undesired sign extension.
+ return static_cast<uint32_t>(key.GetHash());
+ }
+
bool operator()(const GcRoot<mirror::String>& a, const Utf8String& b) const
NO_THREAD_SAFETY_ANALYSIS;
};
@@ -217,6 +221,8 @@ class InternTable {
// We call AddNewTable when we create the zygote to reduce private dirty pages caused by
// modifying the zygote intern table. The back of table is modified when strings are interned.
std::vector<UnorderedSet> tables_;
+
+ ART_FRIEND_TEST(InternTableTest, CrossHash);
};
// Insert if non null, otherwise return null. Must be called holding the mutator lock.
@@ -276,6 +282,7 @@ class InternTable {
gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
friend class Transaction;
+ ART_FRIEND_TEST(InternTableTest, CrossHash);
DISALLOW_COPY_AND_ASSIGN(InternTable);
};
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b91d946095..3991d6550d 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -16,6 +16,7 @@
#include "intern_table.h"
+#include "base/hash_set.h"
#include "common_runtime_test.h"
#include "mirror/object.h"
#include "handle_scope-inl.h"
@@ -62,6 +63,25 @@ TEST_F(InternTableTest, Size) {
EXPECT_EQ(2U, t.Size());
}
+// Check if table indexes match on 64 and 32 bit machines.
+// This is done by ensuring hash values are the same on every machine and limited to 32-bit wide.
+// Otherwise cross compilation can cause a table to be filled on host using one indexing algorithm
+// and later on a device with different sizeof(size_t) can use another indexing algorithm.
+// Thus the table may provide wrong data.
+TEST_F(InternTableTest, CrossHash) {
+ ScopedObjectAccess soa(Thread::Current());
+ InternTable t;
+
+ // A string that has a negative hash value.
+ GcRoot<mirror::String> str(mirror::String::AllocFromModifiedUtf8(soa.Self(), "00000000"));
+
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ for (InternTable::Table::UnorderedSet& table : t.strong_interns_.tables_) {
+    // Even a negative hash value must be truncated to 32 bits on every host.
+ ASSERT_TRUE(IsUint<32>(table.hashfn_(str)));
+ }
+}
+
class TestPredicate : public IsMarkedVisitor {
public:
mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 423f0543d5..b599949af4 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -251,17 +251,16 @@ static inline ObjPtr<mirror::String> ResolveString(Thread* self,
}
}
ArtMethod* method = shadow_frame.GetMethod();
- ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
// MethodVerifier refuses methods with string_idx out of bounds.
DCHECK_LT(string_idx.index_ % mirror::DexCache::kDexCacheStringCacheSize,
- declaring_class->GetDexFile().NumStringIds());
+ method->GetDexFile()->NumStringIds());
ObjPtr<mirror::String> string_ptr =
- mirror::StringDexCachePair::Lookup(declaring_class->GetDexCache()->GetStrings(),
+ mirror::StringDexCachePair::Lookup(method->GetDexCache()->GetStrings(),
string_idx.index_,
mirror::DexCache::kDexCacheStringCacheSize).Read();
if (UNLIKELY(string_ptr == nullptr)) {
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(),
string_idx,
dex_cache);
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index 0eff2ab47b..d8869ad677 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -24,6 +24,7 @@
#include "base/logging.h"
#include "jdwp/jdwp_priv.h"
+#include "thread-inl.h"
#ifdef ART_TARGET_ANDROID
#include "cutils/sockets.h"
@@ -57,7 +58,9 @@ using android::base::StringPrintf;
struct JdwpAdbState : public JdwpNetStateBase {
public:
- explicit JdwpAdbState(JdwpState* state) : JdwpNetStateBase(state) {
+ explicit JdwpAdbState(JdwpState* state)
+ : JdwpNetStateBase(state),
+ state_lock_("JdwpAdbState lock", kJdwpAdbStateLock) {
control_sock_ = -1;
shutting_down_ = false;
@@ -77,20 +80,23 @@ struct JdwpAdbState : public JdwpNetStateBase {
}
}
- virtual bool Accept();
+ virtual bool Accept() REQUIRES(!state_lock_);
virtual bool Establish(const JdwpOptions*) {
return false;
}
- virtual void Shutdown() {
- shutting_down_ = true;
-
- int control_sock = this->control_sock_;
- int local_clientSock = this->clientSock;
-
- /* clear these out so it doesn't wake up and try to reuse them */
- this->control_sock_ = this->clientSock = -1;
+ virtual void Shutdown() REQUIRES(!state_lock_) {
+ int control_sock;
+ int local_clientSock;
+ {
+ MutexLock mu(Thread::Current(), state_lock_);
+ shutting_down_ = true;
+ control_sock = this->control_sock_;
+ local_clientSock = this->clientSock;
+ /* clear these out so it doesn't wake up and try to reuse them */
+ this->control_sock_ = this->clientSock = -1;
+ }
if (local_clientSock != -1) {
shutdown(local_clientSock, SHUT_RDWR);
@@ -103,13 +109,27 @@ struct JdwpAdbState : public JdwpNetStateBase {
WakePipe();
}
- virtual bool ProcessIncoming();
+ virtual bool ProcessIncoming() REQUIRES(!state_lock_);
private:
- int ReceiveClientFd();
+ int ReceiveClientFd() REQUIRES(!state_lock_);
- int control_sock_;
- bool shutting_down_;
+ bool IsDown() REQUIRES(!state_lock_) {
+ MutexLock mu(Thread::Current(), state_lock_);
+ return shutting_down_;
+ }
+
+ int ControlSock() REQUIRES(!state_lock_) {
+ MutexLock mu(Thread::Current(), state_lock_);
+ if (shutting_down_) {
+ CHECK_EQ(control_sock_, -1);
+ }
+ return control_sock_;
+ }
+
+ int control_sock_ GUARDED_BY(state_lock_);
+ bool shutting_down_ GUARDED_BY(state_lock_);
+ Mutex state_lock_;
socklen_t control_addr_len_;
union {
@@ -162,12 +182,13 @@ int JdwpAdbState::ReceiveClientFd() {
cmsg->cmsg_type = SCM_RIGHTS;
(reinterpret_cast<int*>(CMSG_DATA(cmsg)))[0] = -1;
- int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0));
+ int rc = TEMP_FAILURE_RETRY(recvmsg(ControlSock(), &msg, 0));
if (rc <= 0) {
if (rc == -1) {
- PLOG(WARNING) << "Receiving file descriptor from ADB failed (socket " << control_sock_ << ")";
+ PLOG(WARNING) << "Receiving file descriptor from ADB failed (socket " << ControlSock() << ")";
}
+ MutexLock mu(Thread::Current(), state_lock_);
close(control_sock_);
control_sock_ = -1;
return -1;
@@ -189,23 +210,29 @@ bool JdwpAdbState::Accept() {
/* first, ensure that we get a connection to the ADB daemon */
retry:
- if (shutting_down_) {
+ if (IsDown()) {
return false;
}
- if (control_sock_ == -1) {
+ if (ControlSock() == -1) {
int sleep_ms = 500;
const int sleep_max_ms = 2*1000;
char buff[5];
- control_sock_ = socket(PF_UNIX, SOCK_STREAM, 0);
- if (control_sock_ < 0) {
+ int sock = socket(PF_UNIX, SOCK_STREAM, 0);
+ if (sock < 0) {
PLOG(ERROR) << "Could not create ADB control socket";
return false;
}
-
- if (!MakePipe()) {
- return false;
+ {
+ MutexLock mu(Thread::Current(), state_lock_);
+ control_sock_ = sock;
+ if (shutting_down_) {
+ return false;
+ }
+ if (!MakePipe()) {
+ return false;
+ }
}
snprintf(buff, sizeof(buff), "%04x", getpid());
@@ -225,11 +252,12 @@ bool JdwpAdbState::Accept() {
* up after a few minutes in case somebody ships an app with
* the debuggable flag set.
*/
- int ret = connect(control_sock_, &control_addr_.controlAddrPlain, control_addr_len_);
+ int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_);
if (!ret) {
+ int control_sock = ControlSock();
#ifdef ART_TARGET_ANDROID
- if (!socket_peer_is_trusted(control_sock_)) {
- if (shutdown(control_sock_, SHUT_RDWR)) {
+ if (control_sock < 0 || !socket_peer_is_trusted(control_sock)) {
+ if (control_sock >= 0 && shutdown(control_sock, SHUT_RDWR)) {
PLOG(ERROR) << "trouble shutting down socket";
}
return false;
@@ -237,7 +265,7 @@ bool JdwpAdbState::Accept() {
#endif
/* now try to send our pid to the ADB daemon */
- ret = TEMP_FAILURE_RETRY(send(control_sock_, buff, 4, 0));
+ ret = TEMP_FAILURE_RETRY(send(control_sock, buff, 4, 0));
if (ret >= 0) {
VLOG(jdwp) << StringPrintf("PID sent as '%.*s' to ADB", 4, buff);
break;
@@ -256,7 +284,7 @@ bool JdwpAdbState::Accept() {
if (sleep_ms > sleep_max_ms) {
sleep_ms = sleep_max_ms;
}
- if (shutting_down_) {
+ if (IsDown()) {
return false;
}
}
@@ -264,9 +292,13 @@ bool JdwpAdbState::Accept() {
VLOG(jdwp) << "trying to receive file descriptor from ADB";
/* now we can receive a client file descriptor */
- clientSock = ReceiveClientFd();
- if (shutting_down_) {
- return false; // suppress logs and additional activity
+ int sock = ReceiveClientFd();
+ {
+ MutexLock mu(Thread::Current(), state_lock_);
+ clientSock = sock;
+ if (shutting_down_) {
+ return false; // suppress logs and additional activity
+ }
}
if (clientSock == -1) {
if (++retryCount > 5) {
@@ -314,7 +346,7 @@ bool JdwpAdbState::ProcessIncoming() {
FD_ZERO(&readfds);
/* configure fds; note these may get zapped by another thread */
- fd = control_sock_;
+ fd = ControlSock();
if (fd >= 0) {
FD_SET(fd, &readfds);
if (maxfd < fd) {
@@ -368,13 +400,14 @@ bool JdwpAdbState::ProcessIncoming() {
VLOG(jdwp) << "Got wake-up signal, bailing out of select";
goto fail;
}
- if (control_sock_ >= 0 && FD_ISSET(control_sock_, &readfds)) {
+ int control_sock = ControlSock();
+ if (control_sock >= 0 && FD_ISSET(control_sock, &readfds)) {
int sock = ReceiveClientFd();
if (sock >= 0) {
LOG(INFO) << "Ignoring second debugger -- accepting and dropping";
close(sock);
} else {
- CHECK_EQ(control_sock_, -1);
+ CHECK_EQ(ControlSock(), -1);
/*
* Remote side most likely went away, so our next read
* on clientSock will fail and throw us out of the loop.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f43e30dd6f..6336cddc07 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -594,6 +594,9 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
return nullptr;
}
+ DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsDebuggable())
+ << "Should not be using cha on debuggable apps/runs!";
+
for (ArtMethod* single_impl : cha_single_implementation_list) {
Runtime::Current()->GetClassHierarchyAnalysis()->AddDependency(
single_impl, method, method_header);
@@ -645,6 +648,69 @@ size_t JitCodeCache::CodeCacheSize() {
return CodeCacheSizeLocked();
}
+// This notifies the code cache that the given method has been redefined and that it should remove
+// any cached information it has on the method. All threads must be suspended before calling this
+// method. The compiled code for the method (if there is any) must not be in any thread's call stack.
+void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
+  MutexLock mu(Thread::Current(), lock_);
+  if (method->IsNative()) {
+    return;
+  }
+  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+  if (info != nullptr) {
+    auto profile = std::find(profiling_infos_.begin(), profiling_infos_.end(), info);
+    DCHECK(profile != profiling_infos_.end());
+    profiling_infos_.erase(profile);
+  }
+  method->SetProfilingInfo(nullptr);
+  ScopedCodeCacheWrite ccw(code_map_.get());
+  for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
+    if (code_iter->second == method) {
+      FreeCode(code_iter->first);  // erase() below invalidates code_iter; use its return value.
+      code_iter = method_code_map_.erase(code_iter);
+    } else {
+      ++code_iter;
+    }
+  }
+  auto code_map = osr_code_map_.find(method);
+  if (code_map != osr_code_map_.end()) {
+    osr_code_map_.erase(code_map);
+  }
+}
+
+// This invalidates old_method. Once this function returns one can no longer use old_method to
+// execute code unless it is fixed up. This fixup will happen later in the process of installing a
+// class redefinition.
+// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
+// shouldn't be used since it is no longer logically in the jit code cache.
+// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
+void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
+ MutexLock mu(Thread::Current(), lock_);
+ // Update ProfilingInfo to the new one and remove it from the old_method.
+ if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
+ DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
+ ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
+ old_method->SetProfilingInfo(nullptr);
+ // Since the JIT should be paused and all threads suspended by the time this is called these
+ // checks should always pass.
+ DCHECK(!info->IsInUseByCompiler());
+ new_method->SetProfilingInfo(info);
+ info->method_ = new_method;
+ }
+ // Update method_code_map_ to point to the new method.
+ for (auto& it : method_code_map_) {
+ if (it.second == old_method) {
+ it.second = new_method;
+ }
+ }
+ // Update osr_code_map_ to point to the new method.
+ auto code_map = osr_code_map_.find(old_method);
+ if (code_map != osr_code_map_.end()) {
+ osr_code_map_.Put(new_method, code_map->second);
+ osr_code_map_.erase(old_method);
+ }
+}
+
size_t JitCodeCache::CodeCacheSizeLocked() {
return used_memory_for_code_;
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index d97742d00b..b5e31769ab 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -75,6 +75,10 @@ class JitCodeCache {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
+ void NotifyMethodRedefined(ArtMethod* method)
+ REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
+
// Notify to the code cache that the compiler wants to use the
// profiling info of `method` to drive optimizations,
// and therefore ensure the returned profiling info object is not
@@ -219,6 +223,11 @@ class JitCodeCache {
void DisallowInlineCacheAccess() REQUIRES(!lock_);
void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+ // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
+ // 'new_method' since it is being made obsolete.
+ void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
+ REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 9902bb584f..9fbf2e3afa 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -128,7 +128,9 @@ class ProfilingInfo {
const uint32_t number_of_inline_caches_;
// Method this profiling info is for.
- ArtMethod* const method_;
+ // Not 'const' as JVMTI introduces obsolete methods that we implement by creating new ArtMethods.
+ // See JitCodeCache::MoveObsoleteMethod.
+ ArtMethod* method_;
// Whether the ArtMethod is currently being compiled. This flag
// is implicitly guarded by the JIT code cache lock.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index ec265e5ab3..6f88cc5df4 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -19,7 +19,6 @@
#include "array.h"
#include "art_field.h"
-#include "art_method.h"
#include "class.h"
#include "dex_file_types.h"
#include "object.h"
@@ -27,6 +26,7 @@
namespace art {
+class ArtMethod;
struct DexCacheOffsets;
class DexFile;
class ImageWriter;
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 6d29ed379d..354410e6bf 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -97,6 +97,12 @@ inline bool Object::CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) {
OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
}
+inline bool Object::CasLockWordWeakAcquire(LockWord old_val, LockWord new_val) {
+ // Force use of non-transactional mode and do not check.
+ return CasFieldWeakAcquire32<false, false>(
+ OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
+}
+
inline bool Object::CasLockWordWeakRelease(LockWord old_val, LockWord new_val) {
// Force use of non-transactional mode and do not check.
return CasFieldWeakRelease32<false, false>(
@@ -759,6 +765,24 @@ inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakAcquire32(MemberOffset field_offset,
+ int32_t old_value, int32_t new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
+
+ return atomic_addr->CompareExchangeWeakAcquire(old_value, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
int32_t old_value, int32_t new_value) {
if (kCheckTransaction) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 67b5ddbb32..db58a60994 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -153,6 +153,8 @@ class MANAGED LOCKABLE Object {
REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CasLockWordWeakAcquire(LockWord old_val, LockWord new_val)
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
@@ -460,6 +462,12 @@ class MANAGED LOCKABLE Object {
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakAcquire32(MemberOffset field_offset, int32_t old_value,
+ int32_t new_value) ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 222eb5c556..893abd5462 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -161,7 +161,7 @@ bool Monitor::Install(Thread* self) {
}
LockWord fat(this, lw.GCState());
// Publish the updated lock word, which may race with other threads.
- bool success = GetObject()->CasLockWordWeakSequentiallyConsistent(lw, fat);
+ bool success = GetObject()->CasLockWordWeakRelease(lw, fat);
// Lock profiling.
if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
// Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
@@ -879,13 +879,16 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
while (true) {
- LockWord lock_word = h_obj->GetLockWord(true);
+ // We initially read the lockword with ordinary Java/relaxed semantics. When stronger
+ // semantics are needed, we address it below. Since GetLockWord bottoms out to a relaxed load,
+ // we can fix it later, in an infrequently executed case, with a fence.
+ LockWord lock_word = h_obj->GetLockWord(false);
switch (lock_word.GetState()) {
case LockWord::kUnlocked: {
+ // No ordering required for preceding lockword read, since we retest.
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
- if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) {
+ if (h_obj->CasLockWordWeakAcquire(lock_word, thin_locked)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
- // CasLockWord enforces more than the acquire ordering we need here.
return h_obj.Get(); // Success!
}
continue; // Go again.
@@ -893,19 +896,22 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
case LockWord::kThinLocked: {
uint32_t owner_thread_id = lock_word.ThinLockOwner();
if (owner_thread_id == thread_id) {
+ // No ordering required for initial lockword read.
// We own the lock, increase the recursion count.
uint32_t new_count = lock_word.ThinLockCount() + 1;
if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
LockWord thin_locked(LockWord::FromThinLockId(thread_id,
new_count,
lock_word.GCState()));
+ // Only this thread pays attention to the count. Thus there is no need for stronger
+ // than relaxed memory ordering.
if (!kUseReadBarrier) {
- h_obj->SetLockWord(thin_locked, true);
+ h_obj->SetLockWord(thin_locked, false /* volatile */);
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
} else {
// Use CAS to preserve the read barrier state.
- if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) {
+ if (h_obj->CasLockWordWeakRelaxed(lock_word, thin_locked)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
}
@@ -922,20 +928,28 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
// Contention.
contention_count++;
Runtime* runtime = Runtime::Current();
- if (contention_count <= runtime->GetMaxSpinsBeforeThinkLockInflation()) {
+ if (contention_count <= runtime->GetMaxSpinsBeforeThinLockInflation()) {
// TODO: Consider switching the thread state to kBlocked when we are yielding.
// Use sched_yield instead of NanoSleep since NanoSleep can wait much longer than the
// parameter you pass in. This can cause thread suspension to take excessively long
// and make long pauses. See b/16307460.
+ // TODO: We should literally spin first, without sched_yield. Sched_yield either does
+ // nothing (at significant expense), or guarantees that we wait at least microseconds.
+ // If the owner is running, I would expect the median lock hold time to be hundreds
+ // of nanoseconds or less.
sched_yield();
} else {
contention_count = 0;
+ // No ordering required for initial lockword read. Install rereads it anyway.
InflateThinLocked(self, h_obj, lock_word, 0);
}
}
continue; // Start from the beginning.
}
case LockWord::kFatLocked: {
+ // We should have done an acquire read of the lockword initially, to ensure
+ // visibility of the monitor data structure. Use an explicit fence instead.
+ QuasiAtomic::ThreadFenceAcquire();
Monitor* mon = lock_word.FatLockMonitor();
if (trylock) {
return mon->TryLock(self) ? h_obj.Get() : nullptr;
@@ -946,6 +960,8 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
}
case LockWord::kHashCode:
// Inflate with the existing hashcode.
+ // Again no ordering required for initial lockword read, since we don't rely
+ // on the visibility of any prior computation.
Inflate(self, nullptr, h_obj.Get(), lock_word.GetHashCode());
continue; // Start from the beginning.
default: {
@@ -988,13 +1004,16 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
}
if (!kUseReadBarrier) {
DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
+ // TODO: This really only needs memory_order_release, but we currently have
+ // no way to specify that. In fact there seem to be no legitimate uses of SetLockWord
+      // with a final argument of true. This slows down x86 and ARMv7, but probably not ARMv8.
h_obj->SetLockWord(new_lw, true);
AtraceMonitorUnlock();
// Success!
return true;
} else {
// Use CAS to preserve the read barrier state.
- if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, new_lw)) {
+ if (h_obj->CasLockWordWeakRelease(lock_word, new_lw)) {
AtraceMonitorUnlock();
// Success!
return true;
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 68815e7de0..926819d1d0 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -39,7 +39,10 @@
#include "base/logging.h"
#include "events-inl.h"
#include "gc/allocation_listener.h"
+#include "gc/heap.h"
#include "instrumentation.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
#include "mirror/class.h"
@@ -53,6 +56,143 @@ namespace openjdkjvmti {
using android::base::StringPrintf;
+// This visitor walks thread stacks and allocates and sets up the obsolete methods. It also
+// performs some basic sanity checks on each obsolete method.
+class ObsoleteMethodStackVisitor : public art::StackVisitor {
+ protected:
+ ObsoleteMethodStackVisitor(
+ art::Thread* thread,
+ art::LinearAlloc* allocator,
+ const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
+ /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps,
+ /*out*/bool* success,
+ /*out*/std::string* error_msg)
+ : StackVisitor(thread,
+ /*context*/nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ allocator_(allocator),
+ obsoleted_methods_(obsoleted_methods),
+ obsolete_maps_(obsolete_maps),
+ success_(success),
+ is_runtime_frame_(false),
+ error_msg_(error_msg) {
+ *success_ = true;
+ }
+
+ ~ObsoleteMethodStackVisitor() OVERRIDE {}
+
+ public:
+ // Returns true if we successfully installed obsolete methods on this thread, filling
+ // obsolete_maps_ with the translations if needed. Returns false and fills error_msg if we fail.
+ // The stack is cleaned up when we fail.
+ static bool UpdateObsoleteFrames(
+ art::Thread* thread,
+ art::LinearAlloc* allocator,
+ const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
+ /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps,
+ /*out*/std::string* error_msg) REQUIRES(art::Locks::mutator_lock_) {
+ bool success = true;
+ ObsoleteMethodStackVisitor visitor(thread,
+ allocator,
+ obsoleted_methods,
+ obsolete_maps,
+ &success,
+ error_msg);
+ visitor.WalkStack();
+ if (!success) {
+ RestoreFrames(thread, *obsolete_maps, error_msg);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ static void RestoreFrames(
+ art::Thread* thread ATTRIBUTE_UNUSED,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsolete_maps ATTRIBUTE_UNUSED,
+ std::string* error_msg)
+ REQUIRES(art::Locks::mutator_lock_) {
+ LOG(FATAL) << "Restoring stack frames is not yet supported. Error was: " << *error_msg;
+ }
+
+ bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ art::ArtMethod* old_method = GetMethod();
+ // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
+ // works through runtime methods.
+ bool prev_was_runtime_frame_ = is_runtime_frame_;
+ is_runtime_frame_ = old_method->IsRuntimeMethod();
+ if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
+ // The check below works since when we deoptimize we set shadow frames for all frames until a
+ // native/runtime transition and for those set the return PC to a function that will complete
+ // the deoptimization. This does leave us with the unfortunate side-effect that frames just
+ // below runtime frames cannot be deoptimized at the moment.
+ // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
+ // works through runtime methods.
+ // TODO b/33616143
+ if (!IsShadowFrame() && prev_was_runtime_frame_) {
+ *error_msg_ = StringPrintf("Deoptimization failed due to runtime method in stack.");
+ *success_ = false;
+ return false;
+ }
+ // We cannot ensure that the right dex file is used in inlined frames so we don't support
+ // redefining them.
+ DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition";
+ // TODO We should really support intrinsic obsolete methods.
+ // TODO We should really support redefining intrinsics.
+ // We don't support intrinsics so check for them here.
+ DCHECK(!old_method->IsIntrinsic());
+ art::ArtMethod* new_obsolete_method = nullptr;
+ auto obsolete_method_pair = obsolete_maps_->find(old_method);
+ if (obsolete_method_pair == obsolete_maps_->end()) {
+ // Create a new Obsolete Method and put it in the list.
+ art::Runtime* runtime = art::Runtime::Current();
+ art::ClassLinker* cl = runtime->GetClassLinker();
+ auto ptr_size = cl->GetImagePointerSize();
+ const size_t method_size = art::ArtMethod::Size(ptr_size);
+ auto* method_storage = allocator_->Alloc(GetThread(), method_size);
+ if (method_storage == nullptr) {
+ *success_ = false;
+ *error_msg_ = StringPrintf("Unable to allocate storage for obsolete version of '%s'",
+ old_method->PrettyMethod().c_str());
+ return false;
+ }
+ new_obsolete_method = new (method_storage) art::ArtMethod();
+ new_obsolete_method->CopyFrom(old_method, ptr_size);
+ DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
+ new_obsolete_method->SetIsObsolete();
+ obsolete_maps_->insert({old_method, new_obsolete_method});
+ // Update JIT Data structures to point to the new method.
+ art::jit::Jit* jit = art::Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ // Notify the JIT we are making this obsolete method. It will update the jit's internal
+ // structures to keep track of the new obsolete method.
+ jit->GetCodeCache()->MoveObsoleteMethod(old_method, new_obsolete_method);
+ }
+ } else {
+ new_obsolete_method = obsolete_method_pair->second;
+ }
+ DCHECK(new_obsolete_method != nullptr);
+ SetMethod(new_obsolete_method);
+ }
+ return true;
+ }
+
+ private:
+ // The linear allocator we should use to make new methods.
+ art::LinearAlloc* allocator_;
+ // The set of all methods which could be obsoleted.
+ const std::unordered_set<art::ArtMethod*>& obsoleted_methods_;
+ // A map from the original to the newly allocated obsolete method for frames on this thread. The
+ // values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
+ // the redefined classes ClassExt by the caller.
+ std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_;
+ bool* success_;
+ // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
+ // works through runtime methods.
+ bool is_runtime_frame_;
+ std::string* error_msg_;
+};
+
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
jint data_len,
@@ -76,6 +216,8 @@ std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& orig
return map;
}
+// TODO This should handle doing multiple classes at once so we need to do less cleanup when things
+// go wrong.
jvmtiError Redefiner::RedefineClass(ArtJvmTiEnv* env,
art::Runtime* runtime,
art::Thread* self,
@@ -116,6 +258,9 @@ jvmtiError Redefiner::RedefineClass(ArtJvmTiEnv* env,
*error_msg = os.str();
return ERR(INVALID_CLASS_FORMAT);
}
+ // Stop JIT for the duration of this redefine since the JIT might concurrently compile a method we
+ // are going to redefine.
+ art::jit::ScopedJitSuspend suspend_jit;
// Get shared mutator lock.
art::ScopedObjectAccess soa(self);
art::StackHandleScope<1> hs(self);
@@ -296,6 +441,107 @@ bool Redefiner::FinishRemainingAllocations(
return true;
}
+struct CallbackCtx {
+ Redefiner* const r;
+ art::LinearAlloc* allocator;
+ std::unordered_map<art::ArtMethod*, art::ArtMethod*> obsolete_map;
+ std::unordered_set<art::ArtMethod*> obsolete_methods;
+ bool success;
+ std::string* error_msg;
+
+ CallbackCtx(Redefiner* self, art::LinearAlloc* alloc, std::string* error)
+ : r(self), allocator(alloc), success(true), error_msg(error) {}
+};
+
+void DoRestoreObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
+ CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata);
+ ObsoleteMethodStackVisitor::RestoreFrames(t, data->obsolete_map, data->error_msg);
+}
+
+void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
+ CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata);
+ if (data->success) {
+ // Don't do anything if we already failed once.
+ data->success = ObsoleteMethodStackVisitor::UpdateObsoleteFrames(t,
+ data->allocator,
+ data->obsolete_methods,
+ &data->obsolete_map,
+ data->error_msg);
+ }
+}
+
+// This creates any ArtMethod* structures needed for obsolete methods and ensures that the stack is
+// updated so they will be run.
+bool Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
+ art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
+ art::mirror::ClassExt* ext = art_klass->GetExtData();
+ CHECK(ext->GetObsoleteMethods() != nullptr);
+ CallbackCtx ctx(this, art_klass->GetClassLoader()->GetAllocator(), error_msg_);
+ // Add all the declared methods to the map
+ for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
+ ctx.obsolete_methods.insert(&m);
+ }
+ for (art::ArtMethod* old_method : ctx.obsolete_methods) {
+ if (old_method->IsIntrinsic()) {
+ *error_msg_ = StringPrintf("Method '%s' is intrinsic and cannot be made obsolete!",
+ old_method->PrettyMethod().c_str());
+ return false;
+ }
+ }
+ {
+ art::MutexLock mu(self_, *art::Locks::thread_list_lock_);
+ art::ThreadList* list = art::Runtime::Current()->GetThreadList();
+ list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx));
+ if (!ctx.success) {
+ list->ForEach(DoRestoreObsoleteMethodsCallback, static_cast<void*>(&ctx));
+ return false;
+ }
+ }
+ FillObsoleteMethodMap(art_klass, ctx.obsolete_map);
+ return true;
+}
+
+// Fills the obsolete method map in the art_klass's extData. This is so obsolete methods are able to
+// figure out their DexCaches.
+void Redefiner::FillObsoleteMethodMap(
+ art::mirror::Class* art_klass,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes) {
+ int32_t index = 0;
+ art::mirror::ClassExt* ext_data = art_klass->GetExtData();
+ art::mirror::PointerArray* obsolete_methods = ext_data->GetObsoleteMethods();
+ art::mirror::ObjectArray<art::mirror::DexCache>* obsolete_dex_caches =
+ ext_data->GetObsoleteDexCaches();
+ int32_t num_method_slots = obsolete_methods->GetLength();
+ // Find the first empty index.
+ for (; index < num_method_slots; index++) {
+ if (obsolete_methods->GetElementPtrSize<art::ArtMethod*>(
+ index, art::kRuntimePointerSize) == nullptr) {
+ break;
+ }
+ }
+ // Make sure we have enough space.
+ CHECK_GT(num_method_slots, static_cast<int32_t>(obsoletes.size() + index));
+ CHECK(obsolete_dex_caches->Get(index) == nullptr);
+ // Fill in the map.
+ for (auto& obs : obsoletes) {
+ obsolete_methods->SetElementPtrSize(index, obs.second, art::kRuntimePointerSize);
+ obsolete_dex_caches->Set(index, art_klass->GetDexCache());
+ index++;
+ }
+}
+
+// TODO It should be possible to only deoptimize the specific obsolete methods.
+// TODO ReJitEverything can (sort of) fail. In certain cases it will skip deoptimizing some frames.
+// If one of these frames is an obsolete method we have a problem. b/33616143
+// TODO This shouldn't be necessary once we can ensure that the current method is not kept in
+// registers across suspend points.
+// TODO Pending b/33630159
+void Redefiner::EnsureObsoleteMethodsAreDeoptimized() {
+ art::ScopedAssertNoThreadSuspension nts("Deoptimizing everything!");
+ art::instrumentation::Instrumentation* i = runtime_->GetInstrumentation();
+ i->ReJitEverything("libOpenJkdJvmti - Class Redefinition");
+}
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<5> hs(self_);
// TODO We might want to have a global lock (or one based on the class being redefined at least)
@@ -329,6 +575,13 @@ jvmtiError Redefiner::Run() {
}
// Get the mirror class now that we aren't allocating anymore.
art::Handle<art::mirror::Class> art_class(hs.NewHandle(GetMirrorClass()));
+ // Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
+ // allocating so no deadlocks.
+ art::gc::Heap* heap = runtime_->GetHeap();
+ if (heap->IsGcConcurrentAndMoving()) {
+ // GC moving objects can cause deadlocks as we are deoptimizing the stack.
+ heap->IncrementDisableMovingGC(self_);
+ }
// Enable assertion that this thread isn't interrupted during this installation.
// After this we will need to do real cleanup in case of failure. Prior to this we could simply
// return and would let everything get cleaned up or harmlessly leaked.
@@ -338,6 +591,11 @@ jvmtiError Redefiner::Run() {
self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
runtime_->GetThreadList()->SuspendAll(
"Final installation of redefined Class!", /*long_suspend*/true);
+ // TODO We need to invalidate all breakpoints in the redefined class with the debugger.
+ // TODO We need to deal with any instrumentation/debugger deoptimized_methods_.
+ // TODO We need to update all debugger MethodIDs so they note the method they point to is
+ // obsolete or implement some other well defined semantics.
+ // TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods.
// TODO Might want to move this into a different type.
// Now we reach the part where we must do active cleanup if something fails.
// TODO We should really Retry if this fails instead of simply aborting.
@@ -345,11 +603,15 @@ jvmtiError Redefiner::Run() {
art::ObjPtr<art::mirror::LongArray> original_dex_file_cookie(nullptr);
if (!UpdateJavaDexFile(java_dex_file.Get(),
new_dex_file_cookie.Get(),
- &original_dex_file_cookie)) {
+ &original_dex_file_cookie) ||
+ !FindAndAllocateObsoleteMethods(art_class.Get())) {
// Release suspendAll
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
self_->TransitionFromSuspendedToRunnable();
+ if (heap->IsGcConcurrentAndMoving()) {
+ heap->DecrementDisableMovingGC(self_);
+ }
return result_;
}
if (!UpdateClass(art_class.Get(), new_dex_cache.Get())) {
@@ -359,26 +621,34 @@ jvmtiError Redefiner::Run() {
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
self_->TransitionFromSuspendedToRunnable();
+ if (heap->IsGcConcurrentAndMoving()) {
+ heap->DecrementDisableMovingGC(self_);
+ }
return result_;
}
- // Update the ClassObjects Keep the old DexCache (and other stuff) around so we can restore
- // functions/fields.
- // Verify the new Class.
- // Failure then undo updates to class
- // Do stack walks and allocate obsolete methods
- // Shrink the obsolete method maps if possible?
- // TODO find appropriate class loader. Allocate new dex files array. Pause all java treads.
- // Replace dex files array. Do stack scan + allocate obsoletes. Remove array if possible.
- // TODO We might want to ensure that all threads are stopped for this!
- // AddDexToClassPath();
- // TODO
- // Release suspendAll
+ // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
+ // pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the
+ // DexCache.
+ // TODO This can fail (leave some methods optimized) near runtime methods (including
+ // quick-to-interpreter transition function).
+ // TODO We probably don't need this at all once we have a way to ensure that the
+ // current_art_method is never stashed in a (physical) register by the JIT and lost to the
+ // stack-walker.
+ EnsureObsoleteMethodsAreDeoptimized();
+ // TODO Verify the new Class.
+ // TODO Failure then undo updates to class
+ // TODO Shrink the obsolete method maps if possible?
+ // TODO find appropriate class loader.
// TODO Put this into a scoped thing.
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
self_->TransitionFromSuspendedToRunnable();
- // TODO Do this at a more reasonable place.
+ // TODO Do the dex_file_ release at a more reasonable place. This works but it muddles who really
+ // owns the DexFile.
dex_file_.release();
+ if (heap->IsGcConcurrentAndMoving()) {
+ heap->DecrementDisableMovingGC(self_);
+ }
return OK;
}
@@ -420,19 +690,24 @@ bool Redefiner::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
}
const art::DexFile::ProtoId* proto_id = dex_file_->FindProtoId(method_return_idx,
new_type_list);
- CHECK(proto_id != nullptr || old_type_list == nullptr);
// TODO Return false, cleanup.
+ CHECK(proto_id != nullptr || old_type_list == nullptr);
const art::DexFile::MethodId* method_id = dex_file_->FindMethodId(declaring_class_id,
*new_name_id,
*proto_id);
- CHECK(method_id != nullptr);
// TODO Return false, cleanup.
+ CHECK(method_id != nullptr);
uint32_t dex_method_idx = dex_file_->GetIndexForMethodId(*method_id);
method.SetDexMethodIndex(dex_method_idx);
linker->SetEntryPointsToInterpreter(&method);
method.SetCodeItemOffset(dex_file_->FindCodeItemOffset(class_def, dex_method_idx));
method.SetDexCacheResolvedMethods(new_dex_cache->GetResolvedMethods(), image_pointer_size);
method.SetDexCacheResolvedTypes(new_dex_cache->GetResolvedTypes(), image_pointer_size);
+ // Notify the jit that this method is redefined.
+ art::jit::Jit* jit = runtime_->GetJit();
+ if (jit != nullptr) {
+ jit->GetCodeCache()->NotifyMethodRedefined(&method);
+ }
}
return true;
}
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 73cfc2b69b..9d23ce445f 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -64,6 +64,8 @@
namespace openjdkjvmti {
// Class that can redefine a single class's methods.
+// TODO We should really make this be driven by an outside class so we can do multiple classes at
+// the same time and have less required cleanup.
class Redefiner {
public:
// Redefine the given class with the given dex data. Note this function does not take ownership of
@@ -124,6 +126,14 @@ class Redefiner {
// in the future. For now we will just take the memory hit.
bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
+ // pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
+ // DexCache.
+ void EnsureObsoleteMethodsAreDeoptimized()
+ REQUIRES(art::Locks::mutator_lock_)
+ REQUIRES(!art::Locks::thread_list_lock_,
+ !art::Locks::classlinker_classes_lock_);
+
art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
// This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
@@ -170,6 +180,13 @@ class Redefiner {
bool UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
art::ObjPtr<art::mirror::DexCache> new_dex_cache)
REQUIRES(art::Locks::mutator_lock_);
+
+ bool FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void FillObsoleteMethodMap(art::mirror::Class* art_klass,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
+ REQUIRES(art::Locks::mutator_lock_);
};
} // namespace openjdkjvmti
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d40c631d4b..8fc211c6a3 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -268,7 +268,7 @@ class Runtime {
return java_vm_.get();
}
- size_t GetMaxSpinsBeforeThinkLockInflation() const {
+ size_t GetMaxSpinsBeforeThinLockInflation() const {
return max_spins_before_thin_lock_inflation_;
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 3fed7c9458..f9efc0b88f 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -618,6 +618,17 @@ std::string StackVisitor::DescribeLocation() const {
return result;
}
+void StackVisitor::SetMethod(ArtMethod* method) {
+ DCHECK(GetMethod() != nullptr);
+ if (cur_shadow_frame_ != nullptr) {
+ cur_shadow_frame_->SetMethod(method);
+ } else {
+ DCHECK(cur_quick_frame_ != nullptr);
+ CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod!";
+ *cur_quick_frame_ = method;
+ }
+}
+
static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
diff --git a/runtime/stack.h b/runtime/stack.h
index b1e99e5fd0..9dceb2931d 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -327,6 +327,12 @@ class ShadowFrame {
}
}
+ void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
+ DCHECK(method != nullptr);
+ DCHECK(method_ != nullptr);
+ method_ = method;
+ }
+
ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
@@ -610,6 +616,10 @@ class StackVisitor {
ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Sets this stack frame's method pointer. This requires a full lock of the MutatorLock. This
+ // doesn't work with inlined methods.
+ void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
+
ArtMethod* GetOuterMethod() const {
return *GetCurrentQuickFrame();
}
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index dd7e53100f..5e556be286 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -1093,7 +1093,9 @@ class CodeInfo {
}
CodeInfoEncoding ExtractEncoding() const {
- return CodeInfoEncoding(region_.start());
+ CodeInfoEncoding encoding(region_.start());
+ AssertValidStackMap(encoding);
+ return encoding;
}
bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
@@ -1254,6 +1256,18 @@ class CodeInfo {
uint16_t number_of_dex_registers,
bool dump_stack_maps) const;
+ // Check that the code info has valid stack map and abort if it does not.
+ void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
+ if (region_.size() != 0 && region_.size() < GetStackMapsSize(encoding)) {
+ LOG(FATAL) << region_.size() << "\n"
+ << encoding.header_size << "\n"
+ << encoding.non_header_size << "\n"
+ << encoding.number_of_location_catalog_entries << "\n"
+ << encoding.number_of_stack_maps << "\n"
+ << encoding.stack_map_size_in_bytes;
+ }
+ }
+
private:
MemoryRegion GetStackMaps(const CodeInfoEncoding& encoding) const {
return region_.size() == 0
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 4cebb7b915..c4058d63ee 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -245,17 +245,18 @@ std::string VerifierDeps::GetStringFromId(const DexFile& dex_file, dex::StringIn
bool VerifierDeps::IsInClassPath(ObjPtr<mirror::Class> klass) const {
DCHECK(klass != nullptr);
- ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
- if (dex_cache == nullptr) {
- // This is a synthesized class, in this case always an array. They are not
- // defined in the compiled DEX files and therefore are part of the classpath.
- // We could avoid recording dependencies on arrays with component types in
- // the compiled DEX files but we choose to record them anyway so as to
- // record the access flags VM sets for array classes.
- DCHECK(klass->IsArrayClass()) << klass->PrettyDescriptor();
+ // For array types, we return whether the non-array component type
+ // is in the classpath.
+ while (klass->IsArrayClass()) {
+ klass = klass->GetComponentType();
+ }
+
+ if (klass->IsPrimitive()) {
return true;
}
+ ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
+ DCHECK(dex_cache != nullptr);
const DexFile* dex_file = dex_cache->GetDexFile();
DCHECK(dex_file != nullptr);
diff --git a/test/626-checker-arm64-scratch-register/expected.txt b/test/626-checker-arm64-scratch-register/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/626-checker-arm64-scratch-register/info.txt b/test/626-checker-arm64-scratch-register/info.txt
new file mode 100644
index 0000000000..847213119f
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/info.txt
@@ -0,0 +1,2 @@
+Regression test checking that the ARM64 scratch register pool is not
+exhausted during moves between stack slots (b/32545705).
diff --git a/test/626-checker-arm64-scratch-register/src/Main.java b/test/626-checker-arm64-scratch-register/src/Main.java
new file mode 100644
index 0000000000..aa211be33c
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/src/Main.java
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ boolean b00;
+ boolean b01;
+ boolean b02;
+ boolean b03;
+ boolean b04;
+ boolean b05;
+ boolean b06;
+ boolean b07;
+ boolean b08;
+ boolean b09;
+ boolean b10;
+ boolean b11;
+ boolean b12;
+ boolean b13;
+ boolean b14;
+ boolean b15;
+ boolean b16;
+ boolean b17;
+ boolean b18;
+ boolean b19;
+ boolean b20;
+ boolean b21;
+ boolean b22;
+ boolean b23;
+ boolean b24;
+ boolean b25;
+ boolean b26;
+ boolean b27;
+ boolean b28;
+ boolean b29;
+ boolean b30;
+ boolean b31;
+ boolean b32;
+ boolean b33;
+ boolean b34;
+ boolean b35;
+ boolean b36;
+
+ boolean conditionA;
+ boolean conditionB;
+ boolean conditionC;
+
+ /// CHECK-START-ARM64: void Main.test() register (after)
+ /// CHECK: begin_block
+ /// CHECK: name "B0"
+ /// CHECK: <<This:l\d+>> ParameterValue
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
+ /// CHECK: <<CondB:z\d+>> InstanceFieldGet [<<This>>] field_name:Main.conditionB
+ /// CHECK: If [<<CondB>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: name "<<ElseBlock>>"
+ /// CHECK: ParallelMove moves:[#100->d17,32(sp)->d1,36(sp)->d2,d17->d3,d3->d4,d4->d5,d5->d6,d6->d7,d7->d18,d18->d19,d19->d20,d20->d21,d21->d22,d22->d23,d23->d10,d10->d11,d11->d12,24(sp)->d13,28(sp)->d14,d14->16(sp),d12->20(sp),d13->24(sp),d1->28(sp),d2->32(sp),16(sp)->36(sp),20(sp)->40(sp)]
+ /// CHECK: end_block
+
+ /// CHECK-START-ARM64: void Main.test() disassembly (after)
+ /// CHECK: begin_block
+ /// CHECK: name "B0"
+ /// CHECK: <<This:l\d+>> ParameterValue
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
+ /// CHECK: <<CondB:z\d+>> InstanceFieldGet [<<This>>] field_name:Main.conditionB
+ /// CHECK: If [<<CondB>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: name "<<ElseBlock>>"
+ /// CHECK: ParallelMove moves:[invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid]
+ /// CHECK: fmov d31, d2
+ /// CHECK: ldr s2, [sp, #36]
+ /// CHECK: ldr w16, [sp, #16]
+ /// CHECK: str w16, [sp, #36]
+ /// CHECK: str s14, [sp, #16]
+ /// CHECK: ldr s14, [sp, #28]
+ /// CHECK: str s1, [sp, #28]
+ /// CHECK: ldr s1, [sp, #32]
+ /// CHECK: str s31, [sp, #32]
+ /// CHECK: ldr w16, [sp, #20]
+ /// CHECK: str w16, [sp, #40]
+ /// CHECK: str s12, [sp, #20]
+ /// CHECK: fmov d12, d11
+ /// CHECK: fmov d11, d10
+ /// CHECK: fmov d10, d23
+ /// CHECK: fmov d23, d22
+ /// CHECK: fmov d22, d21
+ /// CHECK: fmov d21, d20
+ /// CHECK: fmov d20, d19
+ /// CHECK: fmov d19, d18
+ /// CHECK: fmov d18, d7
+ /// CHECK: fmov d7, d6
+ /// CHECK: fmov d6, d5
+ /// CHECK: fmov d5, d4
+ /// CHECK: fmov d4, d3
+ /// CHECK: fmov d3, d17
+ /// CHECK: fmov d17, d13
+ /// CHECK: ldr s13, [sp, #24]
+ /// CHECK: str s17, [sp, #24]
+ /// CHECK: ldr s17, pc+{{\d+}} (addr {{0x[0-9a-f]+}}) (100)
+ /// CHECK: end_block
+
+ public void test() {
+ String r = "";
+
+ // For the purpose of this regression test, the order of
+  // definition of these float variables matters. Likewise with the
+ // order of the instructions where these variables are used below.
+  // Reordering these lines may make the original (b/32545705)
+ // issue vanish.
+ float f17 = b17 ? 0.0f : 1.0f;
+ float f16 = b16 ? 0.0f : 1.0f;
+ float f18 = b18 ? 0.0f : 1.0f;
+ float f19 = b19 ? 0.0f : 1.0f;
+ float f20 = b20 ? 0.0f : 1.0f;
+ float f21 = b21 ? 0.0f : 1.0f;
+ float f15 = b15 ? 0.0f : 1.0f;
+ float f00 = b00 ? 0.0f : 1.0f;
+ float f22 = b22 ? 0.0f : 1.0f;
+ float f23 = b23 ? 0.0f : 1.0f;
+ float f24 = b24 ? 0.0f : 1.0f;
+ float f25 = b25 ? 0.0f : 1.0f;
+ float f26 = b26 ? 0.0f : 1.0f;
+ float f27 = b27 ? 0.0f : 1.0f;
+ float f29 = b29 ? 0.0f : 1.0f;
+ float f28 = b28 ? 0.0f : 1.0f;
+ float f01 = b01 ? 0.0f : 1.0f;
+ float f02 = b02 ? 0.0f : 1.0f;
+ float f03 = b03 ? 0.0f : 1.0f;
+ float f04 = b04 ? 0.0f : 1.0f;
+ float f05 = b05 ? 0.0f : 1.0f;
+ float f07 = b07 ? 0.0f : 1.0f;
+ float f06 = b06 ? 0.0f : 1.0f;
+ float f30 = b30 ? 0.0f : 1.0f;
+ float f31 = b31 ? 0.0f : 1.0f;
+ float f32 = b32 ? 0.0f : 1.0f;
+ float f33 = b33 ? 0.0f : 1.0f;
+ float f34 = b34 ? 0.0f : 1.0f;
+ float f36 = b36 ? 0.0f : 1.0f;
+ float f35 = b35 ? 0.0f : 1.0f;
+ float f08 = b08 ? 0.0f : 1.0f;
+ float f09 = b09 ? 0.0f : 1.0f;
+ float f10 = b10 ? 0.0f : 1.0f;
+ float f11 = b11 ? 0.0f : 1.0f;
+ float f12 = b12 ? 0.0f : 1.0f;
+ float f14 = b14 ? 0.0f : 1.0f;
+ float f13 = b13 ? 0.0f : 1.0f;
+
+ if (conditionA) {
+ f16 /= 1000.0f;
+ f17 /= 1000.0f;
+ f18 /= 1000.0f;
+ f19 /= 1000.0f;
+ f20 /= 1000.0f;
+ f21 /= 1000.0f;
+ f15 /= 1000.0f;
+ f08 /= 1000.0f;
+ f09 /= 1000.0f;
+ f10 /= 1000.0f;
+ f11 /= 1000.0f;
+ f12 /= 1000.0f;
+ f30 /= 1000.0f;
+ f31 /= 1000.0f;
+ f32 /= 1000.0f;
+ f33 /= 1000.0f;
+ f34 /= 1000.0f;
+ f01 /= 1000.0f;
+ f02 /= 1000.0f;
+ f03 /= 1000.0f;
+ f04 /= 1000.0f;
+ f05 /= 1000.0f;
+ f23 /= 1000.0f;
+ f24 /= 1000.0f;
+ f25 /= 1000.0f;
+ f26 /= 1000.0f;
+ f27 /= 1000.0f;
+ f22 /= 1000.0f;
+ f00 /= 1000.0f;
+ f14 /= 1000.0f;
+ f13 /= 1000.0f;
+ f36 /= 1000.0f;
+ f35 /= 1000.0f;
+ f07 /= 1000.0f;
+ f06 /= 1000.0f;
+ f29 /= 1000.0f;
+ f28 /= 1000.0f;
+ }
+ // The parallel move that used to exhaust the ARM64 parallel move
+ // resolver's scratch register pool (provided by VIXL) was in the
+ // "else" branch of the following condition generated by ART's
+ // compiler.
+ if (conditionB) {
+ f16 /= 100.0f;
+ f17 /= 100.0f;
+ f18 /= 100.0f;
+ f19 /= 100.0f;
+ f20 /= 100.0f;
+ f21 /= 100.0f;
+ f15 /= 100.0f;
+ f08 /= 100.0f;
+ f09 /= 100.0f;
+ f10 /= 100.0f;
+ f11 /= 100.0f;
+ f12 /= 100.0f;
+ f30 /= 100.0f;
+ f31 /= 100.0f;
+ f32 /= 100.0f;
+ f33 /= 100.0f;
+ f34 /= 100.0f;
+ f01 /= 100.0f;
+ f02 /= 100.0f;
+ f03 /= 100.0f;
+ f04 /= 100.0f;
+ f05 /= 100.0f;
+ f23 /= 100.0f;
+ f24 /= 100.0f;
+ f25 /= 100.0f;
+ f26 /= 100.0f;
+ f27 /= 100.0f;
+ f22 /= 100.0f;
+ f00 /= 100.0f;
+ f14 /= 100.0f;
+ f13 /= 100.0f;
+ f36 /= 100.0f;
+ f35 /= 100.0f;
+ f07 /= 100.0f;
+ f06 /= 100.0f;
+ f29 /= 100.0f;
+ f28 /= 100.0f;
+ }
+ if (conditionC) {
+ f16 /= 12.0f;
+ f17 /= 12.0f;
+ f18 /= 12.0f;
+ f19 /= 12.0f;
+ f20 /= 12.0f;
+ f21 /= 12.0f;
+ f15 /= 12.0f;
+ f08 /= 12.0f;
+ f09 /= 12.0f;
+ f10 /= 12.0f;
+ f11 /= 12.0f;
+ f12 /= 12.0f;
+ f30 /= 12.0f;
+ f31 /= 12.0f;
+ f32 /= 12.0f;
+ f33 /= 12.0f;
+ f34 /= 12.0f;
+ f01 /= 12.0f;
+ f02 /= 12.0f;
+ f03 /= 12.0f;
+ f04 /= 12.0f;
+ f05 /= 12.0f;
+ f23 /= 12.0f;
+ f24 /= 12.0f;
+ f25 /= 12.0f;
+ f26 /= 12.0f;
+ f27 /= 12.0f;
+ f22 /= 12.0f;
+ f00 /= 12.0f;
+ f14 /= 12.0f;
+ f13 /= 12.0f;
+ f36 /= 12.0f;
+ f35 /= 12.0f;
+ f07 /= 12.0f;
+ f06 /= 12.0f;
+ f29 /= 12.0f;
+ f28 /= 12.0f;
+ }
+ float s = 0.0f;
+ s = ((float) Math.round(100.0f * s)) / 100.0f;
+ String res = s + r;
+ }
+
+ public static void main(String[] args) {
+ Main main = new Main();
+ main.test();
+ System.out.println("passed");
+ }
+}
diff --git a/test/631-checker-fp-abs/expected.txt b/test/631-checker-fp-abs/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/631-checker-fp-abs/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/631-checker-fp-abs/info.txt b/test/631-checker-fp-abs/info.txt
new file mode 100644
index 0000000000..0a1499e72c
--- /dev/null
+++ b/test/631-checker-fp-abs/info.txt
@@ -0,0 +1 @@
+Tests on floating-point Math.abs.
diff --git a/test/631-checker-fp-abs/src/Main.java b/test/631-checker-fp-abs/src/Main.java
new file mode 100644
index 0000000000..0f85dc6865
--- /dev/null
+++ b/test/631-checker-fp-abs/src/Main.java
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A few tests of Math.abs for floating-point data.
+ *
+ * Note, as a "quality of implementation", rather than pure "spec compliance",
+ * we require that Math.abs() clears the sign bit (but changes nothing else)
+ * for all numbers, including NaN (signaling NaN may become quiet though).
+ */
+public class Main {
+
+ private static final int SPQUIET = 1 << 22;
+ private static final long DPQUIET = 1L << 51;
+
+ public static boolean doThrow = false;
+
+ /// CHECK-START: float Main.$opt$noinline$absSP(float) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:f\d+>> InvokeStaticOrDirect intrinsic:MathAbsFloat
+ /// CHECK-DAG: Return [<<Result>>]
+ private static float $opt$noinline$absSP(float f) {
+ if (doThrow) {
+ throw new Error("Something to prevent inlining");
+ }
+ return Math.abs(f);
+ }
+
+ /// CHECK-START: double Main.$opt$noinline$absDP(double) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:d\d+>> InvokeStaticOrDirect intrinsic:MathAbsDouble
+ /// CHECK-DAG: Return [<<Result>>]
+ private static double $opt$noinline$absDP(double d) {
+ if (doThrow) {
+ throw new Error("Something to prevent inlining");
+ }
+ return Math.abs(d);
+ }
+
+ public static void main(String args[]) {
+ // A few obvious numbers.
+ for (float f = -100.0f; f < 0.0f; f += 0.5f) {
+ expectEqualsSP(-f, $opt$noinline$absSP(f));
+ }
+ for (float f = 0.0f; f <= 100.0f; f += 0.5f) {
+ expectEqualsSP(f, $opt$noinline$absSP(f));
+ }
+ for (float f = -1.5f; f <= -1.499f; f = Math.nextAfter(f, Float.POSITIVE_INFINITY)) {
+ expectEqualsSP(-f, $opt$noinline$absSP(f));
+ }
+ for (float f = 1.499f; f <= 1.5f; f = Math.nextAfter(f, Float.POSITIVE_INFINITY)) {
+ expectEqualsSP(f, $opt$noinline$absSP(f));
+ }
+
+ // Zero
+ expectEquals32(0, Float.floatToRawIntBits($opt$noinline$absSP(+0.0f)));
+ expectEquals32(0, Float.floatToRawIntBits($opt$noinline$absSP(-0.0f)));
+
+ // Inf.
+ expectEqualsSP(Float.POSITIVE_INFINITY, $opt$noinline$absSP(Float.NEGATIVE_INFINITY));
+ expectEqualsSP(Float.POSITIVE_INFINITY, $opt$noinline$absSP(Float.POSITIVE_INFINITY));
+
+ // A few NaN numbers.
+ int[] spnans = {
+ 0x7f800001,
+ 0x7fa00000,
+ 0x7fc00000,
+ 0x7fffffff,
+ 0xff800001,
+ 0xffa00000,
+ 0xffc00000,
+ 0xffffffff
+ };
+ for (int i = 0; i < spnans.length; i++) {
+ float f = Float.intBitsToFloat(spnans[i]);
+ expectEqualsNaN32(
+ spnans[i] & Integer.MAX_VALUE,
+ Float.floatToRawIntBits($opt$noinline$absSP(f)));
+ }
+
+ // A few obvious numbers.
+ for (double d = -100.0; d < 0.0; d += 0.5) {
+ expectEqualsDP(-d, $opt$noinline$absDP(d));
+ }
+ for (double d = 0.0; d <= 100.0; d += 0.5) {
+ expectEqualsDP(d, $opt$noinline$absDP(d));
+ }
+ for (double d = -1.5d; d <= -1.49999999999d; d = Math.nextAfter(d, Double.POSITIVE_INFINITY)) {
+ expectEqualsDP(-d, $opt$noinline$absDP(d));
+ }
+ for (double d = 1.49999999999d; d <= 1.5; d = Math.nextAfter(d, Double.POSITIVE_INFINITY)) {
+ expectEqualsDP(d, $opt$noinline$absDP(d));
+ }
+
+ // Zero
+ expectEquals64(0L, Double.doubleToRawLongBits($opt$noinline$absDP(+0.0f)));
+ expectEquals64(0L, Double.doubleToRawLongBits($opt$noinline$absDP(-0.0f)));
+
+ // Inf.
+ expectEqualsDP(Double.POSITIVE_INFINITY, $opt$noinline$absDP(Double.NEGATIVE_INFINITY));
+ expectEqualsDP(Double.POSITIVE_INFINITY, $opt$noinline$absDP(Double.POSITIVE_INFINITY));
+
+ // A few NaN numbers.
+ long[] dpnans = {
+ 0x7ff0000000000001L,
+ 0x7ff4000000000000L,
+ 0x7ff8000000000000L,
+ 0x7fffffffffffffffL,
+ 0xfff0000000000001L,
+ 0xfff4000000000000L,
+ 0xfff8000000000000L,
+ 0xffffffffffffffffL
+ };
+ for (int i = 0; i < dpnans.length; i++) {
+ double d = Double.longBitsToDouble(dpnans[i]);
+ expectEqualsNaN64(
+ dpnans[i] & Long.MAX_VALUE,
+ Double.doubleToRawLongBits($opt$noinline$absDP(d)));
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals32(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: 0x" + Integer.toHexString(expected)
+ + ", found: 0x" + Integer.toHexString(result));
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN32(int expected, int result) {
+ if (expected != result && (expected | SPQUIET) != result) {
+ throw new Error("Expected: 0x" + Integer.toHexString(expected)
+ + ", found: 0x" + Integer.toHexString(result));
+ }
+ }
+
+ private static void expectEquals64(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: 0x" + Long.toHexString(expected)
+ + ", found: 0x" + Long.toHexString(result));
+ }
+ }
+
+ // We allow that an expected NaN result has become quiet.
+ private static void expectEqualsNaN64(long expected, long result) {
+ if (expected != result && (expected | DPQUIET) != result) {
+ throw new Error("Expected: 0x" + Long.toHexString(expected)
+ + ", found: 0x" + Long.toHexString(result));
+ }
+ }
+
+ private static void expectEqualsSP(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEqualsDP(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/901-hello-ti-agent/run b/test/901-hello-ti-agent/run
index 8079a8c457..4379349cb2 100755
--- a/test/901-hello-ti-agent/run
+++ b/test/901-hello-ti-agent/run
@@ -14,14 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=901-hello-ti-agent \
- --android-runtime-option -Xplugin:${plugin}
+ --jvmti
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
index 94a8b2d975..4379349cb2 100755
--- a/test/902-hello-transformation/run
+++ b/test/902-hello-transformation/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
- if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
- else
- other_args=""
- fi
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=902-hello-transformation,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- --android-runtime-option -Xfully-deoptable \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/903-hello-tagging/run b/test/903-hello-tagging/run
index 5e3c0bd32a..4379349cb2 100755
--- a/test/903-hello-tagging/run
+++ b/test/903-hello-tagging/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=903-hello-tagging,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/904-object-allocation/run b/test/904-object-allocation/run
index 2f7ad21886..4379349cb2 100755
--- a/test/904-object-allocation/run
+++ b/test/904-object-allocation/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=904-object-allocation,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/905-object-free/run b/test/905-object-free/run
index 753b742681..4379349cb2 100755
--- a/test/905-object-free/run
+++ b/test/905-object-free/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=905-object-free,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/906-iterate-heap/run b/test/906-iterate-heap/run
index 3e135a378d..4379349cb2 100755
--- a/test/906-iterate-heap/run
+++ b/test/906-iterate-heap/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=906-iterate-heap,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
index 3f5a059fe2..4379349cb2 100755
--- a/test/907-get-loaded-classes/run
+++ b/test/907-get-loaded-classes/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=907-get-loaded-classes,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/908-gc-start-finish/run b/test/908-gc-start-finish/run
index 2fc35f0048..4379349cb2 100755
--- a/test/908-gc-start-finish/run
+++ b/test/908-gc-start-finish/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=908-gc-start-finish,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/910-methods/run b/test/910-methods/run
index 4dd2555f9e..4379349cb2 100755
--- a/test/910-methods/run
+++ b/test/910-methods/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=910-methods,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/911-get-stack-trace/run b/test/911-get-stack-trace/run
index 43fc325363..4379349cb2 100755
--- a/test/911-get-stack-trace/run
+++ b/test/911-get-stack-trace/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=911-get-stack-trace,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/912-classes/run b/test/912-classes/run
index 64bbb987a1..4379349cb2 100755
--- a/test/912-classes/run
+++ b/test/912-classes/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=912-classes,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/913-heaps/run b/test/913-heaps/run
index 7bd8cbd1dd..4379349cb2 100755
--- a/test/913-heaps/run
+++ b/test/913-heaps/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=913-heaps,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/914-hello-obsolescence/build b/test/914-hello-obsolescence/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/914-hello-obsolescence/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/914-hello-obsolescence/expected.txt b/test/914-hello-obsolescence/expected.txt
new file mode 100644
index 0000000000..83efda144d
--- /dev/null
+++ b/test/914-hello-obsolescence/expected.txt
@@ -0,0 +1,9 @@
+hello
+Not doing anything here
+goodbye
+hello
+transforming calling function
+goodbye
+Hello - Transformed
+Not doing anything here
+Goodbye - Transformed
diff --git a/test/914-hello-obsolescence/info.txt b/test/914-hello-obsolescence/info.txt
new file mode 100644
index 0000000000..c8b892cedd
--- /dev/null
+++ b/test/914-hello-obsolescence/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/914-hello-obsolescence/run b/test/914-hello-obsolescence/run
new file mode 100755
index 0000000000..4379349cb2
--- /dev/null
+++ b/test/914-hello-obsolescence/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/914-hello-obsolescence/src/Main.java b/test/914-hello-obsolescence/src/Main.java
new file mode 100644
index 0000000000..46266efb28
--- /dev/null
+++ b/test/914-hello-obsolescence/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // public void sayHi(Runnable r) {
+ // System.out.println("Hello - Transformed");
+ // r.run();
+ // System.out.println("Goodbye - Transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAeAQATSGVsbG8gLSBU" +
+ "cmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABVHb29kYnllIC0gVHJhbnNmb3JtZWQBAAlUcmFu" +
+ "c2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZh" +
+ "L2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZh" +
+ "L2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAAAAACAAAA" +
+ "CQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQABAA0ADgABAAsAAAA7AAIA" +
+ "AgAAABeyAAISA7YABCu5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4ABQAWAAYA" +
+ "AQAPAAAAAgAQ");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQAYeAMMXgYWxoeSHAS9EWKCCtVRSAGpqZVQAwAAcAAAAHhWNBIAAAAAAAAAALACAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAMAgAARAEAAKIB" +
+ "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAAB3AgAAfAIA" +
+ "AIUCAACKAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
+ "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAJ8CAAAAAAAAAQABAAEAAACRAgAABAAAAHAQ" +
+ "AwAAAA4ABAACAAIAAACWAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAwBiAAAAGwEBAAAAbiAC" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
+ "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
+ "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00" +
+ "LjEzAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAEBAICABMQCAQHc" +
+ "AgAAAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAACAAAABwAAALQAAAADAAAAAwAAANAAAAAE" +
+ "AAAAAQAAAPQAAAAFAAAABQAAAPwAAAAGAAAAAQAAACQBAAABIAAAAgAAAEQBAAABEAAAAgAAAJQB" +
+ "AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ t.sayHi(() -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/914-hello-obsolescence/src/Transform.java b/test/914-hello-obsolescence/src/Transform.java
new file mode 100644
index 0000000000..8cda6cdf53
--- /dev/null
+++ b/test/914-hello-obsolescence/src/Transform.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi(Runnable r) {
+ // Use lower 'h' to make sure the string will have a different string id
+ // than the transformation (the transformation code is the same except
+ // the actual printed String, which was making the test inacurately passing
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Hello" < "LTransform;" < "hello".
+ System.out.println("hello");
+ r.run();
+ System.out.println("goodbye");
+ }
+}
diff --git a/test/915-obsolete-2/build b/test/915-obsolete-2/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/915-obsolete-2/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/915-obsolete-2/expected.txt b/test/915-obsolete-2/expected.txt
new file mode 100644
index 0000000000..04aff3a6dc
--- /dev/null
+++ b/test/915-obsolete-2/expected.txt
@@ -0,0 +1,21 @@
+Pre Start private method call
+hello - private
+Post Start private method call
+Not doing anything here
+Pre Finish private method call
+goodbye - private
+Post Finish private method call
+Pre Start private method call
+hello - private
+Post Start private method call
+transforming calling function
+Pre Finish private method call
+Goodbye - private - Transformed
+Post Finish private method call
+Pre Start private method call - Transformed
+Hello - private - Transformed
+Post Start private method call - Transformed
+Not doing anything here
+Pre Finish private method call - Transformed
+Goodbye - private - Transformed
+Post Finish private method call - Transformed
diff --git a/test/915-obsolete-2/info.txt b/test/915-obsolete-2/info.txt
new file mode 100644
index 0000000000..c8b892cedd
--- /dev/null
+++ b/test/915-obsolete-2/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/915-obsolete-2/run b/test/915-obsolete-2/run
new file mode 100755
index 0000000000..4379349cb2
--- /dev/null
+++ b/test/915-obsolete-2/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/915-obsolete-2/src/Main.java b/test/915-obsolete-2/src/Main.java
new file mode 100644
index 0000000000..bbeb726858
--- /dev/null
+++ b/test/915-obsolete-2/src/Main.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // private void Start() {
+ // System.out.println("Hello - private - Transformed");
+ // }
+ //
+ // private void Finish() {
+ // System.out.println("Goodbye - private - Transformed");
+ // }
+ //
+ // public void sayHi(Runnable r) {
+ // System.out.println("Pre Start private method call - Transformed");
+ // Start();
+ // System.out.println("Post Start private method call - Transformed");
+ // r.run();
+ // System.out.println("Pre Finish private method call - Transformed");
+ // Finish();
+ // System.out.println("Post Finish private method call - Transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAMgoADgAZCQAaABsIABwKAB0AHggAHwgAIAoADQAhCAAiCwAjACQIACUKAA0AJggA" +
+ "JwcAKAcAKQEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVTdGFydAEA" +
+ "BkZpbmlzaAEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7KVYBAApTb3VyY2VGaWxlAQAO" +
+ "VHJhbnNmb3JtLmphdmEMAA8AEAcAKgwAKwAsAQAdSGVsbG8gLSBwcml2YXRlIC0gVHJhbnNmb3Jt" +
+ "ZWQHAC0MAC4ALwEAH0dvb2RieWUgLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQBACtQcmUgU3RhcnQg" +
+ "cHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkDAATABABACxQb3N0IFN0YXJ0IHByaXZh" +
+ "dGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAcAMAwAMQAQAQAsUHJlIEZpbmlzaCBwcml2YXRl" +
+ "IG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQMABQAEAEALVBvc3QgRmluaXNoIHByaXZhdGUgbWV0" +
+ "aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAEACVRyYW5zZm9ybQEAEGphdmEvbGFuZy9PYmplY3QBABBq" +
+ "YXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9Q" +
+ "cmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBABJqYXZhL2xhbmcv" +
+ "UnVubmFibGUBAANydW4AIAANAA4AAAAAAAQAAAAPABAAAQARAAAAHQABAAEAAAAFKrcAAbEAAAAB" +
+ "ABIAAAAGAAEAAAABAAIAEwAQAAEAEQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEAEgAAAAoAAgAA" +
+ "AAMACAAEAAIAFAAQAAEAEQAAACUAAgABAAAACbIAAhIFtgAEsQAAAAEAEgAAAAoAAgAAAAcACAAI" +
+ "AAEAFQAWAAEAEQAAAGMAAgACAAAAL7IAAhIGtgAEKrcAB7IAAhIItgAEK7kACQEAsgACEgq2AAQq" +
+ "twALsgACEgy2AASxAAAAAQASAAAAIgAIAAAACwAIAAwADAANABQADgAaAA8AIgAQACYAEQAuABIA" +
+ "AQAXAAAAAgAY");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCM0QYTJmX+NsZXkImojgSkJtXyuew3oaXcBAAAcAAAAHhWNBIAAAAAAAAAADwEAAAX" +
+ "AAAAcAAAAAcAAADMAAAAAwAAAOgAAAABAAAADAEAAAcAAAAUAQAAAQAAAEwBAABwAwAAbAEAAD4C" +
+ "AABGAgAATgIAAG8CAACOAgAAmwIAALICAADGAgAA3AIAAPACAAAEAwAAMwMAAGEDAACPAwAAvAMA" +
+ "AMMDAADTAwAA1gMAANoDAADuAwAA8wMAAPwDAAABBAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+ "EAAAABAAAAAGAAAAAAAAABEAAAAGAAAAMAIAABEAAAAGAAAAOAIAAAUAAQATAAAAAAAAAAAAAAAA" +
+ "AAAAAQAAAAAAAAAOAAAAAAABABYAAAABAAIAFAAAAAIAAAAAAAAAAwAAABUAAAAAAAAAAAAAAAIA" +
+ "AAAAAAAADwAAAAAAAAAmBAAAAAAAAAEAAQABAAAACAQAAAQAAABwEAUAAAAOAAMAAQACAAAADQQA" +
+ "AAkAAABiAAAAGwECAAAAbiAEABAADgAAAAMAAQACAAAAEwQAAAkAAABiAAAAGwEDAAAAbiAEABAA" +
+ "DgAAAAQAAgACAAAAGQQAACoAAABiAAAAGwENAAAAbiAEABAAcBACAAIAYgAAABsBCwAAAG4gBAAQ" +
+ "AHIQBgADAGIAAAAbAQwAAABuIAQAEABwEAEAAgBiAAAAGwEKAAAAbiAEABAADgABAAAAAwAAAAEA" +
+ "AAAEAAY8aW5pdD4ABkZpbmlzaAAfR29vZGJ5ZSAtIHByaXZhdGUgLSBUcmFuc2Zvcm1lZAAdSGVs" +
+ "bG8gLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQAC0xUcmFuc2Zvcm07ABVMamF2YS9pby9QcmludFN0" +
+ "cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwAUTGphdmEvbGFuZy9SdW5uYWJsZTsAEkxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AC1Qb3N0IEZpbmlzaCBwcml2YXRlIG1ldGhv" +
+ "ZCBjYWxsIC0gVHJhbnNmb3JtZWQALFBvc3QgU3RhcnQgcHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRy" +
+ "YW5zZm9ybWVkACxQcmUgRmluaXNoIHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAr" +
+ "UHJlIFN0YXJ0IHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAFU3RhcnQADlRyYW5z" +
+ "Zm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjEzAANvdXQAB3ByaW50bG4AA3J1bgAF" +
+ "c2F5SGkAAQAHDgAHAAcOhwADAAcOhwALAQAHDoc8hzyHPIcAAAADAQCAgATsAgEChAMBAqgDAwHM" +
+ "Aw0AAAAAAAAAAQAAAAAAAAABAAAAFwAAAHAAAAACAAAABwAAAMwAAAADAAAAAwAAAOgAAAAEAAAA" +
+ "AQAAAAwBAAAFAAAABwAAABQBAAAGAAAAAQAAAEwBAAABIAAABAAAAGwBAAABEAAAAgAAADACAAAC" +
+ "IAAAFwAAAD4CAAADIAAABAAAAAgEAAAAIAAAAQAAACYEAAAAEAAAAQAAADwEAAA=");
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ t.sayHi(() -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/915-obsolete-2/src/Transform.java b/test/915-obsolete-2/src/Transform.java
new file mode 100644
index 0000000000..e914e29479
--- /dev/null
+++ b/test/915-obsolete-2/src/Transform.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ private void Start() {
+ System.out.println("hello - private");
+ }
+
+ private void Finish() {
+ System.out.println("goodbye - private");
+ }
+
+ public void sayHi(Runnable r) {
+ System.out.println("Pre Start private method call");
+ Start();
+ System.out.println("Post Start private method call");
+ r.run();
+ System.out.println("Pre Finish private method call");
+ Finish();
+ System.out.println("Post Finish private method call");
+ }
+}
diff --git a/test/916-obsolete-jit/build b/test/916-obsolete-jit/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/916-obsolete-jit/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/916-obsolete-jit/expected.txt b/test/916-obsolete-jit/expected.txt
new file mode 100644
index 0000000000..4caefc6200
--- /dev/null
+++ b/test/916-obsolete-jit/expected.txt
@@ -0,0 +1,21 @@
+Pre Start private method call
+hello - private
+Post Start private method call
+Not doing anything here
+Pre Finish private method call
+goodbye - private
+Post Finish private method call
+Pre Start private method call
+hello - private
+Post Start private method call
+transforming calling function
+Pre Finish private method call
+Goodbye - private - Transformed
+Post Finish private method call
+pre Start private method call - Transformed
+Hello - private - Transformed
+post Start private method call - Transformed
+Not doing anything here
+pre Finish private method call - Transformed
+Goodbye - private - Transformed
+post Finish private method call - Transformed
diff --git a/test/916-obsolete-jit/info.txt b/test/916-obsolete-jit/info.txt
new file mode 100644
index 0000000000..c8b892cedd
--- /dev/null
+++ b/test/916-obsolete-jit/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/916-obsolete-jit/run b/test/916-obsolete-jit/run
new file mode 100755
index 0000000000..9056211284
--- /dev/null
+++ b/test/916-obsolete-jit/run
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We are testing the redefinition of compiled code but with jvmti we only allow
+# jitted compiled code so always add the --jit argument.
+if [[ "$@" == *"--jit"* ]]; then
+ other_args=""
+else
+ other_args="--jit"
+fi
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ ${other_args} \
+ --jvmti
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
new file mode 100644
index 0000000000..74eb003d5c
--- /dev/null
+++ b/test/916-obsolete-jit/src/Main.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.function.Consumer;
+import java.lang.reflect.Method;
+import java.util.Base64;
+
+public class Main {
+
+ // import java.util.function.Consumer;
+ //
+ // class Transform {
+ // private void Start(Consumer<String> reporter) {
+ // reporter.accept("Hello - private - Transformed");
+ // }
+ //
+ // private void Finish(Consumer<String> reporter) {
+ // reporter.accept("Goodbye - private - Transformed");
+ // }
+ //
+ // public void sayHi(Runnable r, Consumer<String> reporter) {
+ // reporter.accept("pre Start private method call - Transformed");
+ // Start(reporter);
+ // reporter.accept("post Start private method call - Transformed");
+ // r.run();
+ // reporter.accept("pre Finish private method call - Transformed");
+ // Finish(reporter);
+ // reporter.accept("post Finish private method call - Transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAMAoADQAcCAAdCwAeAB8IACAIACEKAAwAIggAIwsAJAAlCAAmCgAMACcIACgHACkH" +
+ "ACoBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFU3RhcnQBACAoTGph" +
+ "dmEvdXRpbC9mdW5jdGlvbi9Db25zdW1lcjspVgEACVNpZ25hdHVyZQEANChMamF2YS91dGlsL2Z1" +
+ "bmN0aW9uL0NvbnN1bWVyPExqYXZhL2xhbmcvU3RyaW5nOz47KVYBAAZGaW5pc2gBAAVzYXlIaQEA" +
+ "NChMamF2YS9sYW5nL1J1bm5hYmxlO0xqYXZhL3V0aWwvZnVuY3Rpb24vQ29uc3VtZXI7KVYBAEgo" +
+ "TGphdmEvbGFuZy9SdW5uYWJsZTtMamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyPExqYXZhL2xh" +
+ "bmcvU3RyaW5nOz47KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAA4ADwEAHUhlbGxv" +
+ "IC0gcHJpdmF0ZSAtIFRyYW5zZm9ybWVkBwArDAAsAC0BAB9Hb29kYnllIC0gcHJpdmF0ZSAtIFRy" +
+ "YW5zZm9ybWVkAQArcHJlIFN0YXJ0IHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAwA" +
+ "EgATAQAscG9zdCBTdGFydCBwcml2YXRlIG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQHAC4MAC8A" +
+ "DwEALHByZSBGaW5pc2ggcHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkDAAWABMBAC1w" +
+ "b3N0IEZpbmlzaCBwcml2YXRlIG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQBAAlUcmFuc2Zvcm0B" +
+ "ABBqYXZhL2xhbmcvT2JqZWN0AQAbamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAQAGYWNjZXB0" +
+ "AQAVKExqYXZhL2xhbmcvT2JqZWN0OylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAADAAN" +
+ "AAAAAAAEAAAADgAPAAEAEAAAAB0AAQABAAAABSq3AAGxAAAAAQARAAAABgABAAAAEwACABIAEwAC" +
+ "ABAAAAAlAAIAAgAAAAkrEgK5AAMCALEAAAABABEAAAAKAAIAAAAVAAgAFgAUAAAAAgAVAAIAFgAT" +
+ "AAIAEAAAACUAAgACAAAACSsSBLkAAwIAsQAAAAEAEQAAAAoAAgAAABkACAAaABQAAAACABUAAQAX" +
+ "ABgAAgAQAAAAZQACAAMAAAAxLBIFuQADAgAqLLcABiwSB7kAAwIAK7kACAEALBIJuQADAgAqLLcA" +
+ "CiwSC7kAAwIAsQAAAAEAEQAAACIACAAAAB0ACAAeAA0AHwAVACAAGwAhACMAIgAoACMAMAAkABQA" +
+ "AAACABkAAQAaAAAAAgAb");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQBc8wr9PcHqnOR61m+0kimXTSddVMToJPuYBQAAcAAAAHhWNBIAAAAAAAAAAOAEAAAc" +
+ "AAAAcAAAAAYAAADgAAAABAAAAPgAAAAAAAAAAAAAAAcAAAAoAQAAAQAAAGABAAAYBAAAgAEAAHoC" +
+ "AAB9AgAAgAIAAIgCAACOAgAAlgIAALcCAADWAgAA4wIAAAIDAAAWAwAALAMAAEADAABeAwAAfQMA" +
+ "AIQDAACUAwAAlwMAAJsDAACgAwAAqAMAALwDAADrAwAAGQQAAEcEAAB0BAAAeQQAAIAEAAAHAAAA" +
+ "CAAAAAkAAAAKAAAADQAAABAAAAAQAAAABQAAAAAAAAARAAAABQAAAGQCAAASAAAABQAAAGwCAAAR" +
+ "AAAABQAAAHQCAAAAAAAAAgAAAAAAAwAEAAAAAAADAA4AAAAAAAIAGgAAAAIAAAACAAAAAwAAABkA" +
+ "AAAEAAEAEwAAAAAAAAAAAAAAAgAAAAAAAAAPAAAAPAIAAMoEAAAAAAAAAQAAAKgEAAABAAAAuAQA" +
+ "AAEAAQABAAAAhwQAAAQAAABwEAQAAAAOAAMAAgACAAAAjAQAAAcAAAAbAAUAAAByIAYAAgAOAAAA" +
+ "AwACAAIAAACTBAAABwAAABsABgAAAHIgBgACAA4AAAAEAAMAAgAAAJoEAAAiAAAAGwAYAAAAciAG" +
+ "AAMAcCACADEAGwAWAAAAciAGAAMAchAFAAIAGwAXAAAAciAGAAMAcCABADEAGwAVAAAAciAGAAMA" +
+ "DgAAAAAAAAAAAAMAAAAAAAAAAQAAAIABAAACAAAAgAEAAAMAAACIAQAAAQAAAAIAAAACAAAAAwAE" +
+ "AAEAAAAEAAEoAAE8AAY8aW5pdD4ABD47KVYABkZpbmlzaAAfR29vZGJ5ZSAtIHByaXZhdGUgLSBU" +
+ "cmFuc2Zvcm1lZAAdSGVsbG8gLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQAC0xUcmFuc2Zvcm07AB1M" +
+ "ZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9s" +
+ "YW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABxMamF2YS91dGlsL2Z1bmN0aW9uL0Nv" +
+ "bnN1bWVyAB1MamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyOwAFU3RhcnQADlRyYW5zZm9ybS5q" +
+ "YXZhAAFWAAJWTAADVkxMAAZhY2NlcHQAEmVtaXR0ZXI6IGphY2stNC4xOQAtcG9zdCBGaW5pc2gg" +
+ "cHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkACxwb3N0IFN0YXJ0IHByaXZhdGUgbWV0" +
+ "aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAscHJlIEZpbmlzaCBwcml2YXRlIG1ldGhvZCBjYWxsIC0g" +
+ "VHJhbnNmb3JtZWQAK3ByZSBTdGFydCBwcml2YXRlIG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQA" +
+ "A3J1bgAFc2F5SGkABXZhbHVlABMABw4AGQEABw5pABUBAAcOaQAdAgAABw5pPGk8aTxpAAIBARsc" +
+ "BRcAFwwXARcLFwMCAQEbHAYXABcKFwwXARcLFwMAAAMBAICABJADAQKoAwECyAMDAegDDwAAAAAA" +
+ "AAABAAAAAAAAAAEAAAAcAAAAcAAAAAIAAAAGAAAA4AAAAAMAAAAEAAAA+AAAAAUAAAAHAAAAKAEA" +
+ "AAYAAAABAAAAYAEAAAMQAAACAAAAgAEAAAEgAAAEAAAAkAEAAAYgAAABAAAAPAIAAAEQAAADAAAA" +
+ "ZAIAAAIgAAAcAAAAegIAAAMgAAAEAAAAhwQAAAQgAAACAAAAqAQAAAAgAAABAAAAygQAAAAQAAAB" +
+ "AAAA4AQAAA==");
+
+ // A class that we can use to keep track of the output of this test.
+ private static class TestWatcher implements Consumer<String> {
+ private StringBuilder sb;
+ public TestWatcher() {
+ sb = new StringBuilder();
+ }
+
+ @Override
+ public void accept(String s) {
+ sb.append(s);
+ sb.append('\n');
+ }
+
+ public String getOutput() {
+ return sb.toString();
+ }
+
+ public void clear() {
+ sb = new StringBuilder();
+ }
+ }
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform(), new TestWatcher());
+ }
+
+ // TODO Workaround to (1) inability to ensure that current_method is not put into a register by
+ // the JIT and/or (2) inability to deoptimize frames near runtime functions.
+ // TODO Fix one/both of these issues.
+ public static void doCall(Runnable r) {
+ r.run();
+ }
+
+ private static boolean interpreting = true;
+ private static boolean retry = false;
+
+ public static void doTest(Transform t, TestWatcher w) {
+ // Get the methods that need to be optimized.
+ Method say_hi_method;
+ Method do_call_method;
+ // Figure out if we can even JIT at all.
+ final boolean has_jit = hasJit();
+ try {
+ say_hi_method = Transform.class.getDeclaredMethod(
+ "sayHi", Runnable.class, Consumer.class);
+ do_call_method = Main.class.getDeclaredMethod("doCall", Runnable.class);
+ } catch (Exception e) {
+ System.out.println("Unable to find methods!");
+ e.printStackTrace();
+ return;
+ }
+ // Makes sure the stack is the way we want it for the test and does the redefinition. It will
+ // set the retry boolean to true if we need to go around again due to a bad stack.
+ Runnable do_redefinition = () -> {
+ if (has_jit &&
+ (Main.isInterpretedFunction(say_hi_method, true) ||
+ Main.isInterpretedFunction(do_call_method, false))) {
+ // Try again. We are not running the right jitted methods/cannot redefine them now.
+ retry = true;
+ } else {
+ // Actually do the redefinition. The stack looks good.
+ retry = false;
+ w.accept("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ }
+ };
+ // This does nothing.
+ Runnable noop = () -> {};
+ // This just prints something out to show we are running the Runnable.
+ Runnable say_nothing = () -> { w.accept("Not doing anything here"); };
+ // This checks to see if we have jitted the methods we are testing.
+ Runnable check_interpreting = () -> {
+ // TODO remove the second check when we remove the doCall function. We need to check that
+ // both of these functions aren't being interpreted because if sayHi is the test doesn't do
+ // anything and if doCall is then there will be a runtime call right above the sayHi
+ // function preventing sayHi from being deoptimized.
+ interpreting = has_jit && (Main.isInterpretedFunction(say_hi_method, true) ||
+ Main.isInterpretedFunction(do_call_method, false));
+ };
+ do {
+ w.clear();
+ // Wait for the methods to be jitted
+ long j = 0;
+ do {
+ for (int i = 0; i < 10000; i++) {
+ t.sayHi(noop, w);
+ j++;
+ // Clear so that we won't OOM if we go around a few times.
+ w.clear();
+ }
+ t.sayHi(check_interpreting, w);
+ if (j >= 1000000) {
+ System.out.println("FAIL: Could not make sayHi be Jitted!");
+ return;
+ }
+ j++;
+ } while(interpreting);
+ // Clear output. Now we try for real.
+ w.clear();
+ // Try and redefine.
+ t.sayHi(say_nothing, w);
+ t.sayHi(do_redefinition, w);
+ t.sayHi(say_nothing, w);
+ } while (retry);
+ // Print output of last run.
+ System.out.print(w.getOutput());
+ }
+
+ private static native boolean hasJit();
+
+ private static native boolean isInterpretedFunction(Method m, boolean require_deoptimizable);
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/916-obsolete-jit/src/Transform.java b/test/916-obsolete-jit/src/Transform.java
new file mode 100644
index 0000000000..f4dcf09dc6
--- /dev/null
+++ b/test/916-obsolete-jit/src/Transform.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.function.Consumer;
+
+class Transform {
+ private void Start(Consumer<String> reporter) {
+ reporter.accept("hello - private");
+ }
+
+ private void Finish(Consumer<String> reporter) {
+ reporter.accept("goodbye - private");
+ }
+
+ public void sayHi(Runnable r, Consumer<String> reporter) {
+ reporter.accept("Pre Start private method call");
+ Start(reporter);
+ reporter.accept("Post Start private method call");
+ // TODO Revisit with b/33616143
+ // TODO Uncomment this once either b/33630159 or b/33616143 are resolved.
+ // r.run();
+ // TODO This doCall function is a very temporary fix until we get either deoptimization near
+ // runtime frames working, forcing current method to be always read from the stack or both
+ // working.
+ Main.doCall(r);
+ reporter.accept("Pre Finish private method call");
+ Finish(reporter);
+ reporter.accept("Post Finish private method call");
+ }
+}
diff --git a/test/917-fields-transformation/run b/test/917-fields-transformation/run
index a434b63e42..4379349cb2 100755
--- a/test/917-fields-transformation/run
+++ b/test/917-fields-transformation/run
@@ -14,30 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-plugin=libopenjdkjvmtid.so
-agent=libtiagentd.so
-lib=tiagentd
-if [[ "$@" == *"-O"* ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
-fi
-
-if [[ "$@" == *"--jvm"* ]]; then
- arg="jvm"
-else
- arg="art"
- if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
- else
- other_args=""
- fi
-fi
-
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=917-fields-transformation,${arg} \
- --android-runtime-option -Xplugin:${plugin} \
- --android-runtime-option -Xfully-deoptable \
- ${other_args} \
- --args ${lib}
+ --jvmti
diff --git a/test/Android.bp b/test/Android.bp
index 2625f56418..5a2c90230e 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -243,6 +243,9 @@ art_cc_defaults {
name: "libtiagent-defaults",
defaults: ["libartagent-defaults"],
srcs: [
+ // This is to get the IsInterpreted native method.
+ "common/stack_inspect.cc",
+ "common/runtime_state.cc",
"ti-agent/common_load.cc",
"ti-agent/common_helper.cc",
"901-hello-ti-agent/basics.cc",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index fdd5b6009c..ec1f6ba239 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -286,6 +286,9 @@ TEST_ART_BROKEN_TARGET_TESTS += \
911-get-stack-trace \
912-classes \
913-heaps \
+ 914-hello-obsolescence \
+ 915-obsolete-2 \
+ 916-obsolete-jit \
917-fields-transformation \
ifneq (,$(filter target,$(TARGET_TYPES)))
@@ -539,7 +542,6 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# Test 906 iterates the heap filtering with different options. No instances should be created
# between those runs to be able to have precise checks.
# Test 902 hits races with the JIT compiler. b/32821077
-# Test 626-const-class-linking can deadlock with JIT. b/33567581
# Test 629 requires compilation.
# Test 914, 915, 917, & 918 are very sensitive to the exact state of the stack,
# including the jit-inserted runtime frames. This causes them to be somewhat
@@ -548,7 +550,6 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# feature for JIT use cases in a way that is resilient to the jit frames.
TEST_ART_BROKEN_JIT_RUN_TESTS := \
137-cfi \
- 626-const-class-linking \
629-vdex-speed \
902-hello-transformation \
904-object-allocation \
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index f26e122580..7451cf97de 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -19,6 +19,7 @@
#include "base/enums.h"
#include "base/logging.h"
#include "dex_file-inl.h"
+#include "instrumentation.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
@@ -30,6 +31,16 @@
namespace art {
+// public static native boolean hasJit();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJit(JNIEnv*, jclass) {
+ Runtime* runtime = Runtime::Current();
+ return runtime != nullptr
+ && runtime->GetJit() != nullptr
+ && runtime->GetInstrumentation()->GetCurrentInstrumentationLevel() !=
+ instrumentation::Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter;
+}
+
// public static native boolean hasOatFile();
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasOatFile(JNIEnv* env, jclass cls) {
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 4df2d470ec..df7fa20226 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -18,6 +18,7 @@
#include "base/logging.h"
#include "dex_file-inl.h"
+#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "nth_caller_visitor.h"
#include "oat_file.h"
@@ -52,6 +53,89 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpreted(JNIEnv* env, jclas
return IsInterpreted(env, klass, 1);
}
+// public static native boolean isInterpreted(int depth);
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedAt(JNIEnv* env,
+ jclass klass,
+ jint depth) {
+ return IsInterpreted(env, klass, depth);
+}
+
+
+// public static native boolean isInterpretedFunction(String smali);
+
+// TODO Remove 'allow_runtime_frames' option once we have deoptimization through runtime frames.
+struct MethodIsInterpretedVisitor : public StackVisitor {
+ public:
+ MethodIsInterpretedVisitor(Thread* thread, ArtMethod* goal, bool require_deoptable)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ goal_(goal),
+ method_is_interpreted_(true),
+ method_found_(false),
+ prev_was_runtime_(true),
+ require_deoptable_(require_deoptable) {}
+
+ virtual bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (goal_ == GetMethod()) {
+ method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
+ method_found_ = true;
+ return false;
+ }
+ prev_was_runtime_ = GetMethod()->IsRuntimeMethod();
+ return true;
+ }
+
+ bool IsInterpreted() {
+ return method_is_interpreted_;
+ }
+
+ bool IsFound() {
+ return method_found_;
+ }
+
+ private:
+ const ArtMethod* goal_;
+ bool method_is_interpreted_;
+ bool method_found_;
+ bool prev_was_runtime_;
+ bool require_deoptable_;
+};
+
+// TODO Remove 'require_deoptimizable' option once we have deoptimization through runtime frames.
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method, jboolean require_deoptimizable) {
+ // Return false if this seems to not be an ART runtime.
+ if (Runtime::Current() == nullptr) {
+ return JNI_FALSE;
+ }
+ if (method == nullptr) {
+ env->ThrowNew(env->FindClass("java/lang/NullPointerException"), "method is null!");
+ return JNI_FALSE;
+ }
+ jmethodID id = env->FromReflectedMethod(method);
+ if (id == nullptr) {
+ env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to interpret method argument!");
+ return JNI_FALSE;
+ }
+ bool result;
+ bool found;
+ {
+ ScopedObjectAccess soa(env);
+ ArtMethod* goal = jni::DecodeArtMethod(id);
+ MethodIsInterpretedVisitor v(soa.Self(), goal, require_deoptimizable);
+ v.WalkStack();
+ bool enters_interpreter = Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(
+ goal->GetEntryPointFromQuickCompiledCode());
+ result = (v.IsInterpreted() || enters_interpreter);
+ found = v.IsFound();
+ }
+ if (!found) {
+ env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
+ return JNI_FALSE;
+ }
+ return result;
+}
+
// public static native void assertIsInterpreted();
extern "C" JNIEXPORT void JNICALL Java_Main_assertIsInterpreted(JNIEnv* env, jclass klass) {
diff --git a/test/etc/default-build b/test/etc/default-build
index 51ae1752ae..e9e388646a 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -69,6 +69,7 @@ DEFAULT_EXPERIMENT="no-experiment"
# Setup experimental flag mappings in a bash associative array.
declare -A JACK_EXPERIMENTAL_ARGS
+JACK_EXPERIMENTAL_ARGS["agents"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
JACK_EXPERIMENTAL_ARGS["method-handles"]="-D jack.java.source.version=1.7 -D jack.android.min-api-level=o-b1"
@@ -76,12 +77,14 @@ JACK_EXPERIMENTAL_ARGS["method-handles"]="-D jack.java.source.version=1.7 -D jac
declare -A SMALI_EXPERIMENTAL_ARGS
SMALI_EXPERIMENTAL_ARGS["default-methods"]="--api-level 24"
SMALI_EXPERIMENTAL_ARGS["method-handles"]="--api-level 26"
+SMALI_EXPERIMENTAL_ARGS["agents"]="--api-level 26"
declare -A JAVAC_EXPERIMENTAL_ARGS
JAVAC_EXPERIMENTAL_ARGS["default-methods"]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS["lambdas"]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS["method-handles"]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS[${DEFAULT_EXPERIMENT}]="-source 1.7 -target 1.7"
+JAVAC_EXPERIMENTAL_ARGS["agents"]="-source 1.8 -target 1.8"
while true; do
if [ "x$1" = "x--dx-option" ]; then
@@ -126,6 +129,16 @@ done
# Be sure to get any default arguments if not doing any experiments.
EXPERIMENTAL="${EXPERIMENTAL} ${DEFAULT_EXPERIMENT}"
+if [ "${JACK_SERVER}" = "false" ]; then
+ # Run in single-threaded mode for the continuous buildbot.
+ JACK_ARGS="${JACK_ARGS} -D sched.runner=single-threaded"
+else
+ # Run with 4 threads to reduce memory footprint and thread contention.
+ JACK_ARGS="${JACK_ARGS} -D sched.runner=multi-threaded"
+ JACK_ARGS="${JACK_ARGS} -D sched.runner.thread.kind=fixed"
+ JACK_ARGS="${JACK_ARGS} -D sched.runner.thread.fixed.count=4"
+fi
+
# Add args from the experimental mappings.
for experiment in ${EXPERIMENTAL}; do
JACK_ARGS="${JACK_ARGS} ${JACK_EXPERIMENTAL_ARGS[${experiment}]}"
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 566f7ba522..8245947251 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -30,6 +30,7 @@ HOST="n"
INTERPRETER="n"
JIT="n"
INVOKE_WITH=""
+IS_JVMTI_TEST="n"
ISA=x86
LIBRARY_DIRECTORY="lib"
TEST_DIRECTORY="nativetest"
@@ -59,14 +60,18 @@ ARGS=""
EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS.
DRY_RUN="n" # if y prepare to run the test but don't run it.
TEST_VDEX="n"
+TEST_IS_NDEBUG="n"
APP_IMAGE="y"
while true; do
if [ "x$1" = "x--quiet" ]; then
QUIET="y"
shift
+ elif [ "x$1" = "x--jvmti" ]; then
+ IS_JVMTI_TEST="y"
+ shift
elif [ "x$1" = "x-O" ]; then
- # Ignore this option.
+ TEST_IS_NDEBUG="y"
shift
elif [ "x$1" = "x--lib" ]; then
shift
@@ -382,6 +387,28 @@ if [ "$JIT" = "y" ]; then
fi
fi
+if [ "$IS_JVMTI_TEST" = "y" ]; then
+ plugin=libopenjdkjvmtid.so
+ agent=libtiagentd.so
+ lib=tiagentd
+ if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+ fi
+
+ ARGS="${ARGS} ${lib}"
+ if [[ "$USE_JVM" = "y" ]]; then
+ FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},jvm"
+ else
+ FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
+ FLAGS="${FLAGS} -Xplugin:${plugin}"
+ FLAGS="${FLAGS} -Xfully-deoptable"
+ # Always make the compilation be debuggable.
+ COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
+ fi
+fi
+
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
if [ "$RELOCATE" = "y" ]; then
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 3e2b16802b..ebf1e4621c 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -18,8 +18,11 @@
#include <stdio.h>
+#include "art_method.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
+#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "ti-agent/common_load.h"
#include "utils.h"
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 38861482d2..79c17d744f 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -66,6 +66,9 @@ AgentLib agents[] = {
{ "911-get-stack-trace", Test911GetStackTrace::OnLoad, nullptr },
{ "912-classes", Test912Classes::OnLoad, nullptr },
{ "913-heaps", Test913Heaps::OnLoad, nullptr },
+ { "914-hello-obsolescence", common_redefine::OnLoad, nullptr },
+ { "915-obsolete-2", common_redefine::OnLoad, nullptr },
+ { "916-obsolete-jit", common_redefine::OnLoad, nullptr },
{ "917-fields-transformation", common_redefine::OnLoad, nullptr },
};