-rw-r--r--  benchmark/jni-perf/src/JniPerfBenchmark.java | 4
-rw-r--r--  benchmark/jobject-benchmark/src/JObjectBenchmark.java | 4
-rw-r--r--  benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java | 4
-rw-r--r--  compiler/cfi_test.h | 4
-rw-r--r--  compiler/optimizing/code_generator.cc | 7
-rw-r--r--  compiler/optimizing/code_generator.h | 5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 2
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 43
-rw-r--r--  compiler/optimizing/gvn.cc | 9
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 26
-rw-r--r--  compiler/optimizing/intrinsics.cc | 6
-rw-r--r--  compiler/optimizing/intrinsics.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_list.h | 2
-rw-r--r--  compiler/optimizing/licm.cc | 13
-rw-r--r--  compiler/optimizing/nodes.cc | 15
-rw-r--r--  compiler/optimizing/nodes.h | 20
-rw-r--r--  compiler/optimizing/register_allocator.cc | 4
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 16
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 17
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.cc | 17
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 8
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 1
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc | 6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 10
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 1
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 7
-rw-r--r--  disassembler/disassembler.h | 13
-rw-r--r--  disassembler/disassembler_arm.cc | 26
-rw-r--r--  disassembler/disassembler_arm64.cc | 10
-rw-r--r--  disassembler/disassembler_arm64.h | 11
-rw-r--r--  oatdump/oatdump.cc | 1
-rw-r--r--  runtime/art_method.cc | 9
-rw-r--r--  runtime/base/histogram.h | 4
-rw-r--r--  runtime/check_jni.cc | 6
-rw-r--r--  runtime/class_linker.cc | 1
-rw-r--r--  runtime/class_linker.h | 2
-rw-r--r--  runtime/class_table.h | 4
-rw-r--r--  runtime/gc/heap.cc | 29
-rw-r--r--  runtime/gc/heap.h | 14
-rw-r--r--  runtime/interpreter/interpreter.cc | 41
-rw-r--r--  runtime/interpreter/interpreter.h | 5
-rw-r--r--  runtime/jit/profile_saver.cc | 13
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 71
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc | 39
-rw-r--r--  runtime/oat_file_assistant.cc | 32
-rw-r--r--  runtime/oat_file_assistant.h | 9
-rw-r--r--  runtime/runtime.cc | 11
-rw-r--r--  runtime/runtime_options.def | 4
-rw-r--r--  test/044-proxy/expected.txt | 2
-rw-r--r--  test/044-proxy/src/ConstructorProxy.java | 53
-rw-r--r--  test/044-proxy/src/Main.java | 1
-rw-r--r--  test/536-checker-intrinsic-optimization/src/Main.java | 60
-rw-r--r--  test/599-checker-irreducible-loop/expected.txt | 1
-rw-r--r--  test/599-checker-irreducible-loop/info.txt | 2
-rw-r--r--  test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali | 56
-rw-r--r--  test/599-checker-irreducible-loop/src/Main.java | 30
-rwxr-xr-x  test/run-test | 11
-rw-r--r--  tools/ahat/test-dump/Main.java | 3
-rw-r--r--  tools/libcore_failures.txt | 6
64 files changed, 705 insertions(+), 140 deletions(-)
diff --git a/benchmark/jni-perf/src/JniPerfBenchmark.java b/benchmark/jni-perf/src/JniPerfBenchmark.java
index b1b21ce0ba..1e7cc2bf46 100644
--- a/benchmark/jni-perf/src/JniPerfBenchmark.java
+++ b/benchmark/jni-perf/src/JniPerfBenchmark.java
@@ -14,9 +14,7 @@
* limitations under the License.
*/
-import com.google.caliper.SimpleBenchmark;
-
-public class JniPerfBenchmark extends SimpleBenchmark {
+public class JniPerfBenchmark {
private static final String MSG = "ABCDE";
native void perfJniEmptyCall();
diff --git a/benchmark/jobject-benchmark/src/JObjectBenchmark.java b/benchmark/jobject-benchmark/src/JObjectBenchmark.java
index f4c059c58b..90a53b3995 100644
--- a/benchmark/jobject-benchmark/src/JObjectBenchmark.java
+++ b/benchmark/jobject-benchmark/src/JObjectBenchmark.java
@@ -14,9 +14,7 @@
* limitations under the License.
*/
-import com.google.caliper.SimpleBenchmark;
-
-public class JObjectBenchmark extends SimpleBenchmark {
+public class JObjectBenchmark {
public JObjectBenchmark() {
// Make sure to link methods before benchmark starts.
System.loadLibrary("artbenchmark");
diff --git a/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java
index be276fe48c..0ad9c36950 100644
--- a/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java
+++ b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java
@@ -14,9 +14,7 @@
* limitations under the License.
*/
-import com.google.caliper.SimpleBenchmark;
-
-public class ScopedPrimitiveArrayBenchmark extends SimpleBenchmark {
+public class ScopedPrimitiveArrayBenchmark {
// Measure adds the first and last element of the array by using ScopedPrimitiveArray.
static native long measureByteArray(int reps, byte[] arr);
static native long measureShortArray(int reps, short[] arr);
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 230cb9aeea..f8b7460935 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -55,7 +55,9 @@ class CFITest : public dwarf::DwarfTest {
kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches);
ReformatCfi(Objdump(false, "-W"), &lines);
// Pretty-print assembly.
- auto* opts = new DisassemblerOptions(false, actual_asm.data(), true);
+ const uint8_t* asm_base = actual_asm.data();
+ const uint8_t* asm_end = asm_base + actual_asm.size();
+ auto* opts = new DisassemblerOptions(false, asm_base, asm_end, true);
std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts));
std::stringstream stream;
const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e7fa4e472b..51fbaea519 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -50,6 +50,7 @@
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
+#include "mirror/string.h"
#include "parallel_move_resolver.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
@@ -139,6 +140,12 @@ size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
return pointer_size * index;
}
+uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
+ return array_length->IsStringLength()
+ ? mirror::String::CountOffset().Uint32Value()
+ : mirror::Array::LengthOffset().Uint32Value();
+}
+
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
DCHECK_EQ((*block_order_)[current_block_index_], current);
return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index d69c41055b..6e75e3bb2e 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -340,6 +340,11 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// Pointer variant for ArtMethod and ArtField arrays.
size_t GetCachePointerOffset(uint32_t index);
+ // Helper that returns the offset of the array's length field.
+ // Note: Besides the normal arrays, we also use the HArrayLength for
+ // accessing the String's `count` field in String intrinsics.
+ static uint32_t GetArrayLengthOffset(HArrayLength* array_length);
+
void EmitParallelMoves(Location from1,
Location to1,
Primitive::Type type1,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 197e473473..e0106628c6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4742,7 +4742,7 @@ void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
- uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
Register obj = locations->InAt(0).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9680f2bf45..261c04f062 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2118,9 +2118,9 @@ void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
}
void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
+ uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
BlockPoolsScope block_pools(GetVIXLAssembler());
- __ Ldr(OutputRegister(instruction),
- HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
+ __ Ldr(OutputRegister(instruction), HeapOperand(InputRegisterAt(instruction, 0), offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 12d1164d03..fb50680c91 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1803,7 +1803,7 @@ void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
- uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
Register obj = locations->InAt(0).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 56ac38ef84..e67d8d0dc5 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1426,7 +1426,7 @@ void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
- uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6dc480bbee..50892a9d48 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5480,7 +5480,7 @@ void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
- uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
Register obj = locations->InAt(0).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
__ movl(out, Address(obj, offset));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 96ec09c2a8..56c5b06945 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4956,7 +4956,7 @@ void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
- uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movl(out, Address(obj, offset));
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 9efc13f61b..6aec463549 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -98,7 +98,9 @@ typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set,
DisassemblerOptions* options);
class HGraphVisualizerDisassembler {
public:
- HGraphVisualizerDisassembler(InstructionSet instruction_set, const uint8_t* base_address)
+ HGraphVisualizerDisassembler(InstructionSet instruction_set,
+ const uint8_t* base_address,
+ const uint8_t* end_address)
: instruction_set_(instruction_set), disassembler_(nullptr) {
libart_disassembler_handle_ =
dlopen(kIsDebugBuild ? "libartd-disassembler.so" : "libart-disassembler.so", RTLD_NOW);
@@ -119,6 +121,7 @@ class HGraphVisualizerDisassembler {
instruction_set,
new DisassemblerOptions(/* absolute_addresses */ false,
base_address,
+ end_address,
/* can_read_literals */ true)));
}
@@ -174,7 +177,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
disassembler_(disasm_info_ != nullptr
? new HGraphVisualizerDisassembler(
codegen_.GetInstructionSet(),
- codegen_.GetAssembler().CodeBufferBaseAddress())
+ codegen_.GetAssembler().CodeBufferBaseAddress(),
+ codegen_.GetAssembler().CodeBufferBaseAddress()
+ + codegen_.GetAssembler().CodeSize())
: nullptr),
indent_(0) {}
@@ -389,6 +394,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
<< instance_of->MustDoNullCheck() << std::noboolalpha;
}
+ void VisitArrayLength(HArrayLength* array_length) OVERRIDE {
+ StartAttributeStream("is_string_length") << std::boolalpha
+ << array_length->IsStringLength() << std::noboolalpha;
+ }
+
void VisitArraySet(HArraySet* array_set) OVERRIDE {
StartAttributeStream("value_can_be_null") << std::boolalpha
<< array_set->GetValueCanBeNull() << std::noboolalpha;
@@ -544,26 +554,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- if (IsPass(LICM::kLoopInvariantCodeMotionPassName)
- || IsPass(HDeadCodeElimination::kFinalDeadCodeEliminationPassName)
- || IsPass(HDeadCodeElimination::kInitialDeadCodeEliminationPassName)
- || IsPass(BoundsCheckElimination::kBoundsCheckEliminationPassName)
- || IsPass(RegisterAllocator::kRegisterAllocatorPassName)
- || IsPass(HGraphBuilder::kBuilderPassName)) {
- HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
- if (info == nullptr) {
- StartAttributeStream("loop") << "none";
+ HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
+ if (loop_info == nullptr) {
+ StartAttributeStream("loop") << "none";
+ } else {
+ StartAttributeStream("loop") << "B" << loop_info->GetHeader()->GetBlockId();
+ HLoopInformation* outer = loop_info->GetPreHeader()->GetLoopInformation();
+ if (outer != nullptr) {
+ StartAttributeStream("outer_loop") << "B" << outer->GetHeader()->GetBlockId();
} else {
- StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
- HLoopInformation* outer = info->GetPreHeader()->GetLoopInformation();
- if (outer != nullptr) {
- StartAttributeStream("outer_loop") << "B" << outer->GetHeader()->GetBlockId();
- } else {
- StartAttributeStream("outer_loop") << "none";
- }
- StartAttributeStream("irreducible")
- << std::boolalpha << info->IsIrreducible() << std::noboolalpha;
+ StartAttributeStream("outer_loop") << "none";
}
+ StartAttributeStream("irreducible")
+ << std::boolalpha << loop_info->IsIrreducible() << std::noboolalpha;
}
if ((IsPass(HGraphBuilder::kBuilderPassName)
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index d0d52bf6cc..1e86b75075 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -454,11 +454,16 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
if (!set->IsEmpty()) {
if (block->IsLoopHeader()) {
- if (block->GetLoopInformation()->IsIrreducible()) {
+ if (block->GetLoopInformation()->ContainsIrreducibleLoop()) {
// To satisfy our linear scan algorithm, no instruction should flow in an irreducible
- // loop header.
+ // loop header. We clear the set at entry of irreducible loops and any loop containing
+ // an irreducible loop, as in both cases, GVN can extend the liveness of an instruction
+ // across the irreducible loop.
+ // Note that, if we're not compiling OSR, we could still do GVN and introduce
+ // phis at irreducible loop headers. We decided it was not worth the complexity.
set->Clear();
} else {
+ DCHECK(!block->GetLoopInformation()->IsIrreducible());
DCHECK_EQ(block->GetDominator(), block->GetLoopInformation()->GetPreHeader());
set->Kill(side_effects_.GetLoopEffects(block));
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d7b3856bf4..fd79901ffc 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -101,6 +101,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void SimplifyCompare(HInvoke* invoke, bool is_signum, Primitive::Type type);
void SimplifyIsNaN(HInvoke* invoke);
void SimplifyFP2Int(HInvoke* invoke);
+ void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
OptimizingCompilerStats* stats_;
@@ -1673,6 +1674,27 @@ void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) {
invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0
}
+void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke) {
+ HInstruction* str = invoke->InputAt(0);
+ uint32_t dex_pc = invoke->GetDexPc();
+ // We treat String as an array to allow DCE and BCE to seamlessly work on strings,
+ // so create the HArrayLength.
+ HArrayLength* length = new (GetGraph()->GetArena()) HArrayLength(str, dex_pc);
+ length->MarkAsStringLength();
+ HInstruction* replacement;
+ if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
+ // For String.isEmpty(), create the `HEqual` representing the `length == 0`.
+ invoke->GetBlock()->InsertInstructionBefore(length, invoke);
+ HIntConstant* zero = GetGraph()->GetIntConstant(0);
+ HEqual* equal = new (GetGraph()->GetArena()) HEqual(length, zero, dex_pc);
+ replacement = equal;
+ } else {
+ DCHECK_EQ(invoke->GetIntrinsic(), Intrinsics::kStringLength);
+ replacement = length;
+ }
+ invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, replacement);
+}
+
void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
uint32_t dex_pc = invoke->GetDexPc();
HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
@@ -1719,6 +1741,10 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
case Intrinsics::kDoubleDoubleToLongBits:
SimplifyFP2Int(instruction);
break;
+ case Intrinsics::kStringIsEmpty:
+ case Intrinsics::kStringLength:
+ SimplifyStringIsEmptyOrLength(instruction);
+ break;
case Intrinsics::kUnsafeLoadFence:
SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
break;
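For context, the StringIsEmpty and StringLength simplifications above apply to ordinary Java call sites like the hypothetical snippet below (illustration only, not part of this change). After simplification, both calls reduce to a load of String's count field through an HArrayLength marked as a string length, and isEmpty() is additionally folded into a comparison with zero, which is what lets BCE and DCE treat the result like an array length.

    // Hypothetical Java caller, for illustration only.
    static boolean hasMoreThanOneChar(String s) {
      if (s.isEmpty()) {       // simplified to: HArrayLength(s) == 0, string-length flavor
        return false;
      }
      return s.length() > 1;   // simplified to: HArrayLength(s) marked as a String length
    }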
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 5d4c4e2950..418d59c6cb 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -388,10 +388,8 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
case kIntrinsicGetCharsNoCheck:
return Intrinsics::kStringGetCharsNoCheck;
case kIntrinsicIsEmptyOrLength:
- // The inliner can handle these two cases - and this is the preferred approach
- // since after inlining the call is no longer visible (as opposed to waiting
- // until codegen to handle intrinsic).
- return Intrinsics::kNone;
+ return ((method.d.data & kIntrinsicFlagIsEmpty) == 0) ?
+ Intrinsics::kStringLength : Intrinsics::kStringIsEmpty;
case kIntrinsicIndexOf:
return ((method.d.data & kIntrinsicFlagBase0) == 0) ?
Intrinsics::kStringIndexOfAfter : Intrinsics::kStringIndexOf;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 39a1313ba0..214250f337 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -239,6 +239,8 @@ UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
UNREACHABLE_INTRINSIC(Arch, LongCompare) \
UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
UNREACHABLE_INTRINSIC(Arch, LongSignum) \
+UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
+UNREACHABLE_INTRINSIC(Arch, StringLength) \
UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence)
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index dd9294d486..db60238fb4 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -107,6 +107,8 @@
V(StringGetCharsNoCheck, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
V(StringIndexOf, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
V(StringIndexOfAfter, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(StringIsEmpty, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow) \
+ V(StringLength, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow) \
V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
V(StringNewStringFromChars, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
V(StringNewStringFromString, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 5a0b89c90a..7543cd6c54 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -101,16 +101,6 @@ void LICM::Run() {
SideEffects loop_effects = side_effects_.GetLoopEffects(block);
HBasicBlock* pre_header = loop_info->GetPreHeader();
- bool contains_irreducible_loop = false;
- if (graph_->HasIrreducibleLoops()) {
- for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) {
- if (it_loop.Current()->GetLoopInformation()->IsIrreducible()) {
- contains_irreducible_loop = true;
- break;
- }
- }
- }
-
for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) {
HBasicBlock* inner = it_loop.Current();
DCHECK(inner->IsInLoop());
@@ -123,11 +113,12 @@ void LICM::Run() {
visited->SetBit(inner->GetBlockId());
}
- if (contains_irreducible_loop) {
+ if (loop_info->ContainsIrreducibleLoop()) {
// We cannot licm in an irreducible loop, or in a natural loop containing an
// irreducible loop.
continue;
}
+ DCHECK(!loop_info->IsIrreducible());
// We can move an instruction that can throw only if it is the first
// throwing instruction in the loop. Note that the first potentially
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1e6bf07e42..60329ccff2 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -446,8 +446,10 @@ void HGraph::SimplifyCFG() {
}
GraphAnalysisResult HGraph::AnalyzeLoops() const {
- // Order does not matter.
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ // We iterate post order to ensure we visit inner loops before outer loops.
+ // `PopulateRecursive` needs this guarantee to know whether a natural loop
+ // contains an irreducible loop.
+ for (HPostOrderIterator it(*this); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
if (block->IsLoopHeader()) {
if (block->IsCatchBlock()) {
@@ -580,6 +582,14 @@ void HLoopInformation::PopulateRecursive(HBasicBlock* block) {
blocks_.SetBit(block->GetBlockId());
block->SetInLoop(this);
+ if (block->IsLoopHeader()) {
+ // We're visiting loops in post-order, so inner loops must have been
+ // populated already.
+ DCHECK(block->GetLoopInformation()->IsPopulated());
+ if (block->GetLoopInformation()->IsIrreducible()) {
+ contains_irreducible_loop_ = true;
+ }
+ }
for (HBasicBlock* predecessor : block->GetPredecessors()) {
PopulateRecursive(predecessor);
}
@@ -683,6 +693,7 @@ void HLoopInformation::Populate() {
}
if (is_irreducible_loop) {
irreducible_ = true;
+ contains_irreducible_loop_ = true;
graph->SetHasIrreducibleLoops(true);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 829fe71a78..12ea059d3f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -650,6 +650,7 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
: header_(header),
suspend_check_(nullptr),
irreducible_(false),
+ contains_irreducible_loop_(false),
back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
// Make bit vector growable, as the number of blocks may change.
blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) {
@@ -657,6 +658,7 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
}
bool IsIrreducible() const { return irreducible_; }
+ bool ContainsIrreducibleLoop() const { return contains_irreducible_loop_; }
void Dump(std::ostream& os);
@@ -727,6 +729,10 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
bool HasBackEdgeNotDominatedByHeader() const;
+ bool IsPopulated() const {
+ return blocks_.GetHighestBitSet() != -1;
+ }
+
private:
// Internal recursive implementation of `Populate`.
void PopulateRecursive(HBasicBlock* block);
@@ -735,6 +741,7 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
HBasicBlock* header_;
HSuspendCheck* suspend_check_;
bool irreducible_;
+ bool contains_irreducible_loop_;
ArenaVector<HBasicBlock*> back_edges_;
ArenaBitVector blocks_;
@@ -5228,9 +5235,22 @@ class HArrayLength : public HExpression<1> {
return obj == InputAt(0);
}
+ void MarkAsStringLength() { SetPackedFlag<kFlagIsStringLength>(); }
+ bool IsStringLength() const { return GetPackedFlag<kFlagIsStringLength>(); }
+
DECLARE_INSTRUCTION(ArrayLength);
private:
+ // We treat a String as an array, creating the HArrayLength from String.length()
+ // or String.isEmpty() intrinsic in the instruction simplifier. We can always
+ // determine whether a particular HArrayLength is actually a String.length() by
+ // looking at the type of the input but that requires holding the mutator lock, so
+ // we prefer to use a flag, so that code generators don't need to do the locking.
+ static constexpr size_t kFlagIsStringLength = kNumberOfExpressionPackedBits;
+ static constexpr size_t kNumberOfArrayLengthPackedBits = kFlagIsStringLength + 1;
+ static_assert(kNumberOfArrayLengthPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+ "Too many packed fields.");
+
DISALLOW_COPY_AND_ASSIGN(HArrayLength);
};
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index b1f9cbcdfa..4405b803e0 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1773,7 +1773,9 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
// therefore will not have a location for that instruction for `to`.
// Because the instruction is a constant or the ArtMethod, we don't need to
// do anything: it will be materialized in the irreducible loop.
- DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
+ DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
+ << defined_by->DebugName() << ":" << defined_by->GetId()
+ << " " << from->GetBlockId() << " -> " << to->GetBlockId();
return;
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 5534aeac29..36e0d993d1 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -309,17 +309,8 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
}
if (block->IsLoopHeader()) {
- if (kIsDebugBuild && block->GetLoopInformation()->IsIrreducible()) {
- // To satisfy our liveness algorithm, we need to ensure loop headers of
- // irreducible loops do not have any live-in instructions, except constants
- // and the current method, which can be trivially re-materialized.
- for (uint32_t idx : live_in->Indexes()) {
- HInstruction* instruction = GetInstructionFromSsaIndex(idx);
- DCHECK(instruction->GetBlock()->IsEntryBlock()) << instruction->DebugName();
- DCHECK(!instruction->IsParameterValue());
- DCHECK(instruction->IsCurrentMethod() || instruction->IsConstant())
- << instruction->DebugName();
- }
+ if (kIsDebugBuild) {
+ CheckNoLiveInIrreducibleLoop(*block);
}
size_t last_position = block->GetLoopInformation()->GetLifetimeEnd();
// For all live_in instructions at the loop header, we need to create a range
@@ -344,6 +335,9 @@ void SsaLivenessAnalysis::ComputeLiveInAndLiveOutSets() {
// change in this loop), and the live_out set. If the live_out
// set does not change, there is no need to update the live_in set.
if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
+ if (kIsDebugBuild) {
+ CheckNoLiveInIrreducibleLoop(block);
+ }
changed = true;
}
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 1141fd1c76..1fcba8bc77 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1260,6 +1260,23 @@ class SsaLivenessAnalysis : public ValueObject {
return instruction->GetType() == Primitive::kPrimNot;
}
+ void CheckNoLiveInIrreducibleLoop(const HBasicBlock& block) const {
+ if (!block.IsLoopHeader() || !block.GetLoopInformation()->IsIrreducible()) {
+ return;
+ }
+ BitVector* live_in = GetLiveInSet(block);
+ // To satisfy our liveness algorithm, we need to ensure loop headers of
+ // irreducible loops do not have any live-in instructions, except constants
+ // and the current method, which can be trivially re-materialized.
+ for (uint32_t idx : live_in->Indexes()) {
+ HInstruction* instruction = GetInstructionFromSsaIndex(idx);
+ DCHECK(instruction->GetBlock()->IsEntryBlock()) << instruction->DebugName();
+ DCHECK(!instruction->IsParameterValue());
+ DCHECK(instruction->IsCurrentMethod() || instruction->IsConstant())
+ << instruction->DebugName();
+ }
+ }
+
HGraph* const graph_;
CodeGenerator* const codegen_;
ArenaVector<BlockInfo*> block_infos_;
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 44bcadf846..c67612e651 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -17,6 +17,7 @@
#include "ssa_phi_elimination.h"
#include "base/arena_containers.h"
+#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
namespace art {
@@ -127,8 +128,10 @@ void SsaRedundantPhiElimination::Run() {
}
}
- ArenaSet<uint32_t> visited_phis_in_cycle(
- graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
+ ArenaBitVector visited_phis_in_cycle(graph_->GetArena(),
+ graph_->GetCurrentInstructionId(),
+ /* expandable */ false,
+ kArenaAllocSsaPhiElimination);
ArenaVector<HPhi*> cycle_worklist(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
while (!worklist_.empty()) {
@@ -147,11 +150,11 @@ void SsaRedundantPhiElimination::Run() {
}
HInstruction* candidate = nullptr;
- visited_phis_in_cycle.clear();
+ visited_phis_in_cycle.ClearAllBits();
cycle_worklist.clear();
cycle_worklist.push_back(phi);
- visited_phis_in_cycle.insert(phi->GetId());
+ visited_phis_in_cycle.SetBit(phi->GetId());
bool catch_phi_in_cycle = phi->IsCatchPhi();
bool irreducible_loop_phi_in_cycle = phi->IsIrreducibleLoopHeaderPhi();
@@ -183,9 +186,9 @@ void SsaRedundantPhiElimination::Run() {
if (input == current) {
continue;
} else if (input->IsPhi()) {
- if (!ContainsElement(visited_phis_in_cycle, input->GetId())) {
+ if (!visited_phis_in_cycle.IsBitSet(input->GetId())) {
cycle_worklist.push_back(input->AsPhi());
- visited_phis_in_cycle.insert(input->GetId());
+ visited_phis_in_cycle.SetBit(input->GetId());
catch_phi_in_cycle |= input->AsPhi()->IsCatchPhi();
irreducible_loop_phi_in_cycle |= input->IsIrreducibleLoopHeaderPhi();
} else {
@@ -234,7 +237,7 @@ void SsaRedundantPhiElimination::Run() {
// for elimination. Add phis that use this phi to the worklist.
for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
HInstruction* user = use.GetUser();
- if (user->IsPhi() && !ContainsElement(visited_phis_in_cycle, user->GetId())) {
+ if (user->IsPhi() && !visited_phis_in_cycle.IsBitSet(user->GetId())) {
worklist_.push_back(user->AsPhi());
}
}
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 2203646e77..84cdb7d4d3 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1030,6 +1030,14 @@ void X86Assembler::xchgl(Register reg, const Address& address) {
}
+void X86Assembler::cmpb(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x80);
+ EmitOperand(7, address);
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
void X86Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 8567ad2a17..bc46e9f7c9 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -479,6 +479,7 @@ class X86Assembler FINAL : public Assembler {
void xchgl(Register dst, Register src);
void xchgl(Register reg, const Address& address);
+ void cmpb(const Address& address, const Immediate& imm);
void cmpw(const Address& address, const Immediate& imm);
void cmpl(Register reg, const Immediate& imm);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 1d1df6e447..28043c9380 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -389,4 +389,10 @@ TEST_F(AssemblerX86Test, NearLabel) {
DriverStr(expected, "near_label");
}
+TEST_F(AssemblerX86Test, Cmpb) {
+ GetAssembler()->cmpb(x86::Address(x86::EDI, 128), x86::Immediate(0));
+ const char* expected = "cmpb $0, 128(%EDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 32eb4a37bf..5e7b587e40 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1224,6 +1224,16 @@ void X86_64Assembler::xchgl(CpuRegister reg, const Address& address) {
}
+void X86_64Assembler::cmpb(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32());
+ EmitOptionalRex32(address);
+ EmitUint8(0x80);
+ EmitOperand(7, address);
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
void X86_64Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int32());
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 92c7d0ab99..720a402b5f 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -506,6 +506,7 @@ class X86_64Assembler FINAL : public Assembler {
void xchgq(CpuRegister dst, CpuRegister src);
void xchgl(CpuRegister reg, const Address& address);
+ void cmpb(const Address& address, const Immediate& imm);
void cmpw(const Address& address, const Immediate& imm);
void cmpl(CpuRegister reg, const Immediate& imm);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index afe9207eb1..9dccc9f21f 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1637,4 +1637,11 @@ TEST_F(AssemblerX86_64Test, Repecmpsq) {
DriverStr(expected, "Repecmpsq");
}
+TEST_F(AssemblerX86_64Test, Cmpb) {
+ GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
+ x86_64::Immediate(0));
+ const char* expected = "cmpb $0, 128(%RDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
} // namespace art
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index b99e5c2df4..b08031587f 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -31,16 +31,23 @@ class DisassemblerOptions {
// Should the disassembler print absolute or relative addresses.
const bool absolute_addresses_;
- // Base addess for calculating relative code offsets when absolute_addresses_ is false.
+ // Base address for calculating relative code offsets when absolute_addresses_ is false.
const uint8_t* const base_address_;
+ // End address (exclusive).
+ const uint8_t* const end_address_;
+
// If set, the disassembler is allowed to look at load targets in literal
// pools.
const bool can_read_literals_;
- DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address,
+ DisassemblerOptions(bool absolute_addresses,
+ const uint8_t* base_address,
+ const uint8_t* end_address,
bool can_read_literals)
- : absolute_addresses_(absolute_addresses), base_address_(base_address),
+ : absolute_addresses_(absolute_addresses),
+ base_address_(base_address),
+ end_address_(end_address),
can_read_literals_(can_read_literals) {}
private:
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index bcb043883b..286faf215a 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -418,7 +418,12 @@ std::ostream& operator<<(std::ostream& os, T2LitType type) {
return os << static_cast<int>(type);
}
-void DumpThumb2Literal(std::ostream& args, const uint8_t* instr_ptr, uint32_t U, uint32_t imm32,
+void DumpThumb2Literal(std::ostream& args,
+ const uint8_t* instr_ptr,
+ const uintptr_t lo_adr,
+ const uintptr_t hi_adr,
+ uint32_t U,
+ uint32_t imm32,
T2LitType type) {
// Literal offsets (imm32) are not required to be aligned so we may need unaligned access.
typedef const int16_t unaligned_int16_t __attribute__ ((aligned (1)));
@@ -428,8 +433,16 @@ void DumpThumb2Literal(std::ostream& args, const uint8_t* instr_ptr, uint32_t U,
typedef const int64_t unaligned_int64_t __attribute__ ((aligned (1)));
typedef const uint64_t unaligned_uint64_t __attribute__ ((aligned (1)));
+ // Get address of literal. Bail if not within expected buffer range to
+ // avoid trying to fetch invalid literals (we can encounter this when
+ // interpreting raw data as instructions).
uintptr_t pc = RoundDown(reinterpret_cast<intptr_t>(instr_ptr) + 4, 4);
uintptr_t lit_adr = U ? pc + imm32 : pc - imm32;
+ if (lit_adr < lo_adr || lit_adr >= hi_adr) {
+ args << " ; (?)";
+ return;
+ }
+
args << " ; ";
switch (type) {
case kT2LitUByte:
@@ -482,6 +495,10 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
return DumpThumb16(os, instr_ptr);
}
+ // Set valid address range of backing buffer.
+ const uintptr_t lo_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->base_address_);
+ const uintptr_t hi_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->end_address_);
+
uint32_t op2 = (instr >> 20) & 0x7F;
std::ostringstream opcode;
std::ostringstream args;
@@ -824,7 +841,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
args << d << ", [" << Rn << ", #" << ((U == 1) ? "" : "-")
<< (imm8 << 2) << "]";
if (Rn.r == 15 && U == 1) {
- DumpThumb2Literal(args, instr_ptr, U, imm8 << 2, kT2LitHexLong);
+ DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, U, imm8 << 2, kT2LitHexLong);
}
} else if (Rn.r == 13 && W == 1 && U == L) { // VPUSH/VPOP
opcode << (L == 1 ? "vpop" : "vpush");
@@ -1410,7 +1427,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
};
DCHECK_LT(op2 >> 1, arraysize(lit_type));
DCHECK_NE(lit_type[op2 >> 1], kT2LitInvalid);
- DumpThumb2Literal(args, instr_ptr, U, imm12, lit_type[op2 >> 1]);
+ DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, U, imm12, lit_type[op2 >> 1]);
}
} else if ((instr & 0xFC0) == 0) {
opcode << ldr_str << sign << type << ".w";
@@ -1711,10 +1728,13 @@ size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr)
break;
}
} else if (opcode1 == 0x12 || opcode1 == 0x13) { // 01001x
+ const uintptr_t lo_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->base_address_);
+ const uintptr_t hi_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->end_address_);
ThumbRegister Rt(instr, 8);
uint16_t imm8 = instr & 0xFF;
opcode << "ldr";
args << Rt << ", [pc, #" << (imm8 << 2) << "]";
+ DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, /*U*/ 1u, imm8 << 2, kT2LitHexWord);
} else if ((opcode1 >= 0x14 && opcode1 <= 0x17) || // 0101xx
(opcode1 >= 0x18 && opcode1 <= 0x1f) || // 011xxx
(opcode1 >= 0x20 && opcode1 <= 0x27)) { // 100xxx
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 5f8871470d..6a9afe5740 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -63,9 +63,17 @@ void CustomDisassembler::VisitLoadLiteral(const vixl::Instruction* instr) {
return;
}
+ // Get address of literal. Bail if not within expected buffer range to
+ // avoid trying to fetch invalid literals (we can encounter this when
+ // interpreting raw data as instructions).
void* data_address = instr->LiteralAddress<void*>();
- vixl::Instr op = instr->Mask(vixl::LoadLiteralMask);
+ if (data_address < base_address_ || data_address >= end_address_) {
+ AppendToOutput(" (?)");
+ return;
+ }
+ // Output information on literal.
+ vixl::Instr op = instr->Mask(vixl::LoadLiteralMask);
switch (op) {
case vixl::LDR_w_lit:
case vixl::LDR_x_lit:
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 44fa53f9f6..a4e5ee8a43 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -30,8 +30,11 @@ namespace arm64 {
class CustomDisassembler FINAL : public vixl::Disassembler {
public:
- explicit CustomDisassembler(DisassemblerOptions* options) :
- vixl::Disassembler(), read_literals_(options->can_read_literals_) {
+ explicit CustomDisassembler(DisassemblerOptions* options)
+ : vixl::Disassembler(),
+ read_literals_(options->can_read_literals_),
+ base_address_(options->base_address_),
+ end_address_(options->end_address_) {
if (!options->absolute_addresses_) {
MapCodeAddress(0, reinterpret_cast<const vixl::Instruction*>(options->base_address_));
}
@@ -55,6 +58,10 @@ class CustomDisassembler FINAL : public vixl::Disassembler {
// true | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0)
// false | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0) (3.40282e+38)
const bool read_literals_;
+
+ // Valid address range: [base_address_, end_address_)
+ const void* const base_address_;
+ const void* const end_address_;
};
class DisassemblerArm64 FINAL : public Disassembler {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d2ab699599..f5458c067d 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -335,6 +335,7 @@ class OatDumper {
disassembler_(Disassembler::Create(instruction_set_,
new DisassemblerOptions(options_.absolute_addresses_,
oat_file.Begin(),
+ oat_file.End(),
true /* can_read_literals_ */))) {
CHECK(options_.class_loader_ != nullptr);
CHECK(options_.class_filter_ != nullptr);
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 06156f5cf8..1790df6be4 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -253,14 +253,17 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
Runtime* runtime = Runtime::Current();
// Call the invoke stub, passing everything as arguments.
// If the runtime is not yet started or it is required by the debugger, then perform the
- // Invocation by the interpreter.
+ // Invocation by the interpreter, explicitly forcing interpretation over JIT to prevent
+ // cycling around the various JIT/Interpreter methods that handle method invocation.
if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
if (IsStatic()) {
- art::interpreter::EnterInterpreterFromInvoke(self, this, nullptr, args, result);
+ art::interpreter::EnterInterpreterFromInvoke(
+ self, this, nullptr, args, result, /*stay_in_interpreter*/ true);
} else {
mirror::Object* receiver =
reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr();
- art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args + 1, result);
+ art::interpreter::EnterInterpreterFromInvoke(
+ self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true);
}
} else {
DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index bcb7b3b769..0e3bc8e1b4 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -85,6 +85,10 @@ template <class Value> class Histogram {
return max_value_added_;
}
+ Value BucketWidth() const {
+ return bucket_width_;
+ }
+
const std::string& Name() const {
return name_;
}
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index beabce36fb..639f913e80 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -1176,14 +1176,16 @@ class ScopedCheck {
return false;
}
- // Get the *correct* JNIEnv by going through our TLS pointer.
+ // Get the current thread's JNIEnv by going through our TLS pointer.
JNIEnvExt* threadEnv = self->GetJniEnv();
// Verify that the current thread is (a) attached and (b) associated with
// this particular instance of JNIEnv.
if (env != threadEnv) {
+ // Get the thread owning the JNIEnv that's being used.
+ Thread* envThread = reinterpret_cast<JNIEnvExt*>(env)->self;
AbortF("thread %s using JNIEnv* from thread %s",
- ToStr<Thread>(*self).c_str(), ToStr<Thread>(*self).c_str());
+ ToStr<Thread>(*self).c_str(), ToStr<Thread>(*envThread).c_str());
return false;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 35c40cd219..e9b8643223 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -6970,6 +6970,7 @@ bool ClassLinker::LinkInterfaceMethods(
}
// Put some random garbage in old methods to help find stale pointers.
if (methods != old_methods && old_methods != nullptr) {
+ WriterMutexLock mu(self, ClassTableForClassLoader(klass->GetClassLoader())->GetLock());
memset(old_methods, 0xFEu, old_size);
}
} else {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index ece171c9a6..d1c8172630 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1020,7 +1020,7 @@ class ClassLinker {
// Returns null if not found.
ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Insert a new class table if not found.
ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
diff --git a/runtime/class_table.h b/runtime/class_table.h
index eb784b5c71..686381d35c 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -153,6 +153,10 @@ class ClassTable {
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ ReaderWriterMutex& GetLock() {
+ return lock_;
+ }
+
private:
// Lock to guard inserting and removing.
mutable ReaderWriterMutex lock_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index df5aa0a75c..fa540c0f9b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -119,6 +119,8 @@ static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
+static constexpr size_t kNativeAllocationHistogramBuckets = 16;
+
static inline bool CareAboutPauseTimes() {
return Runtime::Current()->InJankPerceptibleProcessState();
}
@@ -186,6 +188,11 @@ Heap::Heap(size_t initial_size,
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
native_bytes_allocated_(0),
+ native_histogram_lock_("Native allocation lock"),
+ native_allocation_histogram_("Native allocation sizes",
+ 1U,
+ kNativeAllocationHistogramBuckets),
+ native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -1185,6 +1192,20 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
rosalloc_space_->DumpStats(os);
}
+ {
+ MutexLock mu(Thread::Current(), native_histogram_lock_);
+ if (native_allocation_histogram_.SampleSize() > 0u) {
+ os << "Histogram of native allocation ";
+ native_allocation_histogram_.DumpBins(os);
+ os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
+ }
+ if (native_free_histogram_.SampleSize() > 0u) {
+ os << "Histogram of native free ";
+ native_free_histogram_.DumpBins(os);
+ os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
+ }
+ }
+
BaseMutex::DumpAll(os);
}
@@ -3848,6 +3869,10 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
Thread* self = ThreadForEnv(env);
+ {
+ MutexLock mu(self, native_histogram_lock_);
+ native_allocation_histogram_.AddValue(bytes);
+ }
if (native_need_to_run_finalization_) {
RunFinalization(env, kNativeAllocationFinalizeTimeout);
UpdateMaxNativeFootprint();
@@ -3892,6 +3917,10 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
size_t expected_size;
+ {
+ MutexLock mu(Thread::Current(), native_histogram_lock_);
+ native_free_histogram_.AddValue(bytes);
+ }
do {
expected_size = native_bytes_allocated_.LoadRelaxed();
if (UNLIKELY(bytes > expected_size)) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index fada1a2212..2a1a4a17ae 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -241,9 +241,9 @@ class Heap {
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
@@ -532,7 +532,7 @@ class Heap {
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
SHARED_REQUIRES(Locks::mutator_lock_);
- void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+ void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
// Do a pending collector transition.
void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);
@@ -654,7 +654,8 @@ class Heap {
std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
// GC performance measuring
- void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+ void DumpGcPerformanceInfo(std::ostream& os)
+ REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
// Thread pool.
@@ -1156,6 +1157,11 @@ class Heap {
// Bytes which are allocated and managed by native code but still need to be accounted for.
Atomic<size_t> native_bytes_allocated_;
+ // Native allocation stats.
+ Mutex native_histogram_lock_;
+ Histogram<uint64_t> native_allocation_histogram_;
+ Histogram<uint64_t> native_free_histogram_;
+
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 6c630cc48f..1d0e600e4d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -264,12 +264,12 @@ JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_
ShadowFrame& shadow_frame, JValue result_register);
#endif
-static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame,
- JValue result_register)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
-static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+static inline JValue Execute(
+ Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame,
+ JValue result_register,
+ bool stay_in_interpreter = false) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
if (LIKELY(shadow_frame.GetDexPC() == 0)) { // Entering the method, but not via deoptimization.
@@ -284,19 +284,21 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
method, 0);
}
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- jit->MethodEntered(self, shadow_frame.GetMethod());
- if (jit->CanInvokeCompiledCode(method)) {
- JValue result;
+ if (!stay_in_interpreter) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->MethodEntered(self, shadow_frame.GetMethod());
+ if (jit->CanInvokeCompiledCode(method)) {
+ JValue result;
- // Pop the shadow frame before calling into compiled code.
- self->PopShadowFrame();
- ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result);
- // Push the shadow frame back as the caller will expect it.
- self->PushShadowFrame(&shadow_frame);
+ // Pop the shadow frame before calling into compiled code.
+ self->PopShadowFrame();
+ ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result);
+ // Push the shadow frame back as the caller will expect it.
+ self->PushShadowFrame(&shadow_frame);
- return result;
+ return result;
+ }
}
}
}
@@ -387,7 +389,8 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
}
void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receiver,
- uint32_t* args, JValue* result) {
+ uint32_t* args, JValue* result,
+ bool stay_in_interpreter) {
DCHECK_EQ(self, Thread::Current());
bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
@@ -462,7 +465,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
}
}
if (LIKELY(!method->IsNative())) {
- JValue r = Execute(self, code_item, *shadow_frame, JValue());
+ JValue r = Execute(self, code_item, *shadow_frame, JValue(), stay_in_interpreter);
if (result != nullptr) {
*result = r;
}
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 6353a9b7bf..bf4bcff856 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -33,8 +33,11 @@ class Thread;
namespace interpreter {
// Called by ArtMethod::Invoke, shadow frames arguments are taken from the args array.
+// The optional stay_in_interpreter parameter (false by default) can be used by clients to
+// explicitly force interpretation in the remaining path that implements method invocation.
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
- mirror::Object* receiver, uint32_t* args, JValue* result)
+ mirror::Object* receiver, uint32_t* args, JValue* result,
+ bool stay_in_interpreter = false)
SHARED_REQUIRES(Locks::mutator_lock_);
// 'from_code' denotes whether the deoptimization was explicitly triggered by compiled code.
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index e8462a1188..cf46893311 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -490,17 +490,20 @@ bool ProfileSaver::MaybeRecordDexUseInternal(
// frameworks/base/services/core/java/com/android/server/pm/PackageDexOptimizer.java)
std::replace(dex_location_real_path_str.begin(), dex_location_real_path_str.end(), '/', '@');
std::string flag_path = foreign_dex_profile_path + "/" + dex_location_real_path_str;
- // No need to give any sort of access to flag_path. The system has enough permissions
- // to test for its existence.
- int fd = TEMP_FAILURE_RETRY(open(flag_path.c_str(), O_CREAT | O_EXCL, 0));
+ // We use O_RDONLY as the access mode because we must supply some access
+ // mode, and there is no access mode that means 'create but do not read' the
+ // file. We will not actually read from the file.
+ int fd = TEMP_FAILURE_RETRY(open(flag_path.c_str(),
+ O_CREAT | O_RDONLY | O_EXCL | O_CLOEXEC | O_NOFOLLOW, 0));
if (fd != -1) {
if (close(fd) != 0) {
PLOG(WARNING) << "Could not close file after flagging foreign dex use " << flag_path;
}
return true;
} else {
- if (errno != EEXIST) {
- // Another app could have already created the file.
+ if (errno != EEXIST && errno != EACCES) {
+ // Another app could have already created the file, and SELinux may not
+ // allow the read access implied by the call to open.
PLOG(WARNING) << "Could not create foreign dex use mark " << flag_path;
return false;
}
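The flag file relies on O_CREAT | O_EXCL, so creation is atomic: only the first process to flag a foreign dex location sees fd != -1, later attempts fail with EEXIST, and SELinux may instead report EACCES for the implied read; both are now tolerated. A standalone sketch of the same pattern (illustrative names; TEMP_FAILURE_RETRY is the EINTR-retry wrapper already used above):

  #include <errno.h>
  #include <fcntl.h>
  #include <unistd.h>

  static bool FlagOnce(const char* flag_path) {
    int fd = TEMP_FAILURE_RETRY(
        open(flag_path, O_CREAT | O_RDONLY | O_EXCL | O_CLOEXEC | O_NOFOLLOW, 0));
    if (fd != -1) {
      close(fd);
      return true;  // We created the flag.
    }
    // EEXIST: another process created it first. EACCES: SELinux denied the
    // implied read access; either way the location counts as flagged.
    return errno == EEXIST || errno == EACCES;
  }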
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 0abe39d872..0126b4d0a4 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -16,6 +16,8 @@
#include "dalvik_system_DexFile.h"
+#include <sstream>
+
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
@@ -27,6 +29,7 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
+#include "oat_file.h"
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
#include "os.h"
@@ -387,6 +390,61 @@ static jint GetDexOptNeeded(JNIEnv* env,
return oat_file_assistant.GetDexOptNeeded(filter);
}
+static jstring DexFile_getDexFileStatus(JNIEnv* env,
+ jclass,
+ jstring javaFilename,
+ jstring javaInstructionSet) {
+ ScopedUtfChars filename(env, javaFilename);
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ ScopedUtfChars instruction_set(env, javaInstructionSet);
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ const InstructionSet target_instruction_set = GetInstructionSetFromString(
+ instruction_set.c_str());
+ if (target_instruction_set == kNone) {
+ ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
+ std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str()));
+ env->ThrowNew(iae.get(), message.c_str());
+ return nullptr;
+ }
+
+ OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set,
+ false /* profile_changed */,
+ false /* load_executable */);
+
+ std::ostringstream status;
+ bool oat_file_exists = false;
+ bool odex_file_exists = false;
+ if (oat_file_assistant.OatFileExists()) {
+ oat_file_exists = true;
+ status << *oat_file_assistant.OatFileName() << " [compilation_filter=";
+ status << CompilerFilter::NameOfFilter(oat_file_assistant.OatFileCompilerFilter());
+ status << ", status=" << oat_file_assistant.OatFileStatus();
+ }
+
+ if (oat_file_assistant.OdexFileExists()) {
+ odex_file_exists = true;
+ if (oat_file_exists) {
+ status << "] ";
+ }
+ status << *oat_file_assistant.OdexFileName() << " [compilation_filter=";
+ status << CompilerFilter::NameOfFilter(oat_file_assistant.OdexFileCompilerFilter());
+ status << ", status=" << oat_file_assistant.OdexFileStatus();
+ }
+
+ if (!oat_file_exists && !odex_file_exists) {
+ status << "invalid[";
+ }
+
+ status << "]";
+ return env->NewStringUTF(status.str().c_str());
+}
+
static jint DexFile_getDexOptNeeded(JNIEnv* env,
jclass,
jstring javaFilename,
@@ -481,6 +539,16 @@ static jstring DexFile_getNonProfileGuidedCompilerFilter(JNIEnv* env,
return env->NewStringUTF(new_filter_str.c_str());
}
+static jboolean DexFile_isBackedByOatFile(JNIEnv* env, jclass, jobject cookie) {
+ const OatFile* oat_file = nullptr;
+ std::vector<const DexFile*> dex_files;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, /* out */ dex_files, /* out */ oat_file)) {
+ DCHECK(env->ExceptionCheck());
+ return false;
+ }
+ return oat_file != nullptr;
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)Z"),
NATIVE_METHOD(DexFile,
@@ -506,6 +574,9 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile,
getNonProfileGuidedCompilerFilter,
"(Ljava/lang/String;)Ljava/lang/String;"),
+ NATIVE_METHOD(DexFile, isBackedByOatFile, "(Ljava/lang/Object;)Z"),
+ NATIVE_METHOD(DexFile, getDexFileStatus,
+ "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;")
};
void register_dalvik_system_DexFile(JNIEnv* env) {
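For reference, the string returned by getDexFileStatus contains one bracketed entry per oat/odex file found, or "invalid[]" when neither exists; hypothetical outputs (paths, filters, and statuses are illustrative only) look like:

  /data/dalvik-cache/arm64/data@app@com.example-1@base.apk@classes.dex [compilation_filter=speed, status=kOatUpToDate]
  invalid[]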
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index ddcaadefa3..54b8afd1f3 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -34,20 +34,38 @@ static jobject Constructor_getAnnotationNative(JNIEnv* env, jobject javaMethod,
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
- return soa.AddLocalReference<jobject>(
- method->GetDexFile()->GetAnnotationForMethod(method, klass));
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ } else {
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return soa.AddLocalReference<jobject>(
+ method->GetDexFile()->GetAnnotationForMethod(method, klass));
+ }
}
static jobjectArray Constructor_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetAnnotationsForMethod(method));
+ if (method->IsProxyMethod()) {
+ mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ mirror::Class* class_array_class =
+ Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
+ if (class_array_class == nullptr) {
+ return nullptr;
+ }
+ mirror::ObjectArray<mirror::Class>* empty_array =
+ mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ } else {
+ return soa.AddLocalReference<jobjectArray>(
+ method->GetDexFile()->GetAnnotationsForMethod(method));
+ }
}
static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
+ ->GetInterfaceMethodIfProxy(sizeof(void*));
mirror::ObjectArray<mirror::Class>* result_array =
method->GetDexFile()->GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
@@ -69,7 +87,12 @@ static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMetho
static jobjectArray Constructor_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetParameterAnnotations(method));
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ } else {
+ return soa.AddLocalReference<jobjectArray>(
+ method->GetDexFile()->GetParameterAnnotations(method));
+ }
}
static jboolean Constructor_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod,
@@ -77,6 +100,10 @@ static jboolean Constructor_isAnnotationPresentNative(JNIEnv* env, jobject javaM
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->IsProxyMethod()) {
+ // Proxies have no annotations.
+ return false;
+ }
Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
return method->GetDexFile()->IsMethodAnnotationPresent(method, klass);
}
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index a508e87c87..713e2f3fa9 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -30,6 +30,7 @@
#include "base/logging.h"
#include "base/stringprintf.h"
+#include "compiler_filter.h"
#include "class_linker.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
@@ -43,6 +44,24 @@
namespace art {
+std::ostream& operator << (std::ostream& stream, const OatFileAssistant::OatStatus status) {
+ switch (status) {
+ case OatFileAssistant::kOatOutOfDate:
+ stream << "kOatOutOfDate";
+ break;
+ case OatFileAssistant::kOatUpToDate:
+ stream << "kOatUpToDate";
+ break;
+ case OatFileAssistant::kOatNeedsRelocation:
+ stream << "kOatNeedsRelocation";
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ return stream;
+}
+
OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
bool profile_changed,
@@ -377,6 +396,12 @@ bool OatFileAssistant::OdexFileIsUpToDate() {
return cached_odex_file_is_up_to_date_;
}
+CompilerFilter::Filter OatFileAssistant::OdexFileCompilerFilter() {
+ const OatFile* odex_file = GetOdexFile();
+ CHECK(odex_file != nullptr);
+
+ return odex_file->GetCompilerFilter();
+}
std::string OatFileAssistant::ArtFileName(const OatFile* oat_file) const {
const std::string oat_file_location = oat_file->GetLocation();
// Replace extension with .art
@@ -455,6 +480,13 @@ bool OatFileAssistant::OatFileIsUpToDate() {
return cached_oat_file_is_up_to_date_;
}
+CompilerFilter::Filter OatFileAssistant::OatFileCompilerFilter() {
+ const OatFile* oat_file = GetOatFile();
+ CHECK(oat_file != nullptr);
+
+ return oat_file->GetCompilerFilter();
+}
+
OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& file) {
// TODO: This could cause GivenOatFileIsOutOfDate to be called twice, which
// is more work than we need to do. If performance becomes a concern, and
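A usage sketch for the new accessors and the OatStatus stream operator (not part of the patch; dex_location is an assumed const char* path, and the call is guarded on OatFileExists() per the contract documented in the header below):

  OatFileAssistant oat_file_assistant(dex_location, kRuntimeISA,
                                      /* profile_changed */ false,
                                      /* load_executable */ false);
  if (oat_file_assistant.OatFileExists()) {
    LOG(INFO) << "filter="
              << CompilerFilter::NameOfFilter(oat_file_assistant.OatFileCompilerFilter())
              << " status=" << oat_file_assistant.OatFileStatus();
  }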
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 85f4a47868..f48cdf343c 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -19,6 +19,7 @@
#include <cstdint>
#include <memory>
+#include <sstream>
#include <string>
#include "arch/instruction_set.h"
@@ -211,6 +212,9 @@ class OatFileAssistant {
bool OdexFileIsOutOfDate();
bool OdexFileNeedsRelocation();
bool OdexFileIsUpToDate();
+ // Must only be called if the associated odex file exists, i.e., if
+ // |OdexFileExists() == true|.
+ CompilerFilter::Filter OdexFileCompilerFilter();
// When the dex files is compiled on the target device, the oat file is the
// result. The oat file will have been relocated to some
@@ -227,6 +231,9 @@ class OatFileAssistant {
bool OatFileIsOutOfDate();
bool OatFileNeedsRelocation();
bool OatFileIsUpToDate();
+ // Must only be called if the associated oat file exists, i.e., if
+ // |OatFileExists() == true|.
+ CompilerFilter::Filter OatFileCompilerFilter();
// Return image file name. Does not cache since it relies on the oat file.
std::string ArtFileName(const OatFile* oat_file) const;
@@ -436,6 +443,8 @@ class OatFileAssistant {
DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
};
+std::ostream& operator << (std::ostream& stream, const OatFileAssistant::OatStatus status);
+
} // namespace art
#endif // ART_RUNTIME_OAT_FILE_ASSISTANT_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 45ba7d0fc2..ca8f8bb510 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -577,9 +577,14 @@ bool Runtime::Start() {
if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
ScopedObjectAccess soa(self);
- StackHandleScope<1> hs(soa.Self());
- auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
- class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
+ StackHandleScope<2> hs(soa.Self());
+
+ auto class_class(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
+ auto field_class(hs.NewHandle<mirror::Class>(mirror::Field::StaticClass()));
+
+ class_linker_->EnsureInitialized(soa.Self(), class_class, true, true);
+ // Field class is needed for register_java_net_InetAddress in libcore, b/28153851.
+ class_linker_->EnsureInitialized(soa.Self(), field_class, true, true);
}
// InitNativeMethods needs to be after started_ so that the classes
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 6db0cb2cb6..635ff51697 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -129,8 +129,10 @@ RUNTIME_OPTIONS_KEY (CompilerCallbacks*, CompilerCallbacksPtr) // TDOO: make u
RUNTIME_OPTIONS_KEY (bool (*)(), HookIsSensitiveThread)
RUNTIME_OPTIONS_KEY (int32_t (*)(FILE* stream, const char* format, va_list ap), \
HookVfprintf, vfprintf)
+// Use _exit instead of exit so that we won't get DCHECK failures in global data
+// destructors. b/28106055.
RUNTIME_OPTIONS_KEY (void (*)(int32_t status), \
- HookExit, exit)
+ HookExit, _exit)
// We don't call abort(3) by default; see
// Runtime::Abort.
RUNTIME_OPTIONS_KEY (void (*)(), HookAbort, nullptr)
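The distinction matters because exit(3) runs atexit handlers and static destructors, while _exit(2) terminates the process immediately; a minimal illustration outside ART:

  #include <cstdlib>
  #include <unistd.h>

  static void OnExit() { /* global teardown that exit() would run */ }

  int main() {
    std::atexit(OnExit);
    _exit(0);  // Skips OnExit and static destructors; exit(0) would run them.
  }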
diff --git a/test/044-proxy/expected.txt b/test/044-proxy/expected.txt
index be7023e49d..2a5f0b90db 100644
--- a/test/044-proxy/expected.txt
+++ b/test/044-proxy/expected.txt
@@ -95,3 +95,5 @@ Proxy narrowed invocation return type passed
5.8
JNI_OnLoad called
callback
+Found constructor.
+Found constructors with 0 exceptions
diff --git a/test/044-proxy/src/ConstructorProxy.java b/test/044-proxy/src/ConstructorProxy.java
new file mode 100644
index 0000000000..95d150cbbd
--- /dev/null
+++ b/test/044-proxy/src/ConstructorProxy.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+
+/**
+ * Tests proxies when used with constructor methods.
+ */
+class ConstructorProxy implements InvocationHandler {
+ public static void main() {
+ try {
+ new ConstructorProxy().runTest();
+ } catch (Exception e) {
+ System.out.println("Unexpected failure occured");
+ e.printStackTrace();
+ }
+ }
+
+ public void runTest() throws Exception {
+ Class<?> proxyClass = Proxy.getProxyClass(
+ getClass().getClassLoader(),
+ new Class<?>[] { Runnable.class }
+ );
+ Constructor<?> constructor = proxyClass.getConstructor(InvocationHandler.class);
+ System.out.println("Found constructor.");
+ // We used to crash when asking for the exception types of the constructor, because the
+ // runtime was not using the non-proxy ArtMethod.
+ Object[] exceptions = constructor.getExceptionTypes();
+ System.out.println("Found constructors with " + exceptions.length + " exceptions");
+ }
+
+ @Override
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ return args[0];
+ }
+}
+
diff --git a/test/044-proxy/src/Main.java b/test/044-proxy/src/Main.java
index 1f23b95cf0..9dadb7c6ea 100644
--- a/test/044-proxy/src/Main.java
+++ b/test/044-proxy/src/Main.java
@@ -31,6 +31,7 @@ public class Main {
NarrowingTest.main(null);
FloatSelect.main(null);
NativeProxy.main(args);
+ ConstructorProxy.main();
}
// The following code maps from the actual proxy class names (eg $Proxy2) to their test output
diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java
index be666e94fa..15a9504acf 100644
--- a/test/536-checker-intrinsic-optimization/src/Main.java
+++ b/test/536-checker-intrinsic-optimization/src/Main.java
@@ -16,9 +16,69 @@
public class Main {
+ public static boolean doThrow = false;
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertBooleanEquals(boolean expected, boolean result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
public static void main(String[] args) {
stringEqualsSame();
stringArgumentNotNull("Foo");
+
+ assertIntEquals(0, $opt$noinline$getStringLength(""));
+ assertIntEquals(3, $opt$noinline$getStringLength("abc"));
+ assertIntEquals(10, $opt$noinline$getStringLength("0123456789"));
+
+ assertBooleanEquals(true, $opt$noinline$isStringEmpty(""));
+ assertBooleanEquals(false, $opt$noinline$isStringEmpty("abc"));
+ assertBooleanEquals(false, $opt$noinline$isStringEmpty("0123456789"));
+ }
+
+ /// CHECK-START: int Main.$opt$noinline$getStringLength(java.lang.String) instruction_simplifier (before)
+ /// CHECK-DAG: <<Length:i\d+>> InvokeVirtual intrinsic:StringLength
+ /// CHECK-DAG: Return [<<Length>>]
+
+ /// CHECK-START: int Main.$opt$noinline$getStringLength(java.lang.String) instruction_simplifier (after)
+ /// CHECK-DAG: <<String:l\d+>> ParameterValue
+ /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<String>>]
+ /// CHECK-DAG: <<Length:i\d+>> ArrayLength [<<NullCk>>] is_string_length:true
+ /// CHECK-DAG: Return [<<Length>>]
+
+ /// CHECK-START: int Main.$opt$noinline$getStringLength(java.lang.String) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual intrinsic:StringLength
+
+ public static int $opt$noinline$getStringLength(String s) {
+ if (doThrow) { throw new Error(); }
+ return s.length();
+ }
+
+ /// CHECK-START: boolean Main.$opt$noinline$isStringEmpty(java.lang.String) instruction_simplifier (before)
+ /// CHECK-DAG: <<IsEmpty:z\d+>> InvokeVirtual intrinsic:StringIsEmpty
+ /// CHECK-DAG: Return [<<IsEmpty>>]
+
+ /// CHECK-START: boolean Main.$opt$noinline$isStringEmpty(java.lang.String) instruction_simplifier (after)
+ /// CHECK-DAG: <<String:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<String>>]
+ /// CHECK-DAG: <<Length:i\d+>> ArrayLength [<<NullCk>>] is_string_length:true
+ /// CHECK-DAG: <<IsEmpty:z\d+>> Equal [<<Length>>,<<Const0>>]
+ /// CHECK-DAG: Return [<<IsEmpty>>]
+
+ /// CHECK-START: boolean Main.$opt$noinline$isStringEmpty(java.lang.String) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual intrinsic:StringIsEmpty
+
+ public static boolean $opt$noinline$isStringEmpty(String s) {
+ if (doThrow) { throw new Error(); }
+ return s.isEmpty();
}
/// CHECK-START: boolean Main.stringEqualsSame() instruction_simplifier (before)
diff --git a/test/599-checker-irreducible-loop/expected.txt b/test/599-checker-irreducible-loop/expected.txt
new file mode 100644
index 0000000000..573541ac97
--- /dev/null
+++ b/test/599-checker-irreducible-loop/expected.txt
@@ -0,0 +1 @@
+0
diff --git a/test/599-checker-irreducible-loop/info.txt b/test/599-checker-irreducible-loop/info.txt
new file mode 100644
index 0000000000..1e0dd02284
--- /dev/null
+++ b/test/599-checker-irreducible-loop/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing in the presence of
+an irreducible loop.
diff --git a/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali
new file mode 100644
index 0000000000..5331fd6a31
--- /dev/null
+++ b/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -0,0 +1,56 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LIrreducibleLoop;
+
+.super Ljava/lang/Object;
+
+## CHECK-START: int IrreducibleLoop.test(int) GVN (before)
+## CHECK-DAG: LoadClass loop:none
+## CHECK-DAG: LoadClass loop:{{B\d+}} outer_loop:none
+
+## CHECK-START: int IrreducibleLoop.test(int) GVN (after)
+## CHECK-DAG: LoadClass loop:none
+## CHECK-DAG: LoadClass loop:{{B\d+}} outer_loop:none
+.method public static test(I)I
+ .registers 2
+
+ sget v0, LIrreducibleLoop;->field1:I
+ sput v0, LIrreducibleLoop;->field2:I
+
+ if-eqz p0, :loop_entry
+ goto :exit
+
+ :loop_entry
+ if-eqz p0, :irreducible_loop_entry
+ sget v0, LIrreducibleLoop;->field2:I
+ sput v0, LIrreducibleLoop;->field1:I
+ if-eqz v0, :exit
+ goto :irreducible_other_loop_entry
+
+ :irreducible_loop_entry
+ if-eqz p0, :loop_back_edge
+ :irreducible_other_loop_entry
+ if-eqz v0, :loop_back_edge
+ goto :irreducible_loop_entry
+
+ :loop_back_edge
+ goto :loop_entry
+
+ :exit
+ return v0
+.end method
+
+.field public static field1:I
+.field public static field2:I
diff --git a/test/599-checker-irreducible-loop/src/Main.java b/test/599-checker-irreducible-loop/src/Main.java
new file mode 100644
index 0000000000..b47721f721
--- /dev/null
+++ b/test/599-checker-irreducible-loop/src/Main.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("IrreducibleLoop");
+ Method m = c.getMethod("test", int.class);
+ Object[] arguments = { 42 };
+ // Invoke the code just for sanity checking.
+ System.out.println(m.invoke(null, arguments));
+ }
+}
diff --git a/test/run-test b/test/run-test
index 047e3fb606..2710ea32b1 100755
--- a/test/run-test
+++ b/test/run-test
@@ -122,10 +122,12 @@ never_clean="no"
have_dex2oat="yes"
have_patchoat="yes"
have_image="yes"
-image_suffix=""
pic_image_suffix=""
multi_image_suffix=""
android_root="/system"
+# By default we will use optimizing.
+image_args=""
+image_suffix="-optimizing"
while true; do
if [ "x$1" = "x--host" ]; then
@@ -249,18 +251,18 @@ while true; do
image_suffix="-interpreter"
shift
elif [ "x$1" = "x--jit" ]; then
- run_args="${run_args} --jit"
+ image_args="--jit"
image_suffix="-jit"
shift
elif [ "x$1" = "x--optimizing" ]; then
- run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"
+ image_args="-Xcompiler-option --compiler-backend=Optimizing"
image_suffix="-optimizing"
shift
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
shift
elif [ "x$1" = "x--verify-soft-fail" ]; then
- run_args="${run_args} --verify-soft-fail"
+ image_args="--verify-soft-fail"
image_suffix="-interp-ac"
shift
elif [ "x$1" = "x--no-optimize" ]; then
@@ -349,6 +351,7 @@ while true; do
fi
done
+run_args="${run_args} ${image_args}"
# Allocate file descriptor real_stderr and redirect it to the shell's error
# output (fd 2).
if [ ${BASH_VERSINFO[1]} -ge 4 ] && [ ${BASH_VERSINFO[2]} -ge 1 ]; then
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
index d61a98da54..3936f296d3 100644
--- a/tools/ahat/test-dump/Main.java
+++ b/tools/ahat/test-dump/Main.java
@@ -50,7 +50,8 @@ public class Main {
bigArray[i] = (byte)((i*i) & 0xFF);
}
- NativeAllocationRegistry registry = new NativeAllocationRegistry(0x12345, 42);
+ NativeAllocationRegistry registry = new NativeAllocationRegistry(
+ Main.class.getClassLoader(), 0x12345, 42);
registry.registerNativeAllocation(anObject, 0xABCDABCD);
}
}
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index dd2cc3140f..f25fb98c4d 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -253,11 +253,5 @@
names: ["jsr166.CollectionTest#testEmptyMeansEmpty",
"jsr166.Collection8Test#testForEach",
"jsr166.Collection8Test#testForEachConcurrentStressTest"]
-},
-{
- description: "Unclear why this started to fail",
- result: EXEC_FAILED,
- bug: 28574453,
- names: [ "org.apache.harmony.tests.javax.security.cert.X509CertificateTest#testVerifyPublicKey" ]
}
]