Merge "Made run-test 960-964 support being run with jack compiler."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index fdfd94c..ff41736 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -29,6 +29,7 @@
GetMethodSignature \
Instrumentation \
Interfaces \
+ Lookup \
Main \
MultiDex \
MultiDexModifiedSecondary \
@@ -78,6 +79,7 @@
ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
ART_GTEST_stub_test_DEX_DEPS := AllFields
ART_GTEST_transaction_test_DEX_DEPS := Transaction
+ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
# The elf writer test has dependencies on core.oat.
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
@@ -220,6 +222,7 @@
runtime/reference_table_test.cc \
runtime/thread_pool_test.cc \
runtime/transaction_test.cc \
+ runtime/type_lookup_table_test.cc \
runtime/utf_test.cc \
runtime/utils_test.cc \
runtime/verifier/method_verifier_test.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 151437b..c37ceca 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -77,11 +77,10 @@
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
- size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table_size +
- gc_map_size;
- size_t code_offset = compiled_method->AlignCode(size - code_size);
- size_t padding = code_offset - (size - code_size);
- chunk->reserve(padding + size);
+ const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet());
+ const size_t size =
+ gc_map_size + mapping_table_size + vmap_table.size() + sizeof(method_header) + code_size;
+ chunk->reserve(size + max_padding);
chunk->resize(sizeof(method_header));
memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
@@ -91,10 +90,16 @@
if (gc_map_used) {
chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end());
}
- chunk->insert(chunk->begin(), padding, 0);
chunk->insert(chunk->end(), code.begin(), code.end());
- CHECK_EQ(padding + size, chunk->size());
- const void* code_ptr = &(*chunk)[code_offset];
+ CHECK_EQ(chunk->size(), size);
+ const void* unaligned_code_ptr = chunk->data() + (size - code_size);
+ size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr));
+ size_t padding = compiled_method->AlignCode(offset) - offset;
+ // Make sure no resizing takes place.
+ CHECK_GE(chunk->capacity(), chunk->size() + padding);
+ chunk->insert(chunk->begin(), padding, 0);
+ const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding;
+ CHECK_EQ(code_ptr, static_cast<const void*>(chunk->data() + (chunk->size() - code_size)));
MakeExecutable(code_ptr, code.size());
const void* method_code = CompiledMethod::CodePointer(code_ptr,
compiled_method->GetInstructionSet());
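
[Note] The rewritten buffer assembly above avoids a subtle bug: it reserves worst-case padding before taking the address of the unaligned code, so the later insert of the padding bytes can never reallocate the vector and invalidate that address. A minimal sketch of the same reserve-then-pad idiom (kAlignment stands in for GetInstructionSetAlignment(); names here are illustrative, not ART's):

    #include <cstdint>
    #include <vector>

    // Append `code` after `header` so that the code starts on a kAlignment
    // boundary. Capacity is reserved for the worst case up front, so the
    // final insert cannot reallocate the buffer.
    std::vector<uint8_t> PlaceAligned(const std::vector<uint8_t>& header,
                                      const std::vector<uint8_t>& code,
                                      size_t kAlignment) {
      std::vector<uint8_t> buf;
      buf.reserve(header.size() + code.size() + kAlignment);
      buf.insert(buf.end(), header.begin(), header.end());
      buf.insert(buf.end(), code.begin(), code.end());
      const uint8_t* unaligned = buf.data() + header.size();
      uintptr_t addr = reinterpret_cast<uintptr_t>(unaligned);
      size_t padding = (kAlignment - (addr % kAlignment)) % kAlignment;
      buf.insert(buf.begin(), padding, 0);  // shifts the code up by `padding`
      // The code now starts exactly `padding` bytes past its old position,
      // i.e. on a kAlignment boundary; a real caller would hand that pointer
      // to MakeExecutable().
      return buf;
    }
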
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index e1a2838..eaf2408 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -756,14 +756,7 @@
return *class_index;
}
- const DexFile::StringId* string_id = dex_file->FindStringId(kClassCacheNames[index]);
- if (string_id == nullptr) {
- *class_index = kIndexNotFound;
- return *class_index;
- }
- uint32_t string_index = dex_file->GetIndexForStringId(*string_id);
-
- const DexFile::TypeId* type_id = dex_file->FindTypeId(string_index);
+ const DexFile::TypeId* type_id = dex_file->FindTypeId(kClassCacheNames[index]);
if (type_id == nullptr) {
*class_index = kIndexNotFound;
return *class_index;
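
[Note] This hunk and several later ones (compiler_driver-inl.h, art_method.cc, class_linker.cc, class_linker_test.cc) collapse the two-step lookup, FindStringId() followed by FindTypeId() on the string index, into a single FindTypeId(descriptor) call. That works because dex type_ids are ordered by the descriptor strings they reference, so one binary search can compare descriptors directly. A hedged sketch of the idea over a plain array (the structures are simplified stand-ins, not the real dex layout):

    #include <cstring>
    #include <vector>

    // Simplified stand-in for a dex type id entry (illustrative only).
    struct TypeId { const char* descriptor; };

    // Binary search over type ids ordered by descriptor, mirroring the
    // single-step DexFile::FindTypeId(const char*) used in this change.
    const TypeId* FindTypeId(const std::vector<TypeId>& type_ids,
                             const char* descriptor) {
      size_t lo = 0, hi = type_ids.size();
      while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        int cmp = strcmp(type_ids[mid].descriptor, descriptor);
        if (cmp == 0) return &type_ids[mid];
        if (cmp < 0) lo = mid + 1; else hi = mid;
      }
      return nullptr;  // caller falls back to the slow path
    }
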
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 1a7dbe3..14ba81d 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -187,15 +187,11 @@
// Search dex file for localized ssb index, may fail if member's class is a parent
// of the class mentioned in the dex file and there is no dex cache entry.
std::string temp;
- const DexFile::StringId* string_id =
- dex_file->FindStringId(resolved_member->GetDeclaringClass()->GetDescriptor(&temp));
- if (string_id != nullptr) {
- const DexFile::TypeId* type_id =
- dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
- if (type_id != nullptr) {
- // medium path, needs check of static storage base being initialized
- storage_idx = dex_file->GetIndexForTypeId(*type_id);
- }
+ const DexFile::TypeId* type_id =
+ dex_file->FindTypeId(resolved_member->GetDeclaringClass()->GetDescriptor(&temp));
+ if (type_id != nullptr) {
+ // medium path, needs check of static storage base being initialized
+ storage_idx = dex_file->GetIndexForTypeId(*type_id);
}
}
if (storage_idx != DexFile::kDexNoIndex) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index dcb23bf..c7b8884 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -33,6 +33,7 @@
#include "driver/compiler_options.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
+#include "handle_scope-inl.h"
#include "image_writer.h"
#include "linker/relative_patcher.h"
#include "mirror/array.h"
@@ -44,7 +45,7 @@
#include "output_stream.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "handle_scope-inl.h"
+#include "type_lookup_table.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "verifier/method_verifier.h"
@@ -107,6 +108,9 @@
size_oat_class_status_(0),
size_oat_class_method_bitmaps_(0),
size_oat_class_method_offsets_(0),
+ size_oat_lookup_table_alignment_(0),
+ size_oat_lookup_table_offset_(0),
+ size_oat_lookup_table_(0),
method_offset_map_() {
CHECK(key_value_store != nullptr);
@@ -129,6 +133,10 @@
offset = InitDexFiles(offset);
}
{
+ TimingLogger::ScopedTiming split("InitLookupTables", timings);
+ offset = InitLookupTables(offset);
+ }
+ {
TimingLogger::ScopedTiming split("InitOatClasses", timings);
offset = InitOatClasses(offset);
}
@@ -322,7 +330,8 @@
return true;
}
- bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it) {
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
+ const ClassDataItemIterator& it) {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// num_non_null_compiled_methods_ since we only want to allocate
@@ -1043,11 +1052,29 @@
oat_dex_files_[i]->dex_file_offset_ = offset;
const DexFile* dex_file = (*dex_files_)[i];
+
+ // Initialize type lookup table
+ oat_dex_files_[i]->lookup_table_ = dex_file->GetTypeLookupTable();
+
offset += dex_file->GetHeader().file_size_;
}
return offset;
}
+size_t OatWriter::InitLookupTables(size_t offset) {
+ for (OatDexFile* oat_dex_file : oat_dex_files_) {
+ if (oat_dex_file->lookup_table_ != nullptr) {
+ uint32_t aligned_offset = RoundUp(offset, 4);
+ oat_dex_file->lookup_table_offset_ = aligned_offset;
+ size_oat_lookup_table_alignment_ += aligned_offset - offset;
+ offset = aligned_offset + oat_dex_file->lookup_table_->RawDataLength();
+ } else {
+ oat_dex_file->lookup_table_offset_ = 0;
+ }
+ }
+ return offset;
+}
+
size_t OatWriter::InitOatClasses(size_t offset) {
// calculate the offsets within OatDexFiles to OatClasses
InitOatClassesMethodVisitor visitor(this, offset);
@@ -1256,6 +1283,9 @@
DO_STAT(size_oat_class_status_);
DO_STAT(size_oat_class_method_bitmaps_);
DO_STAT(size_oat_class_method_offsets_);
+ DO_STAT(size_oat_lookup_table_alignment_);
+ DO_STAT(size_oat_lookup_table_offset_);
+ DO_STAT(size_oat_lookup_table_);
#undef DO_STAT
VLOG(compiler) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)"; \
@@ -1309,6 +1339,9 @@
}
size_dex_file_ += dex_file->GetHeader().file_size_;
}
+ if (!WriteLookupTables(out, file_offset)) {
+ return false;
+ }
for (size_t i = 0; i != oat_classes_.size(); ++i) {
if (!oat_classes_[i]->Write(this, out, file_offset)) {
PLOG(ERROR) << "Failed to write oat methods information to " << out->GetLocation();
@@ -1318,6 +1351,35 @@
return true;
}
+bool OatWriter::WriteLookupTables(OutputStream* out, const size_t file_offset) {
+ for (size_t i = 0; i < oat_dex_files_.size(); ++i) {
+ const uint32_t lookup_table_offset = oat_dex_files_[i]->lookup_table_offset_;
+ const TypeLookupTable* table = oat_dex_files_[i]->lookup_table_;
+ DCHECK_EQ(lookup_table_offset == 0, table == nullptr);
+ if (lookup_table_offset == 0) {
+ continue;
+ }
+ const uint32_t expected_offset = file_offset + lookup_table_offset;
+ off_t actual_offset = out->Seek(expected_offset, kSeekSet);
+ if (static_cast<uint32_t>(actual_offset) != expected_offset) {
+ const DexFile* dex_file = (*dex_files_)[i];
+ PLOG(ERROR) << "Failed to seek to lookup table section. Actual: " << actual_offset
+ << " Expected: " << expected_offset << " File: " << dex_file->GetLocation();
+ return false;
+ }
+ if (table != nullptr) {
+ if (!out->WriteFully(table->RawData(), table->RawDataLength())) {
+ const DexFile* dex_file = (*dex_files_)[i];
+ PLOG(ERROR) << "Failed to write lookup table for " << dex_file->GetLocation()
+ << " to " << out->GetLocation();
+ return false;
+ }
+ size_oat_lookup_table_ += table->RawDataLength();
+ }
+ }
+ return true;
+}
+
size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) {
#define VISIT(VisitorType) \
do { \
@@ -1425,6 +1487,7 @@
dex_file_location_data_ = reinterpret_cast<const uint8_t*>(location.data());
dex_file_location_checksum_ = dex_file.GetLocationChecksum();
dex_file_offset_ = 0;
+ lookup_table_offset_ = 0;
methods_offsets_.resize(dex_file.NumClassDefs());
}
@@ -1433,6 +1496,7 @@
+ dex_file_location_size_
+ sizeof(dex_file_location_checksum_)
+ sizeof(dex_file_offset_)
+ + sizeof(lookup_table_offset_)
+ (sizeof(methods_offsets_[0]) * methods_offsets_.size());
}
@@ -1441,6 +1505,10 @@
oat_header->UpdateChecksum(dex_file_location_data_, dex_file_location_size_);
oat_header->UpdateChecksum(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_));
oat_header->UpdateChecksum(&dex_file_offset_, sizeof(dex_file_offset_));
+ oat_header->UpdateChecksum(&lookup_table_offset_, sizeof(lookup_table_offset_));
+ if (lookup_table_ != nullptr) {
+ oat_header->UpdateChecksum(lookup_table_->RawData(), lookup_table_->RawDataLength());
+ }
oat_header->UpdateChecksum(&methods_offsets_[0],
sizeof(methods_offsets_[0]) * methods_offsets_.size());
}
@@ -1469,6 +1537,11 @@
return false;
}
oat_writer->size_oat_dex_file_offset_ += sizeof(dex_file_offset_);
+ if (!out->WriteFully(&lookup_table_offset_, sizeof(lookup_table_offset_))) {
+ PLOG(ERROR) << "Failed to write lookup table offset to " << out->GetLocation();
+ return false;
+ }
+ oat_writer->size_oat_lookup_table_offset_ += sizeof(lookup_table_offset_);
if (!out->WriteFully(&methods_offsets_[0],
sizeof(methods_offsets_[0]) * methods_offsets_.size())) {
PLOG(ERROR) << "Failed to write methods offsets to " << out->GetLocation();
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index d6cb65b..f2fe048 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -24,8 +24,8 @@
#include "linker/relative_patcher.h" // For linker::RelativePatcherTargetProvider.
#include "mem_map.h"
#include "method_reference.h"
-#include "oat.h"
#include "mirror/class.h"
+#include "oat.h"
#include "safe_map.h"
namespace art {
@@ -36,6 +36,7 @@
class ImageWriter;
class OutputStream;
class TimingLogger;
+class TypeLookupTable;
// OatHeader variable length with count of D OatDexFiles
//
@@ -49,6 +50,11 @@
// ...
// Dex[D]
//
+// TypeLookupTable[0] one descriptor to class def index hash table for each OatDexFile.
+// TypeLookupTable[1]
+// ...
+// TypeLookupTable[D]
+//
// OatClass[0] one variable sized OatClass for each of C DexFile::ClassDefs
// OatClass[1] contains OatClass entries with class status, offsets to code, etc.
// ...
@@ -168,6 +174,7 @@
size_t InitOatHeader();
size_t InitOatDexFiles(size_t offset);
+ size_t InitLookupTables(size_t offset);
size_t InitDexFiles(size_t offset);
size_t InitOatClasses(size_t offset);
size_t InitOatMaps(size_t offset);
@@ -177,6 +184,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
bool WriteTables(OutputStream* out, const size_t file_offset);
+ bool WriteLookupTables(OutputStream* out, const size_t file_offset);
size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
size_t WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset);
size_t WriteCodeDexFiles(OutputStream* out, const size_t file_offset, size_t relative_offset);
@@ -199,6 +207,8 @@
const uint8_t* dex_file_location_data_;
uint32_t dex_file_location_checksum_;
uint32_t dex_file_offset_;
+ uint32_t lookup_table_offset_;
+ TypeLookupTable* lookup_table_; // Owned by the dex file.
std::vector<uint32_t> methods_offsets_;
private:
@@ -333,6 +343,9 @@
uint32_t size_oat_class_status_;
uint32_t size_oat_class_method_bitmaps_;
uint32_t size_oat_class_method_offsets_;
+ uint32_t size_oat_lookup_table_alignment_;
+ uint32_t size_oat_lookup_table_offset_;
+ uint32_t size_oat_lookup_table_;
std::unique_ptr<linker::RelativePatcher> relative_patcher_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 6aed444..e6b9273 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3118,15 +3118,25 @@
}
void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(V0));
}
void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess));
+ return;
+ }
+
Register out = locations->Out().AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index e0aa4ff..57452cc 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -27,6 +27,11 @@
private:
void VisitShift(HBinaryOperation* shift);
+ void VisitAbove(HAbove* instruction) OVERRIDE;
+ void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
+ void VisitBelow(HBelow* instruction) OVERRIDE;
+ void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE;
+
void VisitAnd(HAnd* instruction) OVERRIDE;
void VisitCompare(HCompare* instruction) OVERRIDE;
void VisitMul(HMul* instruction) OVERRIDE;
@@ -105,6 +110,54 @@
}
}
+void InstructionWithAbsorbingInputSimplifier::VisitAbove(HAbove* instruction) {
+ if (instruction->GetLeft()->IsConstant() &&
+ instruction->GetLeft()->AsConstant()->IsZero()) {
+ // Replace code looking like
+ // ABOVE dst, 0, src // unsigned 0 > src is always false
+ // with
+ // CONSTANT false
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitAboveOrEqual(HAboveOrEqual* instruction) {
+ if (instruction->GetRight()->IsConstant() &&
+ instruction->GetRight()->AsConstant()->IsZero()) {
+ // Replace code looking like
+ // ABOVE_OR_EQUAL dst, src, 0 // unsigned src >= 0 is always true
+ // with
+ // CONSTANT true
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitBelow(HBelow* instruction) {
+ if (instruction->GetRight()->IsConstant() &&
+ instruction->GetRight()->AsConstant()->IsZero()) {
+ // Replace code looking like
+ // BELOW dst, src, 0 // unsigned src < 0 is always false
+ // with
+ // CONSTANT false
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitBelowOrEqual(HBelowOrEqual* instruction) {
+ if (instruction->GetLeft()->IsConstant() &&
+ instruction->GetLeft()->AsConstant()->IsZero()) {
+ // Replace code looking like
+ // BELOW_OR_EQUAL dst, 0, src // unsigned 0 <= src is always true
+ // with
+ // CONSTANT true
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
void InstructionWithAbsorbingInputSimplifier::VisitAnd(HAnd* instruction) {
HConstant* input_cst = instruction->GetConstantRight();
if ((input_cst != nullptr) && input_cst->IsZero()) {
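
[Note] The four visitors added above fold exactly the unsigned comparisons whose value no longer depends on the non-constant operand: unsigned 0 > x is always false, x >= 0 always true, x < 0 always false, and 0 <= x always true. The identities are easy to sanity-check in plain C++, where unsigned comparison semantics match HAbove/HBelow:

    #include <cassert>
    #include <cstdint>

    void CheckUnsignedIdentities(uint32_t x) {
      assert(!(0u > x));  // HAbove(0, x)        -> folded to constant false
      assert(x >= 0u);    // HAboveOrEqual(x, 0) -> folded to constant true
      assert(!(x < 0u));  // HBelow(x, 0)        -> folded to constant false
      assert(0u <= x);    // HBelowOrEqual(0, x) -> folded to constant true
    }

    int main() {
      CheckUnsignedIdentities(0);
      CheckUnsignedIdentities(1);
      CheckUnsignedIdentities(0xFFFFFFFFu);
    }
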
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 2feb75c..e469c8d 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -29,50 +29,70 @@
namespace art {
-static void TestCode(const uint16_t* data,
- const std::string& expected_before,
- const std::string& expected_after_cf,
- const std::string& expected_after_dce,
- std::function<void(HGraph*)> check_after_cf,
- Primitive::Type return_type = Primitive::kPrimInt) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data, return_type);
- ASSERT_NE(graph, nullptr);
+/**
+ * Fixture class for the constant folding and dce tests.
+ */
+class ConstantFoldingTest : public testing::Test {
+ public:
+ ConstantFoldingTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ }
- graph->TryBuildingSsa();
+ void TestCode(const uint16_t* data,
+ const std::string& expected_before,
+ const std::string& expected_after_cf,
+ const std::string& expected_after_dce,
+ std::function<void(HGraph*)> check_after_cf,
+ Primitive::Type return_type = Primitive::kPrimInt) {
+ graph_ = CreateCFG(&allocator_, data, return_type);
+ TestCodeOnReadyGraph(expected_before,
+ expected_after_cf,
+ expected_after_dce,
+ check_after_cf);
+ }
- StringPrettyPrinter printer_before(graph);
- printer_before.VisitInsertionOrder();
- std::string actual_before = printer_before.str();
- ASSERT_EQ(expected_before, actual_before);
+ void TestCodeOnReadyGraph(const std::string& expected_before,
+ const std::string& expected_after_cf,
+ const std::string& expected_after_dce,
+ std::function<void(HGraph*)> check_after_cf) {
+ ASSERT_NE(graph_, nullptr);
+ graph_->TryBuildingSsa();
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
- HConstantFolding(graph).Run();
- SSAChecker ssa_checker_cf(graph);
- ssa_checker_cf.Run();
- ASSERT_TRUE(ssa_checker_cf.IsValid());
+ StringPrettyPrinter printer_before(graph_);
+ printer_before.VisitInsertionOrder();
+ std::string actual_before = printer_before.str();
+ EXPECT_EQ(expected_before, actual_before);
- StringPrettyPrinter printer_after_cf(graph);
- printer_after_cf.VisitInsertionOrder();
- std::string actual_after_cf = printer_after_cf.str();
- ASSERT_EQ(expected_after_cf, actual_after_cf);
+ std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+ X86InstructionSetFeatures::FromCppDefines());
+ x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions());
+ HConstantFolding(graph_).Run();
+ SSAChecker ssa_checker_cf(graph_);
+ ssa_checker_cf.Run();
+ ASSERT_TRUE(ssa_checker_cf.IsValid());
- check_after_cf(graph);
+ StringPrettyPrinter printer_after_cf(graph_);
+ printer_after_cf.VisitInsertionOrder();
+ std::string actual_after_cf = printer_after_cf.str();
+ EXPECT_EQ(expected_after_cf, actual_after_cf);
- HDeadCodeElimination(graph).Run();
- SSAChecker ssa_checker_dce(graph);
- ssa_checker_dce.Run();
- ASSERT_TRUE(ssa_checker_dce.IsValid());
+ check_after_cf(graph_);
- StringPrettyPrinter printer_after_dce(graph);
- printer_after_dce.VisitInsertionOrder();
- std::string actual_after_dce = printer_after_dce.str();
- ASSERT_EQ(expected_after_dce, actual_after_dce);
-}
+ HDeadCodeElimination(graph_).Run();
+ SSAChecker ssa_checker_dce(graph_);
+ ssa_checker_dce.Run();
+ ASSERT_TRUE(ssa_checker_dce.IsValid());
+ StringPrettyPrinter printer_after_dce(graph_);
+ printer_after_dce.VisitInsertionOrder();
+ std::string actual_after_dce = printer_after_dce.str();
+ EXPECT_EQ(expected_after_dce, actual_after_dce);
+ }
+
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+};
/**
* Tiny three-register program exercising int constant folding on negation.
@@ -84,7 +104,7 @@
* v1 <- -v0 1. neg-int v1, v0
* return v1 2. return v1
*/
-TEST(ConstantFolding, IntConstantFoldingNegation) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingNegation) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::NEG_INT | 1 << 8 | 0 << 12,
@@ -141,7 +161,7 @@
* (v2, v3) <- -(v0, v1) 1. neg-long v2, v0
* return (v2, v3) 2. return-wide v2
*/
-TEST(ConstantFolding, LongConstantFoldingNegation) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) {
const int64_t input = INT64_C(4294967296); // 2^32
const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
const uint16_t word1 = High16Bits(Low32Bits(input));
@@ -205,7 +225,7 @@
* v2 <- v0 + v1 2. add-int v2, v0, v1
* return v2 4. return v2
*/
-TEST(ConstantFolding, IntConstantFoldingOnAddition1) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition1) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -271,7 +291,7 @@
* v2 <- v0 + v1 6. add-int v2, v0, v1
* return v2 8. return v2
*/
-TEST(ConstantFolding, IntConstantFoldingOnAddition2) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition2) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -357,7 +377,7 @@
* v2 <- v0 - v1 2. sub-int v2, v0, v1
* return v2 4. return v2
*/
-TEST(ConstantFolding, IntConstantFoldingOnSubtraction) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingOnSubtraction) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 3 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -421,7 +441,7 @@
* (v0, v1) + (v2, v3) 4. add-long v4, v0, v2
* return (v4, v5) 6. return-wide v4
*/
-TEST(ConstantFolding, LongConstantFoldingOnAddition) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) {
const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 1,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -486,7 +506,7 @@
* (v0, v1) - (v2, v3) 4. sub-long v4, v0, v2
* return (v4, v5) 6. return-wide v4
*/
-TEST(ConstantFolding, LongConstantFoldingOnSubtraction) {
+TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 3,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -560,7 +580,7 @@
* L3: v2 <- v1 + 8 11. add-int/lit16 v2, v1, #+8
* return v2 13. return v2
*/
-TEST(ConstantFolding, IntConstantFoldingAndJumps) {
+TEST_F(ConstantFoldingTest, IntConstantFoldingAndJumps) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -656,7 +676,6 @@
check_after_cf);
}
-
/**
* Three-register program with a constant (static) condition.
*
@@ -670,7 +689,7 @@
* L1: v2 <- v0 + v1 5. add-int v2, v0, v1
* return-void 7. return
*/
-TEST(ConstantFolding, ConstantCondition) {
+TEST_F(ConstantFoldingTest, ConstantCondition) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::CONST_4 | 0 << 8 | 0 << 12,
@@ -732,4 +751,109 @@
check_after_cf);
}
+/**
+ * Unsigned comparisons with zero. Since these instructions are not present
+ * in the bytecode, we need to set up the graph explicitly.
+ */
+TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) {
+ graph_ = CreateGraph(&allocator_);
+ HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block);
+ graph_->SetEntryBlock(entry_block);
+ HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block);
+ HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit_block);
+ graph_->SetExitBlock(exit_block);
+ entry_block->AddSuccessor(block);
+ block->AddSuccessor(exit_block);
+
+ // Make various unsigned comparisons with zero against a parameter.
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimInt, true);
+ entry_block->AddInstruction(parameter);
+ HInstruction* zero = graph_->GetIntConstant(0);
+ HInstruction* last;
+ block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero));
+ block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+
+ entry_block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (&allocator_) HReturn(zero));
+ exit_block->AddInstruction(new (&allocator_) HExit());
+
+ const std::string expected_before =
+ "BasicBlock 0, succ: 1\n"
+ " 0: ParameterValue [16, 14, 12, 10, 8, 6, 4, 2]\n"
+ " 1: IntConstant [19, 16, 14, 12, 10, 8, 6, 4, 2]\n"
+ " 18: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 2: Above(1, 0) [3]\n"
+ " 3: Deoptimize(2)\n"
+ " 4: Above(0, 1) [5]\n"
+ " 5: Deoptimize(4)\n"
+ " 6: AboveOrEqual(1, 0) [7]\n"
+ " 7: Deoptimize(6)\n"
+ " 8: AboveOrEqual(0, 1) [9]\n"
+ " 9: Deoptimize(8)\n"
+ " 10: Below(1, 0) [11]\n"
+ " 11: Deoptimize(10)\n"
+ " 12: Below(0, 1) [13]\n"
+ " 13: Deoptimize(12)\n"
+ " 14: BelowOrEqual(1, 0) [15]\n"
+ " 15: Deoptimize(14)\n"
+ " 16: BelowOrEqual(0, 1) [17]\n"
+ " 17: Deoptimize(16)\n"
+ " 19: Return(1)\n"
+ "BasicBlock 2, pred: 1\n"
+ " 20: Exit\n";
+
+ const std::string expected_after_cf =
+ "BasicBlock 0, succ: 1\n"
+ " 0: ParameterValue [16, 10, 6, 4]\n"
+ " 1: IntConstant [13, 3, 19, 16, 10, 6, 4]\n"
+ " 21: IntConstant [15, 9]\n"
+ " 18: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 3: Deoptimize(1)\n"
+ " 4: Above(0, 1) [5]\n"
+ " 5: Deoptimize(4)\n"
+ " 6: AboveOrEqual(1, 0) [7]\n"
+ " 7: Deoptimize(6)\n"
+ " 9: Deoptimize(21)\n"
+ " 10: Below(1, 0) [11]\n"
+ " 11: Deoptimize(10)\n"
+ " 13: Deoptimize(1)\n"
+ " 15: Deoptimize(21)\n"
+ " 16: BelowOrEqual(0, 1) [17]\n"
+ " 17: Deoptimize(16)\n"
+ " 19: Return(1)\n"
+ "BasicBlock 2, pred: 1\n"
+ " 20: Exit\n";
+
+ const std::string expected_after_dce = expected_after_cf;
+
+ auto check_after_cf = [](HGraph* graph) {
+ CHECK(graph != nullptr);
+ };
+
+ TestCodeOnReadyGraph(expected_before,
+ expected_after_cf,
+ expected_after_dce,
+ check_after_cf);
+}
+
} // namespace art
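
[Note] The move from free-standing TEST to a TEST_F fixture is what lets UnsignedComparisonsWithZero build graph_ by hand and then reuse TestCodeOnReadyGraph() for the checking half. For readers less familiar with gtest, a minimal fixture of the same shape (names illustrative):

    #include "gtest/gtest.h"

    class FixtureShapedTest : public testing::Test {
     protected:
      // Constructed fresh for every TEST_F body below, replacing the
      // locals that a free-standing helper function would re-create.
      FixtureShapedTest() : shared_state_(42) {}
      int shared_state_;
    };

    TEST_F(FixtureShapedTest, BodySeesFixtureState) {
      EXPECT_EQ(42, shared_state_);
    }
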
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 8968a44..fdf8cc9 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -20,19 +20,6 @@
namespace art {
/**
- * Returns true if instruction is invariant within the given loop.
- */
-static bool IsLoopInvariant(HLoopInformation* loop, HInstruction* instruction) {
- HLoopInformation* other_loop = instruction->GetBlock()->GetLoopInformation();
- if (other_loop != loop) {
- // If instruction does not occur in same loop, it is invariant
- // if it appears in an outer loop (including no loop at all).
- return other_loop == nullptr || loop->IsIn(*other_loop);
- }
- return false;
-}
-
-/**
* Since graph traversal may enter a SCC at any position, an initial representation may be rotated,
* along dependences, viz. any of (a, b, c, d), (d, a, b, c) (c, d, a, b), (b, c, d, a) assuming
* a chain of dependences (mutual independent items may occur in arbitrary order). For proper
@@ -601,15 +588,16 @@
// an unsigned entity, for example, as in the following loop that uses the full range:
// for (int i = INT_MIN; i < INT_MAX; i++) // TC = UINT_MAX
// (2) The TC is only valid if the loop is taken, otherwise TC = 0, as in:
- // for (int i = 12; i < U; i++) // TC = 0 when U >= 12
+ // for (int i = 12; i < U; i++) // TC = 0 when U < 12
// If this cannot be determined at compile-time, the TC is only valid within the
- // loop-body proper, not the loop-header unless enforced with an explicit condition.
+ // loop-body proper, not the loop-header unless enforced with an explicit taken-test.
// (3) The TC is only valid if the loop is finite, otherwise TC has no value, as in:
// for (int i = 0; i <= U; i++) // TC = Inf when U = INT_MAX
// If this cannot be determined at compile-time, the TC is only valid when enforced
- // with an explicit condition.
+ // with an explicit finite-test.
// (4) For loops which early-exit, the TC forms an upper bound, as in:
// for (int i = 0; i < 10 && ....; i++) // TC <= 10
+ InductionInfo* trip_count = upper_expr;
const bool is_taken = IsTaken(lower_expr, upper_expr, cmp);
const bool is_finite = IsFinite(upper_expr, stride_value, type, cmp);
const bool cancels = (cmp == kCondLT || cmp == kCondGT) && std::abs(stride_value) == 1;
@@ -617,26 +605,36 @@
// Convert exclusive integral inequality into inclusive integral inequality,
// viz. condition i < U is i <= U - 1 and condition i > U is i >= U + 1.
if (cmp == kCondLT) {
- upper_expr = CreateInvariantOp(kSub, upper_expr, CreateConstant(1, type));
+ trip_count = CreateInvariantOp(kSub, trip_count, CreateConstant(1, type));
} else if (cmp == kCondGT) {
- upper_expr = CreateInvariantOp(kAdd, upper_expr, CreateConstant(1, type));
+ trip_count = CreateInvariantOp(kAdd, trip_count, CreateConstant(1, type));
}
// Compensate for stride.
- upper_expr = CreateInvariantOp(kAdd, upper_expr, stride);
+ trip_count = CreateInvariantOp(kAdd, trip_count, stride);
}
- InductionInfo* trip_count
- = CreateInvariantOp(kDiv, CreateInvariantOp(kSub, upper_expr, lower_expr), stride);
+ trip_count = CreateInvariantOp(kDiv, CreateInvariantOp(kSub, trip_count, lower_expr), stride);
// Assign the trip-count expression to the loop control. Clients that use the information
// should be aware that the expression is only valid under the conditions listed above.
- InductionOp tcKind = kTripCountInBodyUnsafe;
+ InductionOp tcKind = kTripCountInBodyUnsafe; // needs both tests
if (is_taken && is_finite) {
- tcKind = kTripCountInLoop;
+ tcKind = kTripCountInLoop; // needs neither test
} else if (is_finite) {
- tcKind = kTripCountInBody;
+ tcKind = kTripCountInBody; // needs taken-test
} else if (is_taken) {
- tcKind = kTripCountInLoopUnsafe;
+ tcKind = kTripCountInLoopUnsafe; // needs finite-test
}
- AssignInfo(loop, loop->GetHeader()->GetLastInstruction(), CreateTripCount(tcKind, trip_count));
+ InductionOp op = kNop;
+ switch (cmp) {
+ case kCondLT: op = kLT; break;
+ case kCondLE: op = kLE; break;
+ case kCondGT: op = kGT; break;
+ case kCondGE: op = kGE; break;
+ default: LOG(FATAL) << "CONDITION UNREACHABLE";
+ }
+ InductionInfo* taken_test = CreateInvariantOp(op, lower_expr, upper_expr);
+ AssignInfo(loop,
+ loop->GetHeader()->GetLastInstruction(),
+ CreateTripCount(tcKind, trip_count, taken_test));
}
bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
@@ -707,7 +705,7 @@
return loop_it->second;
}
}
- if (IsLoopInvariant(loop, instruction)) {
+ if (loop->IsLoopInvariant(instruction, true)) {
InductionInfo* info = CreateInvariantFetch(instruction);
AssignInfo(loop, instruction, info);
return info;
@@ -829,12 +827,16 @@
std::string inv = "(";
inv += InductionToString(info->op_a);
switch (info->operation) {
- case kNop: inv += " @ "; break;
- case kAdd: inv += " + "; break;
+ case kNop: inv += " @ "; break;
+ case kAdd: inv += " + "; break;
case kSub:
- case kNeg: inv += " - "; break;
- case kMul: inv += " * "; break;
- case kDiv: inv += " / "; break;
+ case kNeg: inv += " - "; break;
+ case kMul: inv += " * "; break;
+ case kDiv: inv += " / "; break;
+ case kLT: inv += " < "; break;
+ case kLE: inv += " <= "; break;
+ case kGT: inv += " > "; break;
+ case kGE: inv += " >= "; break;
case kFetch:
DCHECK(info->fetch);
if (IsIntAndGet(info, &value)) {
@@ -843,10 +845,10 @@
inv += std::to_string(info->fetch->GetId()) + ":" + info->fetch->DebugName();
}
break;
- case kTripCountInLoop: inv += "TC-loop:"; break;
- case kTripCountInBody: inv += "TC-body:"; break;
- case kTripCountInLoopUnsafe: inv += "TC-loop-unsafe:"; break;
- case kTripCountInBodyUnsafe: inv += "TC-body-unsafe:"; break;
+ case kTripCountInLoop: inv += " (TC-loop) "; break;
+ case kTripCountInBody: inv += " (TC-body) "; break;
+ case kTripCountInLoopUnsafe: inv += " (TC-loop-unsafe) "; break;
+ case kTripCountInBodyUnsafe: inv += " (TC-body-unsafe) "; break;
}
inv += InductionToString(info->op_b);
return inv + ")";
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 7ab80cd..cf35409 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -65,11 +65,16 @@
kMul,
kDiv,
kFetch,
- // Trip counts (valid in full loop or only body proper; unsafe implies loop may be infinite).
- kTripCountInLoop,
- kTripCountInBody,
- kTripCountInLoopUnsafe,
- kTripCountInBodyUnsafe
+ // Trip-counts.
+ kTripCountInLoop, // valid in full loop; loop is finite
+ kTripCountInBody, // valid in body only; loop is finite
+ kTripCountInLoopUnsafe, // valid in full loop; loop may be infinite
+ kTripCountInBodyUnsafe, // valid in body only; loop may be infinite
+ // Comparisons for trip-count tests.
+ kLT,
+ kLE,
+ kGT,
+ kGE
};
/**
@@ -85,7 +90,7 @@
* (4) periodic
* nop: a, then defined by b (repeated when exhausted)
* (5) trip-count:
- * tc: defined by b
+ * tc: defined by a, taken-test in b
*/
struct InductionInfo : public ArenaObject<kArenaAllocInductionVarAnalysis> {
InductionInfo(InductionClass ic,
@@ -119,8 +124,9 @@
return new (graph_->GetArena()) InductionInfo(kInvariant, kFetch, nullptr, nullptr, f);
}
- InductionInfo* CreateTripCount(InductionOp op, InductionInfo* b) {
- return new (graph_->GetArena()) InductionInfo(kInvariant, op, nullptr, b, nullptr);
+ InductionInfo* CreateTripCount(InductionOp op, InductionInfo* a, InductionInfo* b) {
+ DCHECK(a != nullptr);
+ return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr);
}
InductionInfo* CreateInduction(InductionClass ic, InductionInfo* a, InductionInfo* b) {
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index f16da2a..b7262f6 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -234,7 +234,7 @@
EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
- EXPECT_STREQ("(TC-loop:(100))",
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
}
@@ -552,7 +552,7 @@
}
EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[d], d).c_str());
// Trip-count.
- EXPECT_STREQ("(TC-loop:(100))",
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
}
}
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index f4842f9..5530d26 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -152,7 +152,7 @@
}
} else if (is_min) {
// Special case for finding minimum: minimum of trip-count in loop-body is 1.
- if (trip != nullptr && in_body && instruction == trip->op_b->fetch) {
+ if (trip != nullptr && in_body && instruction == trip->op_a->fetch) {
return Value(1);
}
}
@@ -185,14 +185,14 @@
return GetFetch(info->fetch, trip, in_body, is_min);
case HInductionVarAnalysis::kTripCountInLoop:
if (!in_body && !is_min) { // one extra!
- return GetVal(info->op_b, trip, in_body, is_min);
+ return GetVal(info->op_a, trip, in_body, is_min);
}
FALLTHROUGH_INTENDED;
case HInductionVarAnalysis::kTripCountInBody:
if (is_min) {
return Value(0);
} else if (in_body) {
- return SubValue(GetVal(info->op_b, trip, in_body, is_min), Value(1));
+ return SubValue(GetVal(info->op_a, trip, in_body, is_min), Value(1));
}
break;
default:
@@ -428,7 +428,7 @@
return true;
case HInductionVarAnalysis::kTripCountInLoop:
if (!in_body && !is_min) { // one extra!
- return GenerateCode(info->op_b, trip, graph, block, result, in_body, is_min);
+ return GenerateCode(info->op_a, trip, graph, block, result, in_body, is_min);
}
FALLTHROUGH_INTENDED;
case HInductionVarAnalysis::kTripCountInBody:
@@ -438,7 +438,7 @@
}
return true;
} else if (in_body) {
- if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (GenerateCode(info->op_a, trip, graph, block, &opb, in_body, is_min)) {
if (graph != nullptr) {
*result = Insert(block,
new (graph->GetArena())
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 8fbc59f..ce8926a 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -125,7 +125,7 @@
/** Constructs a trip-count. */
HInductionVarAnalysis::InductionInfo* CreateTripCount(int32_t tc) {
- return iva_->CreateTripCount(HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc));
+ return iva_->CreateTripCount(HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc), nullptr);
}
/** Constructs a linear a * i + b induction. */
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 0ab0b80..05c7eb0 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1227,6 +1227,91 @@
GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, false, codegen_);
}
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->SetInAt(4, Location::RequiresRegister());
+
+ locations->SetOut(Location::RequiresRegister());
+}
+
+static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorMIPS64* codegen) {
+ Mips64Assembler* assembler = codegen->GetAssembler();
+ GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister expected = locations->InAt(3).AsRegister<GpuRegister>();
+ GpuRegister value = locations->InAt(4).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ DCHECK_NE(base, out);
+ DCHECK_NE(offset, out);
+ DCHECK_NE(expected, out);
+
+ // do {
+ // tmp_value = [tmp_ptr] - expected;
+ // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
+ // result = tmp_value == 0;
+
+ Label loop_head, exit_loop;
+ __ Daddu(TMP, base, offset);
+ __ Sync(0);
+ __ Bind(&loop_head);
+ if (type == Primitive::kPrimLong) {
+ __ Lld(out, TMP);
+ } else {
+ __ Ll(out, TMP);
+ }
+ __ Dsubu(out, out, expected); // If we didn't get the 'expected'
+ __ Sltiu(out, out, 1); // value, set 'out' to false, and
+ __ Beqzc(out, &exit_loop); // return.
+ __ Move(out, value); // Use 'out' for the 'store conditional' instruction.
+ // If we use 'value' directly, we would lose 'value'
+ // in the case that the store fails. Whether the
+ // store succeeds or fails, it will load the
+ // correct boolean value into the 'out' register.
+ if (type == Primitive::kPrimLong) {
+ __ Scd(out, TMP);
+ } else {
+ __ Sc(out, TMP);
+ }
+ __ Beqzc(out, &loop_head); // If we couldn't do the read-modify-write
+ // cycle atomically then retry.
+ __ Bind(&exit_loop);
+ __ Sync(0);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
+ CreateIntIntIntIntIntToInt(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
+ GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
+ CreateIntIntIntIntIntToInt(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
+ GenCas(invoke->GetLocations(), Primitive::kPrimLong, codegen_);
+}
+
+// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
+ CreateIntIntIntIntIntToInt(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
+ GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+}
+
// char java.lang.String.charAt(int index)
void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -1502,9 +1587,6 @@
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
UNIMPLEMENTED_INTRINSIC(StringEquals)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
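
[Note] GenCas() above is the classic LL/SC retry loop: load-linked, compare with expected, exit with false on mismatch, otherwise store-conditional and branch back if the reservation was lost, with Sync(0) on both sides supplying the full barriers Unsafe promises. In portable C++ the same contract is what a sequentially consistent compare_exchange_strong gives (equivalent semantics only, not ART's codegen):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Semantics of sun.misc.Unsafe.compareAndSwapInt, sketched with std::atomic.
    bool CompareAndSwapInt(std::atomic<int32_t>* addr, int32_t expected, int32_t x) {
      // On LL/SC architectures the strong variant compiles to an internal
      // retry loop, exactly what the Beqzc-back-to-loop_head branch does above.
      return addr->compare_exchange_strong(expected, x, std::memory_order_seq_cst);
    }

    int main() {
      std::atomic<int32_t> v{7};
      assert(CompareAndSwapInt(&v, 7, 9));    // succeeds: out = 1
      assert(!CompareAndSwapInt(&v, 7, 11));  // value is now 9: out = 0
      assert(v.load() == 9);
    }
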
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 3480265..8b28ff9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -574,6 +574,17 @@
return other.blocks_.IsBitSet(header_->GetBlockId());
}
+bool HLoopInformation::IsLoopInvariant(HInstruction* instruction, bool must_dominate) const {
+ HLoopInformation* other_loop = instruction->GetBlock()->GetLoopInformation();
+ if (other_loop != this && (other_loop == nullptr || !other_loop->IsIn(*this))) {
+ if (must_dominate) {
+ return instruction->GetBlock()->Dominates(GetHeader());
+ }
+ return true;
+ }
+ return false;
+}
+
size_t HLoopInformation::GetLifetimeEnd() const {
size_t last_position = 0;
for (HBasicBlock* back_edge : GetBackEdges()) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6028d4b..7df5866 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -551,6 +551,12 @@
// Note that `other` *must* be populated before entering this function.
bool IsIn(const HLoopInformation& other) const;
+ // Returns true if instruction is not defined within this loop or any loop nested inside
+ // this loop. If must_dominate is set, only definitions that actually dominate the loop
+ // header can be invariant. Otherwise, any definition outside the loop, including
+ // definitions that appear after the loop, is invariant.
+ bool IsLoopInvariant(HInstruction* instruction, bool must_dominate) const;
+
const ArenaBitVector& GetBlocks() const { return blocks_; }
void Add(HBasicBlock* block);
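
[Note] The must_dominate flag in the new IsLoopInvariant() exists because "defined outside the loop" is weaker than "available at the loop header": a definition placed after the loop is outside every enclosing loop, yet it cannot feed a computation hoisted to the header. A distilled, runnable version of the decision (illustrative, with the CFG queries reduced to booleans):

    #include <cassert>

    // Distilled from HLoopInformation::IsLoopInvariant: an instruction is
    // invariant iff its defining block is outside the loop and, when
    // must_dominate is set, additionally dominates the loop header.
    bool IsLoopInvariant(bool defined_outside_loop, bool dominates_header,
                         bool must_dominate) {
      if (!defined_outside_loop) return false;
      return !must_dominate || dominates_header;
    }

    int main() {
      // A definition placed *after* the loop: outside it, but not available
      // at the header, so hoisting-style clients must reject it.
      assert(IsLoopInvariant(true, false, /*must_dominate=*/ false));
      assert(!IsLoopInvariant(true, false, /*must_dominate=*/ true));
    }
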
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 26a05da..659da06 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -373,12 +373,18 @@
if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
// Calls to String.<init> are replaced with a StringFactory.
if (kIsDebugBuild) {
- ScopedObjectAccess soa(Thread::Current());
+ HInvoke* invoke = instr->AsInvoke();
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = cl->FindDexCache(
- soa.Self(), instr->AsInvoke()->GetDexFile(), false);
- ArtMethod* method = dex_cache->GetResolvedMethod(
- instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize());
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(cl->FindDexCache(soa.Self(), invoke->GetDexFile(), false)));
+ // Use a null loader. We should probably use the compiling method's class loader,
+ // but then we would need to pass it to RTPVisitor just for this debug check. Since
+ // the method is from the String class, the null loader is good enough.
+ Handle<mirror::ClassLoader> loader;
+ ArtMethod* method = cl->ResolveMethod(
+ invoke->GetDexFile(), invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
DCHECK(method != nullptr);
mirror::Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 384b879..af0bb65 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1410,6 +1410,7 @@
ScopedObjectAccess soa(self);
dex_caches_.push_back(soa.AddLocalReference<jobject>(
class_linker->RegisterDexFile(*dex_file, Runtime::Current()->GetLinearAlloc())));
+ dex_file->CreateTypeLookupTable();
}
// If we use a swap file, ensure we are above the threshold to make it necessary.
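
[Note] dex2oat now builds a TypeLookupTable per dex file right after registering it, and the oat writer (earlier in this change) serializes the table's raw bytes so the runtime can resolve a class descriptor to its class_def index without binary-searching type_ids. Conceptually it is a fixed-size hash table from descriptor to class_def_idx; a toy open-addressing version (the layout is an assumption for illustration; the real table's on-disk entry format differs):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    // Toy descriptor -> class_def_idx table with linear probing. Capacity is
    // a power of two kept under 50% load so probing always terminates.
    class ToyLookupTable {
     public:
      static constexpr uint16_t kNoIndex = 0xFFFF;

      explicit ToyLookupTable(size_t num_class_defs)
          : slots_(NextPow2(num_class_defs * 2 + 1), Slot{"", kNoIndex}) {}

      void Insert(const std::string& descriptor, uint16_t class_def_idx) {
        size_t i = Hash(descriptor);
        while (slots_[i].idx != kNoIndex) i = (i + 1) & (slots_.size() - 1);
        slots_[i] = Slot{descriptor, class_def_idx};
      }

      uint16_t Lookup(const std::string& descriptor) const {
        for (size_t i = Hash(descriptor); slots_[i].idx != kNoIndex;
             i = (i + 1) & (slots_.size() - 1)) {
          if (slots_[i].descriptor == descriptor) return slots_[i].idx;
        }
        return kNoIndex;  // caller falls back to the dex binary search
      }

     private:
      struct Slot { std::string descriptor; uint16_t idx; };

      size_t Hash(const std::string& s) const {
        return std::hash<std::string>{}(s) & (slots_.size() - 1);
      }
      static size_t NextPow2(size_t n) {
        size_t p = 1;
        while (p < n) p <<= 1;
        return p;
      }

      std::vector<Slot> slots_;
    };
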
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 09d7311..1fdffe3 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -47,6 +47,7 @@
dex_file_verifier.cc \
dex_instruction.cc \
elf_file.cc \
+ fault_handler.cc \
gc/allocation_record.cc \
gc/allocator/dlmalloc.cc \
gc/allocator/rosalloc.cc \
@@ -162,6 +163,7 @@
os_linux.cc \
parsed_options.cc \
primitive.cc \
+ profiler.cc \
quick_exception_handler.cc \
quick/inline_method_analyser.cc \
reference_table.cc \
@@ -176,8 +178,7 @@
thread_pool.cc \
trace.cc \
transaction.cc \
- profiler.cc \
- fault_handler.cc \
+ type_lookup_table.cc \
utf.cc \
utils.cc \
verifier/dex_gc_map.cc \
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index be5a15e..9ccabad 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1437,7 +1437,107 @@
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_rosalloc
+ // Fast path rosalloc allocation.
+ // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+ // x2-x7: free.
+ ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
+ // Load the class (x2)
+ ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
+ // Check class status.
+ ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
+ cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+ // Add a fake dependence from the
+ // following access flag and size
+ // loads to the status load.
+ // This is to prevent those loads
+ // from being reordered above the
+ // status load and reading wrong
+ // values (an alternative is to use
+ // a load-acquire for the status).
+ eor x3, x3, x3
+ add x2, x2, x3
+ // Check access flags has
+ // kAccClassIsFinalizable
+ ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
+ tst x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+ ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
+ // allocation stack has room.
+ // ldp won't work due to large offset.
+ ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+ cmp x3, x4
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (x3)
+ cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
+ // local allocation
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ // Compute the rosalloc bracket index
+ // from the size.
+ // Align up the size by the rosalloc
+ // bracket quantum size and divide
+ // by the quantum size and subtract
+ // by 1. This code is a shorter but
+ // equivalent version.
+ sub x3, x3, #1
+ lsr x3, x3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
+ // Load the rosalloc run (x4)
+ add x4, xSELF, x3, lsl #POINTER_SIZE_SHIFT
+ ldr x4, [x4, #THREAD_ROSALLOC_RUNS_OFFSET]
+ // Load the free list head (x3). This
+ // will be the return val.
+ ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
+ ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
+ // and update the list head with the
+ // next pointer.
+ str x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ // Store the class pointer in the
+ // header. This also overwrites the
+ // next pointer. The offsets are
+ // asserted to match.
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+ POISON_HEAP_REF w2
+ str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
+ // Push the new object onto the thread
+ // local allocation stack and
+ // increment the thread local
+ // allocation stack top.
+ ldr x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ str w3, [x1], #COMPRESSED_REFERENCE_SIZE // (Increment x1 as a side effect.)
+ str x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ // Decrement the size of the free list
+ ldr w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ sub x1, x1, #1
+ // TODO: consider combining this store
+ // and the list head store above using
+ // strd.
+ str w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+ // (Alternatively we could use "ishst"
+ // if we use load-acquire for the
+ // class status load.)
+ dmb ish
+ mov x0, x3 // Set the return value and return.
+ ret
+.Lart_quick_alloc_object_rosalloc_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ mov x2, xSELF // pass Thread::Current
+ bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_rosalloc
/*
* Called by managed code when the thread has been asked to suspend.
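
[Note] The "shorter but equivalent version" comment in the rosalloc fast path refers to computing the bracket index as (size - 1) >> log2(quantum) instead of RoundUp(size, quantum) / quantum - 1; the two agree for every size >= 1 when the quantum is a power of two. A quick demonstration (the quantum value below is illustrative):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kQuantum = 16;  // a power of two; kShift is log2(kQuantum)
      const size_t kShift = 4;
      for (size_t size = 1; size <= 1024; ++size) {
        size_t slow = (size + kQuantum - 1) / kQuantum - 1;  // align up, divide, minus one
        size_t fast = (size - 1) >> kShift;                  // the assembly's version
        assert(slow == fast);
      }
    }
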
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index a10d7af..3f17702 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -163,18 +163,13 @@
return dex_method_idx;
}
const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_);
- const DexFile::StringId* other_descriptor =
- other_dexfile.FindStringId(mid_declaring_class_descriptor);
- if (other_descriptor != nullptr) {
- const DexFile::TypeId* other_type_id =
- other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor));
- if (other_type_id != nullptr) {
- const DexFile::MethodId* other_mid = other_dexfile.FindMethodId(
- *other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_),
- other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_));
- if (other_mid != nullptr) {
- return other_dexfile.GetIndexForMethodId(*other_mid);
- }
+ const DexFile::TypeId* other_type_id = other_dexfile.FindTypeId(mid_declaring_class_descriptor);
+ if (other_type_id != nullptr) {
+ const DexFile::MethodId* other_mid = other_dexfile.FindMethodId(
+ *other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_),
+ other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_));
+ if (other_mid != nullptr) {
+ return other_dexfile.GetIndexForMethodId(*other_mid);
}
}
return DexFile::kDexNoIndex;
@@ -361,19 +356,6 @@
return true;
}
-ProfilingInfo* ArtMethod::CreateProfilingInfo() {
- DCHECK(!Runtime::Current()->IsAotCompiler());
- ProfilingInfo* info = ProfilingInfo::Create(this);
- MemberOffset offset = ArtMethod::EntryPointFromJniOffset(sizeof(void*));
- uintptr_t pointer = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
- if (!reinterpret_cast<Atomic<ProfilingInfo*>*>(pointer)->
- CompareExchangeStrongSequentiallyConsistent(nullptr, info)) {
- return GetProfilingInfo(sizeof(void*));
- } else {
- return info;
- }
-}
-
const uint8_t* ArtMethod::GetQuickenedInfo() {
bool found = false;
OatFile::OatMethod oat_method =
@@ -427,6 +409,12 @@
bool found;
OatFile::OatMethod oat_method = class_linker->FindOatMethodFor(this, &found);
if (!found) {
+ if (class_linker->IsQuickResolutionStub(existing_entry_point)) {
+ // We are running the generic jni stub, but the entry point of the method has not
+ // been updated yet.
+ DCHECK(IsNative());
+ return nullptr;
+ }
// Only for unit tests.
// TODO(ngeoffray): Update these tests to pass the right pc?
return OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index bb9804e..551989d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -305,12 +305,18 @@
PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
}
- ProfilingInfo* CreateProfilingInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
ProfilingInfo* GetProfilingInfo(size_t pointer_size) {
return reinterpret_cast<ProfilingInfo*>(GetEntryPointFromJniPtrSize(pointer_size));
}
+ ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
+ SetEntryPointFromJniPtrSize(info, sizeof(void*));
+ }
+
+ static MemberOffset ProfilingInfoOffset() {
+ return EntryPointFromJniOffset(sizeof(void*));
+ }
+
void* GetEntryPointFromJni() {
return GetEntryPointFromJniPtrSize(sizeof(void*));
}
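
[Note] With CreateProfilingInfo() gone from ArtMethod (see the art_method.cc hunk above), the pointer is written with a plain SetProfilingInfo() into the slot shared with the JNI entry point, and allocation plus any publication race now live with the caller. The deleted code was the usual allocate-then-CAS-publish idiom; in portable form (a sketch, not ART's API):

    #include <atomic>

    struct ProfilingInfo { /* counters, inline caches, ... */ };

    // Publish `fresh` into `slot` unless another thread won the race;
    // either way return the ProfilingInfo that ended up installed.
    ProfilingInfo* PublishOnce(std::atomic<ProfilingInfo*>* slot,
                               ProfilingInfo* fresh) {
      ProfilingInfo* expected = nullptr;
      if (slot->compare_exchange_strong(expected, fresh)) {
        return fresh;   // we installed ours
      }
      delete fresh;     // lost the race; discard the spare
      return expected;  // compare_exchange_strong loaded the winner here
    }
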
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 69d0799..5de1cac 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -616,10 +616,7 @@
// initialized.
{
const DexFile& dex_file = java_lang_Object->GetDexFile();
- const DexFile::StringId* void_string_id = dex_file.FindStringId("V");
- CHECK(void_string_id != nullptr);
- uint32_t void_string_index = dex_file.GetIndexForStringId(*void_string_id);
- const DexFile::TypeId* void_type_id = dex_file.FindTypeId(void_string_index);
+ const DexFile::TypeId* void_type_id = dex_file.FindTypeId("V");
CHECK(void_type_id != nullptr);
uint16_t void_type_idx = dex_file.GetIndexForTypeId(*void_type_id);
// Now we resolve void type so the dex cache contains it. We use java.lang.Object class
@@ -2740,17 +2737,13 @@
for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
const DexFile* dex_file = dex_cache->GetDexFile();
- // Try binary searching the string/type index.
- const DexFile::StringId* string_id = dex_file->FindStringId(descriptor);
- if (string_id != nullptr) {
- const DexFile::TypeId* type_id =
- dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
- if (type_id != nullptr) {
- uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
- mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != nullptr) {
- return klass;
- }
+ // Try binary searching the type index by descriptor.
+ const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
+ if (type_id != nullptr) {
+ uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
+ mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
+ if (klass != nullptr) {
+ return klass;
}
}
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 0926ce3..04b8900 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1032,9 +1032,7 @@
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", sizeof(void*));
- const DexFile::StringId* string_id = dex_file->FindStringId("LStaticsFromCode;");
- ASSERT_TRUE(string_id != nullptr);
- const DexFile::TypeId* type_id = dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
+ const DexFile::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id);
mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, soa.Self(), true, false);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b17b76e..7117be9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -69,29 +69,26 @@
return alloc_record_count;
}
-class Breakpoint {
+class Breakpoint : public ValueObject {
public:
- Breakpoint(ArtMethod* method, uint32_t dex_pc,
- DeoptimizationRequest::Kind deoptimization_kind)
- SHARED_REQUIRES(Locks::mutator_lock_)
- : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
+ Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind)
+ : method_(method),
+ dex_pc_(dex_pc),
+ deoptimization_kind_(deoptimization_kind) {
CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
- ScopedObjectAccessUnchecked soa(Thread::Current());
- method_ = soa.EncodeMethod(method);
}
Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
- : method_(nullptr), dex_pc_(other.dex_pc_),
- deoptimization_kind_(other.deoptimization_kind_) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- method_ = soa.EncodeMethod(other.Method());
- }
+ : method_(other.method_),
+ dex_pc_(other.dex_pc_),
+ deoptimization_kind_(other.deoptimization_kind_) {}
- ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- return soa.DecodeMethod(method_);
+ // Method() is called during root visiting; do not use ScopedObjectAccess here, or it can cause
+ // the GC to deadlock if another thread tries to call SuspendAll while the GC is in a runnable state.
+ ArtMethod* Method() const {
+ return method_;
}
uint32_t DexPc() const {
@@ -104,7 +101,7 @@
private:
// The location of this breakpoint.
- jmethodID method_;
+ ArtMethod* method_;
uint32_t dex_pc_;
// Indicates whether breakpoint needs full deoptimization or selective deoptimization.
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index ae62e2b..3a93aac 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -37,6 +37,7 @@
#include "dex_file-inl.h"
#include "dex_file_verifier.h"
#include "globals.h"
+#include "handle_scope-inl.h"
#include "leb128.h"
#include "mirror/field.h"
#include "mirror/method.h"
@@ -44,8 +45,8 @@
#include "os.h"
#include "reflection.h"
#include "safe_map.h"
-#include "handle_scope-inl.h"
#include "thread.h"
+#include "type_lookup_table.h"
#include "utf-inl.h"
#include "utils.h"
#include "well_known_classes.h"
@@ -414,11 +415,19 @@
method_ids_(reinterpret_cast<const MethodId*>(base + header_->method_ids_off_)),
proto_ids_(reinterpret_cast<const ProtoId*>(base + header_->proto_ids_off_)),
class_defs_(reinterpret_cast<const ClassDef*>(base + header_->class_defs_off_)),
- find_class_def_misses_(0),
- class_def_index_(nullptr),
oat_dex_file_(oat_dex_file) {
CHECK(begin_ != nullptr) << GetLocation();
CHECK_GT(size_, 0U) << GetLocation();
+ const uint8_t* lookup_data = (oat_dex_file != nullptr)
+ ? oat_dex_file->GetLookupTableData()
+ : nullptr;
+ if (lookup_data != nullptr) {
+ if (lookup_data + TypeLookupTable::RawDataLength(*this) > oat_dex_file->GetOatFile()->End()) {
+ LOG(WARNING) << "found truncated lookup table in " << GetLocation();
+ } else {
+ lookup_table_.reset(TypeLookupTable::Open(lookup_data, *this));
+ }
+ }
}
DexFile::~DexFile() {
@@ -426,8 +435,6 @@
// that's only called after DetachCurrentThread, which means there's no JNIEnv. We could
// re-attach, but cleaning up these global references is not obviously useful. It's not as if
// the global reference table is otherwise empty!
- // Remove the index if one were created.
- delete class_def_index_.LoadRelaxed();
}
bool DexFile::Init(std::string* error_msg) {
@@ -477,51 +484,26 @@
const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor, size_t hash) const {
DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash);
- // If we have an index lookup the descriptor via that as its constant time to search.
- Index* index = class_def_index_.LoadSequentiallyConsistent();
- if (index != nullptr) {
- auto it = index->FindWithHash(descriptor, hash);
- return (it == index->end()) ? nullptr : it->second;
+ if (LIKELY(lookup_table_ != nullptr)) {
+ const uint32_t class_def_idx = lookup_table_->Lookup(descriptor, hash);
+ return (class_def_idx != DexFile::kDexNoIndex) ? &GetClassDef(class_def_idx) : nullptr;
}
+
// Fast path for rare no class defs case.
- uint32_t num_class_defs = NumClassDefs();
+ const uint32_t num_class_defs = NumClassDefs();
if (num_class_defs == 0) {
return nullptr;
}
- // Search for class def with 2 binary searches and then a linear search.
- const StringId* string_id = FindStringId(descriptor);
- if (string_id != nullptr) {
- const TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
- if (type_id != nullptr) {
- uint16_t type_idx = GetIndexForTypeId(*type_id);
- for (size_t i = 0; i < num_class_defs; ++i) {
- const ClassDef& class_def = GetClassDef(i);
- if (class_def.class_idx_ == type_idx) {
- return &class_def;
- }
+ const TypeId* type_id = FindTypeId(descriptor);
+ if (type_id != nullptr) {
+ uint16_t type_idx = GetIndexForTypeId(*type_id);
+ for (size_t i = 0; i < num_class_defs; ++i) {
+ const ClassDef& class_def = GetClassDef(i);
+ if (class_def.class_idx_ == type_idx) {
+ return &class_def;
}
}
}
- // A miss. If we've had kMaxFailedDexClassDefLookups misses then build an index to speed things
- // up. This isn't done eagerly at construction as construction is not performed in multi-threaded
- // sections of tools like dex2oat. If we're lazy we hopefully increase the chance of balancing
- // out which thread builds the index.
- const uint32_t kMaxFailedDexClassDefLookups = 100;
- uint32_t old_misses = find_class_def_misses_.FetchAndAddSequentiallyConsistent(1);
- if (old_misses == kMaxFailedDexClassDefLookups) {
- // Are we the ones moving the miss count past the max? Sanity check the index doesn't exist.
- CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
- // Build the index.
- index = new Index();
- for (uint32_t i = 0; i < num_class_defs; ++i) {
- const ClassDef& class_def = GetClassDef(i);
- const char* class_descriptor = GetClassDescriptor(class_def);
- index->Insert(std::make_pair(class_descriptor, &class_def));
- }
- // Sanity check the index still doesn't exist, only 1 thread should build it.
- CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
- class_def_index_.StoreSequentiallyConsistent(index);
- }
return nullptr;
}
@@ -625,6 +607,26 @@
return nullptr;
}
+const DexFile::TypeId* DexFile::FindTypeId(const char* string) const {
+ int32_t lo = 0;
+ int32_t hi = NumTypeIds() - 1;
+ while (hi >= lo) {
+ int32_t mid = (hi + lo) / 2;
+ const TypeId& type_id = GetTypeId(mid);
+ const DexFile::StringId& str_id = GetStringId(type_id.descriptor_idx_);
+ const char* str = GetStringData(str_id);
+ int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
+ if (compare > 0) {
+ lo = mid + 1;
+ } else if (compare < 0) {
+ hi = mid - 1;
+ } else {
+ return &type_id;
+ }
+ }
+ return nullptr;
+}
+
const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t length) const {
int32_t lo = 0;
int32_t hi = NumStringIds() - 1;
@@ -697,6 +699,10 @@
return nullptr;
}
+void DexFile::CreateTypeLookupTable() const {
+ lookup_table_.reset(TypeLookupTable::Create(*this));
+}
+
// Given a signature place the type ids into the given vector
bool DexFile::CreateTypeList(const StringPiece& signature, uint16_t* return_type_idx,
std::vector<uint16_t>* param_type_idxs) const {
@@ -732,11 +738,7 @@
}
// TODO: avoid creating a std::string just to get a 0-terminated char array
std::string descriptor(signature.data() + start_offset, offset - start_offset);
- const DexFile::StringId* string_id = FindStringId(descriptor.c_str());
- if (string_id == nullptr) {
- return false;
- }
- const DexFile::TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
+ const DexFile::TypeId* type_id = FindTypeId(descriptor.c_str());
if (type_id == nullptr) {
return false;
}
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 47e5c12..e7877b2 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -51,6 +51,7 @@
class Signature;
template<class T> class Handle;
class StringPiece;
+class TypeLookupTable;
class ZipArchive;
// TODO: move all of the macro functionality into the DexCache class.
@@ -532,6 +533,8 @@
// Looks up a string id for a given modified utf8 string.
const StringId* FindStringId(const char* string) const;
+ const TypeId* FindTypeId(const char* string) const;
+
// Looks up a string id for a given utf16 string.
const StringId* FindStringId(const uint16_t* string, size_t length) const;
@@ -1139,6 +1142,12 @@
return oat_dex_file_;
}
+ TypeLookupTable* GetTypeLookupTable() const {
+ return lookup_table_.get();
+ }
+
+ void CreateTypeLookupTable() const;
+
private:
// Opens a .dex file
static std::unique_ptr<const DexFile> OpenFile(int fd, const char* location,
@@ -1237,44 +1246,11 @@
// Points to the base of the class definition list.
const ClassDef* const class_defs_;
- // Number of misses finding a class def from a descriptor.
- mutable Atomic<uint32_t> find_class_def_misses_;
-
- struct UTF16EmptyFn {
- void MakeEmpty(std::pair<const char*, const ClassDef*>& pair) const {
- pair.first = nullptr;
- pair.second = nullptr;
- }
- bool IsEmpty(const std::pair<const char*, const ClassDef*>& pair) const {
- if (pair.first == nullptr) {
- DCHECK(pair.second == nullptr);
- return true;
- }
- return false;
- }
- };
- struct UTF16HashCmp {
- // Hash function.
- size_t operator()(const char* key) const {
- return ComputeModifiedUtf8Hash(key);
- }
- // std::equal function.
- bool operator()(const char* a, const char* b) const {
- return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(a, b) == 0;
- }
- };
- using Index = HashMap<const char*,
- const ClassDef*,
- UTF16EmptyFn,
- UTF16HashCmp,
- UTF16HashCmp,
- std::allocator<std::pair<const char*, const ClassDef*>>>;
- mutable Atomic<Index*> class_def_index_;
-
// If this dex file was loaded from an oat file, oat_dex_file_ contains a
// pointer to the OatDexFile it was loaded from. Otherwise oat_dex_file_ is
// null.
const OatDexFile* oat_dex_file_;
+ mutable std::unique_ptr<TypeLookupTable> lookup_table_;
friend class DexFileVerifierTest;
};
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 90b35a3..0a167bb 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -297,6 +297,7 @@
ASSERT_TRUE(type_str_id != nullptr);
uint32_t type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
+ ASSERT_EQ(type_id, java_lang_dex_file_->FindTypeId(type_str));
ASSERT_TRUE(type_id != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id), i);
}
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index b1d4d35..18ccd08 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -92,10 +92,25 @@
fake_header_code_and_maps_.insert(fake_header_code_and_maps_.end(),
fake_code_.begin(), fake_code_.end());
- // NOTE: Don't align the code (it will not be executed) but check that the Thumb2
- // adjustment will be a NOP, see EntryPointToCodePointer().
- CHECK_ALIGNED(mapping_table_offset, 2);
- const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
+ // Align the code.
+ const size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ fake_header_code_and_maps_.reserve(fake_header_code_and_maps_.size() + alignment);
+ const void* unaligned_code_ptr =
+ fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size);
+ size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr));
+ size_t padding = RoundUp(offset, alignment) - offset;
+ // Make sure no resizing takes place.
+ CHECK_GE(fake_header_code_and_maps_.capacity(), fake_header_code_and_maps_.size() + padding);
+ fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), padding, 0);
+ const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding;
+ CHECK_EQ(code_ptr,
+ static_cast<const void*>(fake_header_code_and_maps_.data() +
+ (fake_header_code_and_maps_.size() - code_size)));
+
+ if (kRuntimeISA == kArm) {
+ // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
+ CHECK_ALIGNED(mapping_table_offset, 2);
+ }
method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
ASSERT_TRUE(method_f_ != nullptr);
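
Aside: the padding above is plain round-up arithmetic: round the unaligned code address up to the ISA alignment and pad by the difference. A self-contained sketch, with RoundUp mirroring art::RoundUp for power-of-two alignments (illustrative, not the test's actual helpers):

    #include <cstddef>
    #include <cstdint>

    // Round value up to a power-of-two alignment.
    constexpr uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // Number of zero bytes to insert so code starting at 'unaligned_ptr'
    // lands on an 'alignment' boundary.
    size_t PaddingFor(const void* unaligned_ptr, size_t alignment) {
      const uintptr_t offset = reinterpret_cast<uintptr_t>(unaligned_ptr);
      return static_cast<size_t>(RoundUp(offset, alignment) - offset);
    }
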
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 006d2c7..3be7181 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -46,7 +46,7 @@
DCHECK(Test(obj));
return true;
}
- } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word, old_word | mask));
+ } while (!atomic_entry->CompareExchangeWeakRelaxed(old_word, old_word | mask));
DCHECK(Test(obj));
return false;
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e433b8d..20e775c 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -339,9 +339,7 @@
<< thread->GetState() << " thread " << thread << " self " << self;
// If thread is a running mutator, then act on behalf of the garbage collector.
// See the code in ThreadList::RunCheckpoint.
- if (thread->GetState() == kRunnable) {
- concurrent_copying_->GetBarrier().Pass(self);
- }
+ concurrent_copying_->GetBarrier().Pass(self);
}
private:
@@ -514,9 +512,7 @@
thread->SetIsGcMarking(false);
// If thread is a running mutator, then act on behalf of the garbage collector.
// See the code in ThreadList::RunCheckpoint.
- if (thread->GetState() == kRunnable) {
- concurrent_copying_->GetBarrier().Pass(self);
- }
+ concurrent_copying_->GetBarrier().Pass(self);
}
private:
@@ -937,9 +933,7 @@
}
// If thread is a running mutator, then act on behalf of the garbage collector.
// See the code in ThreadList::RunCheckpoint.
- if (thread->GetState() == kRunnable) {
- concurrent_copying_->GetBarrier().Pass(self);
- }
+ concurrent_copying_->GetBarrier().Pass(self);
}
private:
@@ -1670,7 +1664,7 @@
// It was updated by the mutator.
break;
}
- } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+ } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
false, false, kVerifyNone>(offset, expected_ref, new_ref));
}
@@ -1695,7 +1689,7 @@
// It was updated by the mutator.
break;
}
- } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+ } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
}
}
@@ -1716,7 +1710,7 @@
// It was updated by the mutator.
break;
}
- } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+ } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
}
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 77a288b..db516a0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1146,9 +1146,7 @@
}
// If thread is a running mutator, then act on behalf of the garbage collector.
// See the code in ThreadList::RunCheckpoint.
- if (thread->GetState() == kRunnable) {
- mark_sweep_->GetBarrier().Pass(self);
- }
+ mark_sweep_->GetBarrier().Pass(self);
}
private:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1d38525..ab93142 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1291,9 +1291,7 @@
ATRACE_END();
// If thread is a running mutator, then act on behalf of the trim thread.
// See the code in ThreadList::RunCheckpoint.
- if (thread->GetState() == kRunnable) {
- barrier_->Pass(Thread::Current());
- }
+ barrier_->Pass(Thread::Current());
}
private:
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e73ba82..1f89f9b 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -43,7 +43,7 @@
class Jit {
public:
static constexpr bool kStressMode = kIsDebugBuild;
- static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 1000;
+ static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 500;
static constexpr size_t kDefaultWarmupThreshold = kDefaultCompileThreshold / 2;
virtual ~Jit();
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2d0a2a5..cfccec8 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -21,6 +21,7 @@
#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
+#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
@@ -56,9 +57,9 @@
return nullptr;
}
- // Data cache is 1 / 4 of the map.
+ // Data cache is 1 / 2 of the map.
// TODO: Make this variable?
- size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
+ size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
size_t code_size = data_map->Size() - data_size;
uint8_t* divider = data_map->Begin() + data_size;
@@ -206,10 +207,23 @@
// We do not check if a code cache GC is in progress, as this method comes
// with the classlinker_classes_lock_ held, and suspending ourselves could
// lead to a deadlock.
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- if (alloc.ContainsUnsafe(it->second)) {
- FreeCode(it->first, it->second);
- it = method_code_map_.erase(it);
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ if (alloc.ContainsUnsafe(it->second)) {
+ FreeCode(it->first, it->second);
+ it = method_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ }
+ for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
+ ProfilingInfo* info = *it;
+ if (alloc.ContainsUnsafe(info->GetMethod())) {
+ info->GetMethod()->SetProfilingInfo(nullptr);
+ mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
+ it = profiling_infos_.erase(it);
} else {
++it;
}
@@ -355,9 +369,7 @@
DCHECK(thread == Thread::Current() || thread->IsSuspended());
MarkCodeVisitor visitor(thread, code_cache_);
visitor.WalkStack();
- if (thread->GetState() == kRunnable) {
- barrier_->Pass(Thread::Current());
- }
+ barrier_->Pass(Thread::Current());
}
private:
@@ -387,6 +399,9 @@
for (auto& it : method_code_map_) {
it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
}
+ for (ProfilingInfo* info : profiling_infos_) {
+ info->GetMethod()->SetProfilingInfo(nullptr);
+ }
}
// Run a checkpoint on all threads to mark the JIT compiled code they are running.
@@ -400,27 +415,37 @@
}
}
- // Free unused compiled code, and restore the entry point of used compiled code.
{
MutexLock mu(self, lock_);
DCHECK_EQ(map_size, method_code_map_.size());
- ScopedCodeCacheWrite scc(code_map_.get());
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
- ArtMethod* method = it->second;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- if (GetLiveBitmap()->Test(allocation)) {
- method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
- ++it;
- } else {
- method->ClearCounter();
- DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
- FreeCode(code_ptr, method);
- it = method_code_map_.erase(it);
+ // Free unused compiled code, and restore the entry point of used compiled code.
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (GetLiveBitmap()->Test(allocation)) {
+ method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+ ++it;
+ } else {
+ method->ClearCounter();
+ DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ }
}
}
GetLiveBitmap()->Bitmap::Clear();
+
+ // Free all profiling info.
+ for (ProfilingInfo* info : profiling_infos_) {
+ DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
+ mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
+ }
+ profiling_infos_.clear();
+
collection_in_progress_ = false;
lock_cond_.Broadcast(self);
}
@@ -460,5 +485,44 @@
return method_header;
}
+ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& entries,
+ bool retry_allocation) {
+ ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);
+
+ if (info == nullptr && retry_allocation) {
+ GarbageCollectCache(self);
+ info = AddProfilingInfoInternal(self, method, entries);
+ }
+ return info;
+}
+
+ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& entries) {
+ size_t profile_info_size = RoundUp(
+ sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
+ sizeof(void*));
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+
+ // Check whether some other thread has concurrently created it.
+ ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ if (info != nullptr) {
+ return info;
+ }
+
+ uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
+ if (data == nullptr) {
+ return nullptr;
+ }
+ info = new (data) ProfilingInfo(method, entries);
+ method->SetProfilingInfo(info);
+ profiling_infos_.push_back(info);
+ return info;
+}
+
} // namespace jit
} // namespace art
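
Aside: AddProfilingInfo is an allocate/collect/retry pattern over the data mspace. A reduced sketch under stand-in names; TryAllocate and CollectGarbage are illustrative placeholders, not ART APIs:

    #include <cstddef>
    #include <cstdlib>

    void* TryAllocate(std::size_t size) { return std::malloc(size); }  // Stand-in for mspace_malloc.
    void CollectGarbage() { /* stand-in for GarbageCollectCache(self) */ }

    // Allocate 'size' bytes; on failure optionally collect once and retry.
    // May still return nullptr if the cache remains full.
    void* AllocateWithRetry(std::size_t size, bool retry_allocation) {
      void* data = TryAllocate(size);
      if (data == nullptr && retry_allocation) {
        CollectGarbage();
        data = TryAllocate(size);
      }
      return data;
    }
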
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 4e415b8..e10f962 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -35,6 +35,7 @@
class ArtMethod;
class LinearAlloc;
+class ProfilingInfo;
namespace jit {
@@ -109,11 +110,21 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Remove all methods in our cache that were allocated by 'alloc'.
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
REQUIRES(!lock_)
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
+ // run a collection and retry if the first allocation is unsuccessful.
+ ProfilingInfo* AddProfilingInfo(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& entries,
+ bool retry_allocation)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
// Take ownership of code_mem_map.
JitCodeCache(MemMap* code_map, MemMap* data_map);
@@ -133,6 +144,12 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ ProfilingInfo* AddProfilingInfoInternal(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& entries)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
bool WaitForPotentialCollectionToComplete(Thread* self)
@@ -157,8 +174,10 @@
void* data_mspace_ GUARDED_BY(lock_);
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
- // This map holds compiled code associated to the ArtMethod
+ // This map holds compiled code associated with the ArtMethod.
SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
+ // ProfilingInfo objects we have allocated.
+ std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 666b8e7..8aaa5fa 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -26,7 +26,12 @@
class JitCompileTask FINAL : public Task {
public:
- explicit JitCompileTask(ArtMethod* method) : method_(method) {
+ enum TaskKind {
+ kAllocateProfile,
+ kCompile
+ };
+
+ JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
ScopedObjectAccess soa(Thread::Current());
// Add a global ref to the class to prevent class unloading until compilation is done.
klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
@@ -40,9 +45,16 @@
void Run(Thread* self) OVERRIDE {
ScopedObjectAccess soa(self);
- VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
- if (!Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
- VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
+ if (kind_ == kCompile) {
+ VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
+ if (!Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
+ VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
+ }
+ } else {
+ DCHECK(kind_ == kAllocateProfile);
+ if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
+ VLOG(jit) << "Start profiling " << PrettyMethod(method_);
+ }
}
}
@@ -52,6 +64,7 @@
private:
ArtMethod* const method_;
+ const TaskKind kind_;
jobject klass_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
@@ -73,7 +86,6 @@
}
void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t) {
- ScopedObjectAccessUnchecked soa(self);
// Since we don't have on-stack replacement, some methods can remain in the interpreter longer
// than we want resulting in samples even after the method is compiled.
if (method->IsClassInitializer() || method->IsNative()) {
@@ -85,14 +97,20 @@
}
uint16_t sample_count = method->IncrementCounter();
if (sample_count == warm_method_threshold_) {
- ProfilingInfo* info = method->CreateProfilingInfo();
- if (info != nullptr) {
+ if (ProfilingInfo::Create(self, method, /* retry_allocation */ false)) {
VLOG(jit) << "Start profiling " << PrettyMethod(method);
+ } else {
+ // Allocation failed. Instead of doing the collection on the Java thread, we push
+ // an allocation task to a compiler thread, which will do the collection.
+ thread_pool_->AddTask(self, new JitCompileTask(
+ method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kAllocateProfile));
+ thread_pool_->StartWorkers(self);
}
}
+
if (sample_count == hot_method_threshold_) {
thread_pool_->AddTask(self, new JitCompileTask(
- method->GetInterfaceMethodIfProxy(sizeof(void*))));
+ method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kCompile));
thread_pool_->StartWorkers(self);
}
}
@@ -107,14 +125,18 @@
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee ATTRIBUTE_UNUSED) {
+ instrumentation_cache_->AddSamples(thread, caller, 1);
+ // We make sure we cannot be suspended, as the profiling info can be concurrently deleted.
+ thread->StartAssertNoThreadSuspension("Instrumenting invoke");
DCHECK(this_object != nullptr);
ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*));
if (info != nullptr) {
// Since the instrumentation is marked from the declaring class we need to mark the card so
// that mod-union tables and card rescanning know about the update.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(caller->GetDeclaringClass());
- info->AddInvokeInfo(thread, dex_pc, this_object->GetClass());
+ info->AddInvokeInfo(dex_pc, this_object->GetClass());
}
+ thread->EndAssertNoThreadSuspension(nullptr);
}
void JitInstrumentationCache::WaitForCompilationToFinish(Thread* self) {
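
Aside: AddSamples now implements a two-threshold policy: the warm threshold starts profiling (falling back to a compiler-thread task if the first allocation fails), and the hot threshold enqueues compilation. A schematic sketch with illustrative stand-ins for the real task queue:

    #include <cstdint>

    void EnqueueProfileAllocationTask() {}     // Stand-in for the kAllocateProfile task.
    void EnqueueCompileTask() {}               // Stand-in for the kCompile task.
    bool TryStartProfiling() { return true; }  // Stand-in for ProfilingInfo::Create(..., false).

    void OnSample(uint16_t sample_count, uint16_t warm_threshold, uint16_t hot_threshold) {
      if (sample_count == warm_threshold && !TryStartProfiling()) {
        EnqueueProfileAllocationTask();  // A compiler thread will collect and retry.
      }
      if (sample_count == hot_threshold) {
        EnqueueCompileTask();
      }
    }
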
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 7c5f78e..2e52b1b 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -25,7 +25,7 @@
namespace art {
-ProfilingInfo* ProfilingInfo::Create(ArtMethod* method) {
+bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
// Walk over the dex instructions of the method and keep track of
// instructions we are interested in profiling.
DCHECK(!method->IsNative());
@@ -57,23 +57,15 @@
// If there is no instruction we are interested in, no need to create a `ProfilingInfo`
// object, it will never be filled.
if (entries.empty()) {
- return nullptr;
+ return true;
}
// Allocate the `ProfilingInfo` object in the JIT's data space.
jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
- size_t profile_info_size = sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size();
- uint8_t* data = code_cache->ReserveData(Thread::Current(), profile_info_size);
-
- if (data == nullptr) {
- VLOG(jit) << "Cannot allocate profiling info anymore";
- return nullptr;
- }
-
- return new (data) ProfilingInfo(entries);
+ return code_cache->AddProfilingInfo(self, method, entries, retry_allocation) != nullptr;
}
-void ProfilingInfo::AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls) {
+void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
InlineCache* cache = nullptr;
// TODO: binary search if array is too long.
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
@@ -84,7 +76,6 @@
}
DCHECK(cache != nullptr);
- ScopedObjectAccess soa(self);
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
mirror::Class* existing = cache->classes_[i].Read();
if (existing == cls) {
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 7a2d1a8..b13a315 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -26,6 +26,10 @@
class ArtMethod;
+namespace jit {
+class JitCodeCache;
+}
+
namespace mirror {
class Class;
}
@@ -36,10 +40,17 @@
*/
class ProfilingInfo {
public:
- static ProfilingInfo* Create(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Create a ProfilingInfo for 'method'. Return true if it succeeded, or if it is
+ // not needed because the method has no virtual/interface invocations.
+ static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Add information from an executed INVOKE instruction to the profile.
- void AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls);
+ void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
+ // Method should not be interruptible, as it manipulates the ProfilingInfo
+ // which can be concurrently collected.
+ REQUIRES(Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
@@ -52,6 +63,10 @@
}
}
+ ArtMethod* GetMethod() const {
+ return method_;
+ }
+
private:
// Structure to store the classes seen at runtime for a specific instruction.
// Once the classes_ array is full, we consider the INVOKE to be megamorphic.
@@ -84,8 +99,9 @@
GcRoot<mirror::Class> classes_[kIndividualCacheSize];
};
- explicit ProfilingInfo(const std::vector<uint32_t>& entries)
- : number_of_inline_caches_(entries.size()) {
+ ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
+ : number_of_inline_caches_(entries.size()),
+ method_(method) {
memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc = entries[i];
@@ -95,9 +111,14 @@
// Number of instructions we are profiling in the ArtMethod.
const uint32_t number_of_inline_caches_;
+ // Method this profiling info is for.
+ ArtMethod* const method_;
+
// Dynamically allocated array of size `number_of_inline_caches_`.
InlineCache cache_[0];
+ friend class jit::JitCodeCache;
+
DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
};
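
Aside: ProfilingInfo is built with placement new into cache-owned memory, with the inline caches stored in a trailing cache_[0] array. A sketch of that idiom under illustrative names; the zero-length trailing array is the same GNU-style extension cache_[0] relies on, and the Record/arena types here are not ART types:

    #include <cstdint>
    #include <cstring>
    #include <new>
    #include <vector>

    struct Record {
      explicit Record(uint32_t n) : count(n) { std::memset(entries, 0, n * sizeof(uint32_t)); }
      uint32_t count;
      uint32_t entries[0];  // Trailing storage sized at allocation time (GNU extension).
    };

    Record* CreateRecord(std::vector<uint8_t>& arena, uint32_t n) {
      arena.resize(sizeof(Record) + n * sizeof(uint32_t));
      return new (arena.data()) Record(n);  // Construct in place; never 'delete' this.
    }
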
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 90180c5..5c12091 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -95,6 +95,12 @@
OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
}
+inline bool Object::CasLockWordWeakRelease(LockWord old_val, LockWord new_val) {
+ // Force use of non-transactional mode and do not check.
+ return CasFieldWeakRelease32<false, false>(
+ OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
+}
+
inline uint32_t Object::GetLockOwnerThreadId() {
return Monitor::GetLockOwnerThreadId(this);
}
@@ -175,7 +181,10 @@
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(expected_rb_ptr)));
new_lw = lw;
new_lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
- } while (!CasLockWordWeakSequentiallyConsistent(expected_lw, new_lw));
+ // This CAS is a CAS release so that when the GC updates all the fields of an object and then
+ // changes the object from gray to black, the field updates (stores) will be visible (won't be
+ // reordered after this CAS).
+ } while (!CasLockWordWeakRelease(expected_lw, new_lw));
return true;
#elif USE_BROOKS_READ_BARRIER
DCHECK(kUseBrooksReadBarrier);
@@ -671,6 +680,24 @@
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
+ int32_t old_value, int32_t new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
+
+ return atomic_addr->CompareExchangeWeakRelease(old_value, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
int32_t old_value, int32_t new_value) {
if (kCheckTransaction) {
@@ -944,6 +971,62 @@
return success;
}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
+ MemberOffset field_offset, Object* old_value, Object* new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
+ MemberOffset field_offset, Object* old_value, Object* new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
template<bool kIsStatic, typename Visitor>
inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) {
if (!kIsStatic && (ref_offsets != mirror::Class::kClassWalkSuper)) {
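
Aside: the weak relaxed CAS variants added above are intended for retry loops where losing the race to a mutator is benign, as in the concurrent copying collector's field forwarding. A std::atomic rendition of such a loop (illustrative, not ART code):

    #include <atomic>
    #include <cstdint>

    // Forward a 32-bit reference field from 'from_ref' to 'to_ref'. A weak
    // compare-exchange may fail spuriously, so it loops; if the observed
    // value is no longer 'from_ref', a mutator won the race and we stop.
    void ForwardField(std::atomic<uint32_t>* field, uint32_t from_ref, uint32_t to_ref) {
      uint32_t expected = from_ref;
      while (!field->compare_exchange_weak(expected, to_ref, std::memory_order_relaxed)) {
        if (expected != from_ref) {
          break;  // Updated by the mutator; nothing to do.
        }
      }
    }
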
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f75b8ae..022f31d 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -135,6 +135,8 @@
SHARED_REQUIRES(Locks::mutator_lock_);
bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
+ SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
mirror::Object* MonitorEnter(Thread* self)
@@ -276,7 +278,6 @@
Object* old_value,
Object* new_value)
SHARED_REQUIRES(Locks::mutator_lock_);
-
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
@@ -288,6 +289,18 @@
Object* old_value,
Object* new_value)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+ Object* old_value,
+ Object* new_value)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+ Object* old_value,
+ Object* new_value)
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
@@ -396,6 +409,12 @@
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
+ int32_t new_value) ALWAYS_INLINE
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index f5a0445..c1284a6 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -307,10 +307,7 @@
ScopedObjectAccess soa(Thread::Current());
Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", sizeof(void*));
- const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I");
- ASSERT_TRUE(string_id != nullptr);
- const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(
- java_lang_dex_file_->GetIndexForStringId(*string_id));
+ const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId("[I");
ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
Object* array = CheckAndAllocArrayFromCodeInstrumented(
@@ -367,16 +364,10 @@
Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
- const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
- ASSERT_TRUE(klass_string_id != nullptr);
- const DexFile::TypeId* klass_type_id = dex_file->FindTypeId(
- dex_file->GetIndexForStringId(*klass_string_id));
+ const DexFile::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(klass_type_id != nullptr);
- const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;");
- ASSERT_TRUE(type_string_id != nullptr);
- const DexFile::TypeId* type_type_id = dex_file->FindTypeId(
- dex_file->GetIndexForStringId(*type_string_id));
+ const DexFile::TypeId* type_type_id = dex_file->FindTypeId("Ljava/lang/Object;");
ASSERT_TRUE(type_type_id != nullptr);
const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
diff --git a/runtime/oat.h b/runtime/oat.h
index 276e7f3..5b780c3 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,7 +31,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '7', '2', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '7', '3', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index a162a4e..680f4ac 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -547,6 +547,25 @@
return false;
}
const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(dex_file_pointer);
+
+ if (UNLIKELY(oat > End())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
+ "lookup table offset", GetLocation().c_str(), i,
+ dex_file_location.c_str());
+ return false;
+ }
+ uint32_t lookup_table_offset = *reinterpret_cast<const uint32_t*>(oat);
+ oat += sizeof(lookup_table_offset);
+ if (Begin() + lookup_table_offset > End()) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
+ "lookup table", GetLocation().c_str(), i,
+ dex_file_location.c_str());
+ return false;
+ }
+ const uint8_t* lookup_table_data = lookup_table_offset != 0u
+ ? Begin() + lookup_table_offset
+ : nullptr;
+
const uint32_t* methods_offsets_pointer = reinterpret_cast<const uint32_t*>(oat);
oat += (sizeof(*methods_offsets_pointer) * header->class_defs_size_);
@@ -586,6 +605,7 @@
canonical_location,
dex_file_checksum,
dex_file_pointer,
+ lookup_table_data,
methods_offsets_pointer,
current_dex_cache_arrays);
oat_dex_files_storage_.push_back(oat_dex_file);
@@ -709,6 +729,7 @@
const std::string& canonical_dex_file_location,
uint32_t dex_file_location_checksum,
const uint8_t* dex_file_pointer,
+ const uint8_t* lookup_table_data,
const uint32_t* oat_class_offsets_pointer,
uint8_t* dex_cache_arrays)
: oat_file_(oat_file),
@@ -716,6 +737,7 @@
canonical_dex_file_location_(canonical_dex_file_location),
dex_file_location_checksum_(dex_file_location_checksum),
dex_file_pointer_(dex_file_pointer),
+ lookup_table_data_(lookup_table_data),
oat_class_offsets_pointer_(oat_class_offsets_pointer),
dex_cache_arrays_(dex_cache_arrays) {}
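
Aside: the loader now validates the embedded lookup-table offset before trusting it, along the lines of the bounds check sketched below; begin/end correspond to the mapped oat range, and the function names are illustrative, not ART APIs:

    #include <cstdint>
    #include <cstring>

    // Read a 32-bit lookup-table offset at 'cursor' and validate it against
    // the mapped [begin, end) range. Returns false on truncation;
    // '*table_out' is null when the offset is zero (no table emitted).
    bool ReadLookupTable(const uint8_t* cursor, const uint8_t* begin, const uint8_t* end,
                         const uint8_t** table_out) {
      if (cursor + sizeof(uint32_t) > end) {
        return false;  // No room left for the offset field itself.
      }
      uint32_t offset;
      std::memcpy(&offset, cursor, sizeof(offset));  // memcpy avoids an unaligned load.
      if (begin + offset > end) {
        return false;  // Offset points past the end of the mapped file.
      }
      *table_out = (offset != 0u) ? begin + offset : nullptr;
      return true;
    }
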
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 6acdf86..0a77654 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -400,6 +400,10 @@
return dex_cache_arrays_;
}
+ const uint8_t* GetLookupTableData() const {
+ return lookup_table_data_;
+ }
+
~OatDexFile();
private:
@@ -408,6 +412,7 @@
const std::string& canonical_dex_file_location,
uint32_t dex_file_checksum,
const uint8_t* dex_file_pointer,
+ const uint8_t* lookup_table_data,
const uint32_t* oat_class_offsets_pointer,
uint8_t* dex_cache_arrays);
@@ -416,6 +421,7 @@
const std::string canonical_dex_file_location_;
const uint32_t dex_file_location_checksum_;
const uint8_t* const dex_file_pointer_;
+ const uint8_t* lookup_table_data_;
const uint32_t* const oat_class_offsets_pointer_;
uint8_t* const dex_cache_arrays_;
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index c9a2cfb..03cad08 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -43,6 +43,8 @@
static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
+ DCHECK(IsAlignedParam(code, GetInstructionSetAlignment(kRuntimeISA)) ||
+ IsAlignedParam(header, GetInstructionSetAlignment(kRuntimeISA)));
return reinterpret_cast<OatQuickMethodHeader*>(header);
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 53b4f3a..1552318 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -372,9 +372,14 @@
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
const size_t number_of_vregs = m->GetCodeItem()->registers_size_;
- DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
uint32_t register_mask = stack_map.GetRegisterMask(encoding);
+ DexRegisterMap vreg_map = IsInInlinedFrame()
+ ? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
+ code_info.GetInlineInfoOf(stack_map, encoding),
+ encoding,
+ number_of_vregs)
+ : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
if (updated_vregs != nullptr && updated_vregs[vreg]) {
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 85ac4aa..4998a6a 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -63,7 +63,7 @@
ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
- obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
+ obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
offset, old_ref, ref);
}
}
@@ -101,7 +101,7 @@
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
- atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+ atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
}
}
AssertToSpaceInvariant(gc_root_source, ref);
@@ -140,7 +140,7 @@
if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
auto* atomic_root =
reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
- atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+ atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
}
}
AssertToSpaceInvariant(gc_root_source, ref);
diff --git a/runtime/stack.h b/runtime/stack.h
index 1276b24..aa7b616 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -698,6 +698,10 @@
return current_inlining_depth_ != 0;
}
+ size_t GetCurrentInliningDepth() const {
+ return current_inlining_depth_;
+ }
+
uintptr_t GetCurrentQuickFramePc() const {
return cur_quick_frame_pc_;
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index bdd5d10..dcf9601 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -60,8 +60,11 @@
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;
ThreadList::ThreadList()
- : suspend_all_count_(0), debug_suspend_all_count_(0), unregistering_count_(0),
- suspend_all_historam_("suspend all histogram", 16, 64), long_suspend_(false) {
+ : suspend_all_count_(0),
+ debug_suspend_all_count_(0),
+ unregistering_count_(0),
+ suspend_all_historam_("suspend all histogram", 16, 64),
+ long_suspend_(false) {
CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}
@@ -195,9 +198,7 @@
MutexLock mu(self, *Locks::logging_lock_);
*os_ << local_os.str();
}
- if (thread->GetState() == kRunnable) {
- barrier_.Pass(self);
- }
+ barrier_.Pass(self);
}
void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
@@ -285,12 +286,12 @@
// manually called.
MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ count = list_.size();
for (const auto& thread : list_) {
if (thread != self) {
while (true) {
if (thread->RequestCheckpoint(checkpoint_function)) {
// This thread will run its checkpoint some time in the near future.
- count++;
break;
} else {
// We are probably suspended, try to make sure that we stay suspended.
@@ -383,7 +384,8 @@
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
-size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
+ Closure* flip_callback,
gc::collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
const uint64_t start_time = NanoTime();
@@ -511,7 +513,9 @@
// Debugger thread might be set to kRunnable for a short period of time after the
// SuspendAllInternal. This is safe because it will be set back to suspended state before
// the SuspendAll returns.
-void ThreadList::SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2,
+void ThreadList::SuspendAllInternal(Thread* self,
+ Thread* ignore1,
+ Thread* ignore2,
bool debug_suspend) {
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
@@ -700,12 +704,14 @@
VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}
-static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const char* message,
+static void ThreadSuspendByPeerWarning(Thread* self,
+ LogSeverity severity,
+ const char* message,
jobject peer) {
JNIEnvExt* env = self->GetJniEnv();
ScopedLocalRef<jstring>
- scoped_name_string(env, (jstring)env->GetObjectField(
- peer, WellKnownClasses::java_lang_Thread_name));
+ scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
+ peer, WellKnownClasses::java_lang_Thread_name)));
ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
if (scoped_name_chars.c_str() == nullptr) {
LOG(severity) << message << ": " << peer;
@@ -715,8 +721,10 @@
}
}
-Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
- bool debug_suspension, bool* timed_out) {
+Thread* ThreadList::SuspendThreadByPeer(jobject peer,
+ bool request_suspension,
+ bool debug_suspension,
+ bool* timed_out) {
const uint64_t start_time = NanoTime();
useconds_t sleep_us = kThreadSuspendInitialSleepUs;
*timed_out = false;
@@ -813,12 +821,14 @@
}
}
-static void ThreadSuspendByThreadIdWarning(LogSeverity severity, const char* message,
+static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
+ const char* message,
uint32_t thread_id) {
LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}
-Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension,
+Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
+ bool debug_suspension,
bool* timed_out) {
const uint64_t start_time = NanoTime();
useconds_t sleep_us = kThreadSuspendInitialSleepUs;
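
Aside: with the b/24191051 fix, RunCheckpoint reports list_.size() up front, and every thread is expected to pass the barrier exactly once, whether it ran the checkpoint itself or the requester ran it on the thread's behalf while it was suspended. A minimal counting-barrier sketch (a stand-in for art::Barrier, which additionally blocks the waiter):

    #include <atomic>
    #include <cstddef>

    // Minimal counting barrier: the requester arms it with the total thread
    // count, and each checkpoint execution decrements it exactly once.
    class CountingBarrier {
     public:
      void Arm(std::size_t count) { pending_.store(count, std::memory_order_release); }
      void Pass() { pending_.fetch_sub(1, std::memory_order_acq_rel); }
      bool AllPassed() const { return pending_.load(std::memory_order_acquire) == 0; }

     private:
      std::atomic<std::size_t> pending_{0};
    };
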
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index c727432..07ea10d 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -55,8 +55,8 @@
// Thread suspension support.
void ResumeAll()
- UNLOCK_FUNCTION(Locks::mutator_lock_)
- REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ UNLOCK_FUNCTION(Locks::mutator_lock_);
void Resume(Thread* thread, bool for_debugger = false)
REQUIRES(!Locks::thread_suspend_count_lock_);
@@ -76,7 +76,8 @@
// is set to true.
Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
// Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
@@ -84,14 +85,16 @@
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, which may be terminating. If the suspension times out then *timed_out is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
// Find an already suspended thread (or self) by its id.
Thread* FindThreadByThreadId(uint32_t thin_lock_id);
// Run a checkpoint on threads; running threads are not suspended but run the checkpoint inside
- // of the suspend check. Returns how many checkpoints we should expect to run.
+ // of the suspend check. Returns how many checkpoints are expected to run, including those
+ // for already suspended threads. See b/24191051.
size_t RunCheckpoint(Closure* checkpoint_function)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
@@ -100,14 +103,17 @@
// Flip thread roots from from-space refs to to-space refs. Used by
// the concurrent copying collector.
- size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+ size_t FlipThreadRoots(Closure* thread_flip_visitor,
+ Closure* flip_callback,
gc::collector::GarbageCollector* collector)
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
// Suspends all threads
void SuspendAllForDebugger()
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
void SuspendSelfForDebugger()
@@ -126,10 +132,14 @@
// Add/remove current thread from list.
void Register(Thread* self)
- REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(Locks::runtime_shutdown_lock_)
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
- void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
- !Locks::thread_suspend_count_lock_);
+ void Unregister(Thread* self)
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
void VisitRoots(RootVisitor* visitor) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -159,7 +169,9 @@
void WaitForOtherNonDaemonThreadsToExit()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
- void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr,
+ void SuspendAllInternal(Thread* self,
+ Thread* ignore1,
+ Thread* ignore2 = nullptr,
bool debug_suspend = false)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
@@ -200,8 +212,8 @@
!Locks::mutator_lock_);
// No REQUIRES(mutator_lock_) since the unlock function already asserts this.
~ScopedSuspendAll()
- UNLOCK_FUNCTION(Locks::mutator_lock_)
- REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ UNLOCK_FUNCTION(Locks::mutator_lock_);
};
} // namespace art
diff --git a/runtime/type_lookup_table.cc b/runtime/type_lookup_table.cc
new file mode 100644
index 0000000..0d40bb7
--- /dev/null
+++ b/runtime/type_lookup_table.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "type_lookup_table.h"
+
+#include "dex_file-inl.h"
+#include "utf-inl.h"
+#include "utils.h"
+
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <vector>
+
+namespace art {
+
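+// Packs the part of the hash that fits into the free upper bits together with the
+// class_def_idx into a single 16-bit value; the low bits of the hash select the
+// bucket and need not be stored.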
+static uint16_t MakeData(uint16_t class_def_idx, uint32_t hash, uint32_t mask) {
+ uint16_t hash_mask = static_cast<uint16_t>(~mask);
+ return (static_cast<uint16_t>(hash) & hash_mask) | class_def_idx;
+}
+
+TypeLookupTable::~TypeLookupTable() {
+ if (!owns_entries_) {
+ // We don't actually own the entries, don't let the unique_ptr release them.
+ entries_.release();
+ }
+}
+
+uint32_t TypeLookupTable::RawDataLength() const {
+ return RawDataLength(dex_file_);
+}
+
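+// The table is sized to the next power of two so that bucket indexing can use
+// masking instead of modulo arithmetic.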
+uint32_t TypeLookupTable::RawDataLength(const DexFile& dex_file) {
+ return RoundUpToPowerOfTwo(dex_file.NumClassDefs()) * sizeof(Entry);
+}
+
+TypeLookupTable* TypeLookupTable::Create(const DexFile& dex_file) {
+ const uint32_t num_class_defs = dex_file.NumClassDefs();
+ return (num_class_defs == 0 || num_class_defs > std::numeric_limits<uint16_t>::max())
+ ? nullptr
+ : new TypeLookupTable(dex_file);
+}
+
+TypeLookupTable* TypeLookupTable::Open(const uint8_t* raw_data, const DexFile& dex_file) {
+ return new TypeLookupTable(raw_data, dex_file);
+}
+
+TypeLookupTable::TypeLookupTable(const DexFile& dex_file)
+ : dex_file_(dex_file),
+ mask_(RoundUpToPowerOfTwo(dex_file.NumClassDefs()) - 1),
+ entries_(new Entry[mask_ + 1]),
+ owns_entries_(true) {
+ std::vector<uint16_t> conflict_class_defs;
+ // The first stage: put elements at their initial positions. If an initial position is already
+ // occupied, delay the insertion of the element to the second stage to reduce the probing
+ // distance.
+ for (size_t i = 0; i < dex_file.NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+ const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+ const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
+ Entry entry;
+ entry.str_offset = str_id.string_data_off_;
+ entry.data = MakeData(i, hash, GetSizeMask());
+ if (!SetOnInitialPos(entry, hash)) {
+ conflict_class_defs.push_back(i);
+ }
+ }
+ // The second stage: the initial positions of these elements collided. Put them into the
+ // nearest free cells and link them into their buckets' chains by updating next_pos_delta.
+ for (uint16_t class_def_idx : conflict_class_defs) {
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+ const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+ const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
+ Entry entry;
+ entry.str_offset = str_id.string_data_off_;
+ entry.data = MakeData(class_def_idx, hash, GetSizeMask());
+ Insert(entry, hash);
+ }
+}
+
+TypeLookupTable::TypeLookupTable(const uint8_t* raw_data, const DexFile& dex_file)
+ : dex_file_(dex_file),
+ mask_(RoundUpToPowerOfTwo(dex_file.NumClassDefs()) - 1),
+ entries_(reinterpret_cast<Entry*>(const_cast<uint8_t*>(raw_data))),
+ owns_entries_(false) {}
+
+bool TypeLookupTable::SetOnInitialPos(const Entry& entry, uint32_t hash) {
+ const uint32_t pos = hash & GetSizeMask();
+ if (!entries_[pos].IsEmpty()) {
+ return false;
+ }
+ entries_[pos] = entry;
+ entries_[pos].next_pos_delta = 0;
+ return true;
+}
+
+void TypeLookupTable::Insert(const Entry& entry, uint32_t hash) {
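+ // Walk to the end of this bucket's chain, then linearly probe (wrapping around
+ // the table) for the next free slot; link it to the old tail via next_pos_delta
+ // and mark the inserted entry as the last of the chain.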
+ uint32_t pos = FindLastEntryInBucket(hash & GetSizeMask());
+ uint32_t next_pos = (pos + 1) & GetSizeMask();
+ while (!entries_[next_pos].IsEmpty()) {
+ next_pos = (next_pos + 1) & GetSizeMask();
+ }
+ const uint32_t delta = (next_pos >= pos) ? (next_pos - pos) : (next_pos + Size() - pos);
+ entries_[pos].next_pos_delta = delta;
+ entries_[next_pos] = entry;
+ entries_[next_pos].next_pos_delta = 0;
+}
+
+uint32_t TypeLookupTable::FindLastEntryInBucket(uint32_t pos) const {
+ const Entry* entry = &entries_[pos];
+ while (!entry->IsLast()) {
+ pos = (pos + entry->next_pos_delta) & GetSizeMask();
+ entry = &entries_[pos];
+ }
+ return pos;
+}
+
+} // namespace art
diff --git a/runtime/type_lookup_table.h b/runtime/type_lookup_table.h
new file mode 100644
index 0000000..3c2295c
--- /dev/null
+++ b/runtime/type_lookup_table.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_TYPE_LOOKUP_TABLE_H_
+#define ART_RUNTIME_TYPE_LOOKUP_TABLE_H_
+
+#include "dex_file.h"
+#include "leb128.h"
+#include "utf.h"
+
+namespace art {
+
+/**
+ * TypeLookupTable is used to find a class_def_idx by class descriptor quickly.
+ * The implementation is based on a hash table.
+ * The class is instantiated at compile time by calling the Create() method and is written into
+ * the OAT file. At runtime the raw data is read from a memory-mapped file by calling the Open()
+ * method, so the table memory remains clean.
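+ *
+ * Illustrative use (a sketch, not taken from the callers in this change):
+ *   std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(dex_file));
+ *   uint32_t class_def_idx = table->Lookup(descriptor, ComputeModifiedUtf8Hash(descriptor));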
+ */
+class TypeLookupTable {
+ public:
+ ~TypeLookupTable();
+
+ // Return the number of buckets in the lookup table.
+ uint32_t Size() const {
+ return mask_ + 1;
+ }
+
+ // Searches for the class_def_idx by class descriptor and its hash.
+ // Returns DexFile::kDexNoIndex if nothing is found.
+ ALWAYS_INLINE uint32_t Lookup(const char* str, uint32_t hash) const {
+ uint32_t pos = hash & GetSizeMask();
+ // Thanks to the special insertion algorithm, the element at position pos is either empty or
+ // the start of the bucket.
+ const Entry* entry = &entries_[pos];
+ while (!entry->IsEmpty()) {
+ if (CmpHashBits(entry->data, hash) && IsStringsEquals(str, entry->str_offset)) {
+ return GetClassDefIdx(entry->data);
+ }
+ if (entry->IsLast()) {
+ return DexFile::kDexNoIndex;
+ }
+ pos = (pos + entry->next_pos_delta) & GetSizeMask();
+ entry = &entries_[pos];
+ }
+ return DexFile::kDexNoIndex;
+ }
+
+ // Creates a lookup table for the given dex file.
+ static TypeLookupTable* Create(const DexFile& dex_file);
+
+ // Opens a lookup table from binary data. The lookup table does not own the binary data.
+ static TypeLookupTable* Open(const uint8_t* raw_data, const DexFile& dex_file);
+
+ // Returns a pointer to the lookup table's binary data. Used by the oat writer.
+ const uint8_t* RawData() const {
+ return reinterpret_cast<const uint8_t*>(entries_.get());
+ }
+
+ // Returns the length of the binary data. Used by the oat writer.
+ uint32_t RawDataLength() const;
+
+ // Returns the length of the binary data for the specified dex file.
+ static uint32_t RawDataLength(const DexFile& dex_file);
+
+ private:
+ /**
+ * To find an element we need to compare strings, and it is faster to compare hashes first and
+ * only then the strings themselves. However, the table does not store the full hash of each
+ * element. Two observations help:
+ * 1. The low bits of the hash are the same for all elements within one bucket.
+ * 2. If the dex file contains N classes and the hash table has 2^n slots (where N <= 2^n),
+ * then only n bits are needed to store class_def_idx, so 16-n bits are free and can encode a
+ * part of the element's hash.
+ * The hash of an element can thus be divided into three parts:
+ * XXXX XXXX XXXX YYYY YZZZ ZZZZ ZZZZZ
+ * Z - the part of the hash implied by the bucket (these bits are the same for all elements in
+ * the bucket) - n bits
+ * Y - the part of the hash that fits into the free 16-n bits (because only n bits are used to
+ * store class_def_idx)
+ * X - the part of the hash that cannot be used without enlarging the entry
+ * So the data field of Entry stores class_def_idx together with a part of the entry's hash.
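+ * Illustrative example, assuming a 16-bucket table (n = 4, mask_ = 0xF): for hash 0x12345678
+ * the bucket is 0x8 (the low 4 bits), data keeps hash bits 4..15 next to class_def_idx
+ * (data & 0xFFF0 == 0x5670), and the upper 16 bits (0x1234) are not stored at all.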
+ */
+ struct Entry {
+ uint32_t str_offset;
+ uint16_t data;
+ uint16_t next_pos_delta;
+
+ Entry() : str_offset(0), data(0), next_pos_delta(0) {}
+
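+ // A zero str_offset marks an empty slot; offset 0 in a dex file is the header,
+ // so no string data ever starts there.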
+ bool IsEmpty() const {
+ return str_offset == 0;
+ }
+
+ bool IsLast() const {
+ return next_pos_delta == 0;
+ }
+ };
+
+ // Construct from a dex file.
+ explicit TypeLookupTable(const DexFile& dex_file);
+
+ // Construct from a dex file with existing data.
+ TypeLookupTable(const uint8_t* raw_data, const DexFile& dex_file);
+
+ bool IsStringsEquals(const char* str, uint32_t str_offset) const {
+ const uint8_t* ptr = dex_file_.Begin() + str_offset;
+ // Skip string length.
+ DecodeUnsignedLeb128(&ptr);
+ return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(
+ str, reinterpret_cast<const char*>(ptr)) == 0;
+ }
+
+ // Extracts the hash bits from the element's data and compares them with
+ // the corresponding bits of the given hash.
+ bool CmpHashBits(uint32_t data, uint32_t hash) const {
+ uint32_t mask = static_cast<uint16_t>(~GetSizeMask());
+ return (hash & mask) == (data & mask);
+ }
+
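+ // The class_def_idx is stored in the low bits of data (see MakeData).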
+ uint32_t GetClassDefIdx(uint32_t data) const {
+ return data & mask_;
+ }
+
+ uint32_t GetSizeMask() const {
+ return mask_;
+ }
+
+ // Attempt to set an entry in the slot corresponding to its hash. If something is already
+ // there, return false. Otherwise return true.
+ bool SetOnInitialPos(const Entry& entry, uint32_t hash);
+
+ // Insert an entry, probes until there is an empty slot.
+ void Insert(const Entry& entry, uint32_t hash);
+
+ // Find the last entry in a chain.
+ uint32_t FindLastEntryInBucket(uint32_t cur_pos) const;
+
+ const DexFile& dex_file_;
+ const uint32_t mask_;
+ std::unique_ptr<Entry[]> entries_;
+ // owns_entries_ specifies if the lookup table owns the entries_ array.
+ const bool owns_entries_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TypeLookupTable);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_TYPE_LOOKUP_TABLE_H_
diff --git a/runtime/type_lookup_table_test.cc b/runtime/type_lookup_table_test.cc
new file mode 100644
index 0000000..7f500cc
--- /dev/null
+++ b/runtime/type_lookup_table_test.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <memory>
+
+#include "common_runtime_test.h"
+#include "dex_file-inl.h"
+#include "scoped_thread_state_change.h"
+#include "type_lookup_table.h"
+#include "utf-inl.h"
+
+namespace art {
+
+class TypeLookupTableTest : public CommonRuntimeTest {
+ public:
+ size_t kDexNoIndex = DexFile::kDexNoIndex; // Make a copy to prevent linking errors.
+};
+
+TEST_F(TypeLookupTableTest, CreateLookupTable) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+ std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+ ASSERT_NE(nullptr, table.get());
+ ASSERT_NE(nullptr, table->RawData());
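+ // The Lookup dex file defines three classes (A, AB, C); three class defs round up to
+ // four buckets of 8-byte entries (see TypeLookupTable::RawDataLength).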
+ ASSERT_EQ(32U, table->RawDataLength());
+}
+
+TEST_F(TypeLookupTableTest, FindNonExistingClassWithoutCollisions) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+ std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+ ASSERT_NE(nullptr, table.get());
+ const char* descriptor = "LBA;";
+ size_t hash = ComputeModifiedUtf8Hash(descriptor);
+ uint32_t class_def_idx = table->Lookup(descriptor, hash);
+ ASSERT_EQ(kDexNoIndex, class_def_idx);
+}
+
+TEST_F(TypeLookupTableTest, FindNonExistingClassWithCollisions) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+ std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+ ASSERT_NE(nullptr, table.get());
+ const char* descriptor = "LDA;";
+ size_t hash = ComputeModifiedUtf8Hash(descriptor);
+ uint32_t class_def_idx = table->Lookup(descriptor, hash);
+ ASSERT_EQ(kDexNoIndex, class_def_idx);
+}
+
+TEST_F(TypeLookupTableTest, FindClassNoCollisions) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+ std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+ ASSERT_NE(nullptr, table.get());
+ const char* descriptor = "LC;";
+ size_t hash = ComputeModifiedUtf8Hash(descriptor);
+ uint32_t class_def_idx = table->Lookup(descriptor, hash);
+ ASSERT_EQ(2U, class_def_idx);
+}
+
+TEST_F(TypeLookupTableTest, FindClassWithCollisions) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<const DexFile> dex_file(OpenTestDexFile("Lookup"));
+ std::unique_ptr<TypeLookupTable> table(TypeLookupTable::Create(*dex_file));
+ ASSERT_NE(nullptr, table.get());
+ const char* descriptor = "LAB;";
+ size_t hash = ComputeModifiedUtf8Hash(descriptor);
+ uint32_t class_def_idx = table->Lookup(descriptor, hash);
+ ASSERT_EQ(1U, class_def_idx);
+}
+
+} // namespace art
diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run
new file mode 100755
index 0000000..27c501d
--- /dev/null
+++ b/test/004-ThreadStress/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Be less aggressive than the default debug option for the jit code cache
+# to avoid timeouts.
+exec ${RUN} "$@" --runtime-option -Xjitcodecachesize:1M
diff --git a/test/541-regression-inlined-deopt/expected.txt b/test/541-regression-inlined-deopt/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/541-regression-inlined-deopt/expected.txt
diff --git a/test/541-regression-inlined-deopt/info.txt b/test/541-regression-inlined-deopt/info.txt
new file mode 100644
index 0000000..209588f
--- /dev/null
+++ b/test/541-regression-inlined-deopt/info.txt
@@ -0,0 +1,4 @@
+Regression test for deopt from optimized code which would use the top-level
+stack map for deopting inlined frames. The test case is written in smali for
+full control over vregs; the previous test 449 happened to pass because the
+vreg maps at the various inlining depths were similar.
diff --git a/test/541-regression-inlined-deopt/smali/TestCase.smali b/test/541-regression-inlined-deopt/smali/TestCase.smali
new file mode 100644
index 0000000..a109775
--- /dev/null
+++ b/test/541-regression-inlined-deopt/smali/TestCase.smali
@@ -0,0 +1,55 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+.method private static $inline$depth1([I)V
+ .registers 3
+
+ # Expects array in v2.
+
+ const v0, 0x0
+
+ const v1, 0x3
+ aput v0, p0, v1
+
+ const v1, 0x4
+ aput v0, p0, v1
+
+ return-void
+.end method
+
+.method private static $inline$depth0([I)V
+ .registers 1
+
+ # Expects array in v0.
+
+ invoke-static {p0}, LTestCase;->$inline$depth1([I)V
+ return-void
+.end method
+
+.method public static foo()V
+ .registers 10
+
+ # Create a new array short enough to throw AIOOB in $inline$depth1.
+ # Make sure the reference is not stored in the same vreg as used by
+ # the inlined methods.
+
+ const v5, 0x3
+ new-array v6, v5, [I
+
+ invoke-static {v6}, LTestCase;->$inline$depth0([I)V
+ return-void
+.end method
diff --git a/test/541-regression-inlined-deopt/src/Main.java b/test/541-regression-inlined-deopt/src/Main.java
new file mode 100644
index 0000000..fa79590
--- /dev/null
+++ b/test/541-regression-inlined-deopt/src/Main.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Throwable {
+ try {
+ Class<?> c = Class.forName("TestCase");
+ Method m = c.getMethod("foo");
+ m.invoke(null, (Object[]) null);
+ } catch (InvocationTargetException ex) {
+ // Code should have thrown AIOOB.
+ if (!(ex.getCause() instanceof ArrayIndexOutOfBoundsException)) {
+ throw ex;
+ }
+ }
+ }
+}
diff --git a/test/Lookup/A.java b/test/Lookup/A.java
new file mode 100644
index 0000000..666ba18
--- /dev/null
+++ b/test/Lookup/A.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class A {}
diff --git a/test/Lookup/AB.java b/test/Lookup/AB.java
new file mode 100644
index 0000000..b231708
--- /dev/null
+++ b/test/Lookup/AB.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class AB {}
diff --git a/test/Lookup/C.java b/test/Lookup/C.java
new file mode 100644
index 0000000..5b90069
--- /dev/null
+++ b/test/Lookup/C.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class C {}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 280b4bc..18867fd 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -214,7 +214,7 @@
if [ "$USE_JVM" = "n" ]; then
for feature in ${EXPERIMENTAL}; do
- FLAGS="${FLAGS} -Xexperimental:${feature}"
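+ # Pass the experimental flag to the runtime spawned by dex2oat as well,
+ # not only to the runtime that executes the test.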
+ FLAGS="${FLAGS} -Xexperimental:${feature} -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:${feature}"
COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
done
fi
diff --git a/test/run-all-tests b/test/run-all-tests
index 76283b7..6d5c28c 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -44,12 +44,45 @@
elif [ "x$1" = "x--use-java-home" ]; then
run_args="${run_args} --use-java-home"
shift
+ elif [ "x$1" = "x--no-image" ]; then
+ run_args="${run_args} --no-image"
+ shift
+ elif [ "x$1" = "x--quick" ]; then
+ run_args="${run_args} --quick"
+ shift
+ elif [ "x$1" = "x--optimizing" ]; then
+ run_args="${run_args} --optimizing"
+ shift
+ elif [ "x$1" = "x--image" ]; then
+ run_args="${run_args} --image"
+ shift
+ elif [ "x$1" = "x--never-clean" ]; then
+ run_args="${run_args} --never-clean"
+ shift
elif [ "x$1" = "x--jvm" ]; then
run_args="${run_args} --jvm"
shift
elif [ "x$1" = "x--debug" ]; then
run_args="${run_args} --debug"
shift
+ elif [ "x$1" = "x--build-only" ]; then
+ run_args="${run_args} --build-only"
+ shift
+ elif [ "x$1" = "x--build-with-jack" ]; then
+ run_args="${run_args} --build-with-jack"
+ shift
+ elif [ "x$1" = "x--build-with-javac-dx" ]; then
+ run_args="${run_args} --build-with-javac-dx"
+ shift
+ elif [ "x$1" = "x--dex2oat-swap" ]; then
+ run_args="${run_args} --dex2oat-swap"
+ shift
+ elif [ "x$1" = "x--dalvik" ]; then
+ run_args="${run_args} --dalvik"
+ shift
+ elif [ "x$1" = "x--debuggable" ]; then
+ run_args="${run_args} --debuggable"
+ shift
elif [ "x$1" = "x--zygote" ]; then
run_args="${run_args} --zygote"
shift
@@ -59,15 +92,15 @@
elif [ "x$1" = "x--jit" ]; then
run_args="${run_args} --jit"
shift
+ elif [ "x$1" = "x--verify-soft-fail" ]; then
+ run_args="${run_args} --verify-soft-fail"
+ shift
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
shift
elif [ "x$1" = "x--no-optimize" ]; then
run_args="${run_args} --no-optimize"
shift
- elif [ "x$1" = "x--valgrind" ]; then
- run_args="${run_args} --valgrind"
- shift
elif [ "x$1" = "x--dev" ]; then
run_args="${run_args} --dev"
shift
@@ -116,6 +149,15 @@
elif [ "x$1" = "x--always-clean" ]; then
run_args="${run_args} --always-clean"
shift
+ elif [ "x$1" = "x--pic-test" ]; then
+ run_args="${run_args} --pic-test"
+ shift
+ elif [ "x$1" = "x--pic-image" ]; then
+ run_args="${run_args} --pic-image"
+ shift
+ elif [ "x$1" = "x--strace" ]; then
+ run_args="${run_args} --strace"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
@@ -134,9 +176,13 @@
echo " Options are all passed to run-test; refer to that for " \
"further documentation:"
echo " --debug --dev --host --interpreter --jit --jvm --no-optimize"
- echo " --no-verify -O --update --valgrind --zygote --64 --relocate"
- echo " --prebuild --always-clean --gcstress --gcverify --trace"
- echo " --no-patchoat --no-dex2oat --use-java-home"
+ echo " --no-verify --verify-soft-fail -O --update --zygote --64"
+ echo " --relocate --prebuild --always-clean --gcstress --gcverify"
+ echo " --trace --no-patchoat --no-dex2oat --use-java-home --pic-image"
+ echo " --pic-test --strace --debuggable --dalvik --dex2oat-swap"
+ echo " --build-only --build-with-jack --build-with-javac-dx"
+ echo " --never-clean --image --no-image --quick --optimizing"
+ echo " --no-relocate --no-prebuild"
echo " Specific Runtime Options:"
echo " --seq Run tests one-by-one, avoiding failures caused by busy CPU"
) 1>&2
diff --git a/test/run-test b/test/run-test
index 30c8f5b..f2bbaa7 100755
--- a/test/run-test
+++ b/test/run-test
@@ -528,6 +528,7 @@
echo " --debug Wait for a debugger to attach."
echo " --debuggable Whether to compile Java code for a debugger."
echo " --gdb Run under gdb; incompatible with some tests."
+ echo " --gdb-arg Pass an option to gdb."
echo " --build-only Build test files only (off by default)."
echo " --build-with-javac-dx Build test files with javac and dx (on by default)."
echo " --build-with-jack Build test files with jack and jill (off by default)."
@@ -553,6 +554,8 @@
echo " the image and oat files be relocated to a random"
echo " address before running. (default)"
echo " --no-relocate Force the use of no relocating in the test"
+ echo " --image Run the test using a precompiled boot image. (default)"
+ echo " --no-image Run the test without a precompiled boot image."
echo " --host Use the host-mode virtual machine."
echo " --invoke-with Pass --invoke-with option to runtime."
echo " --dalvik Use Dalvik (off by default)."
@@ -564,6 +567,7 @@
"files."
echo " --64 Run the test in 64-bit mode"
echo " --trace Run with method tracing"
+ echo " --strace Run with syscall tracing from strace."
echo " --stream Run method tracing in streaming mode (requires --trace)"
echo " --gcstress Run with gc stress testing"
echo " --gcverify Run with gc verification"
@@ -573,6 +577,9 @@
echo " --dex2oat-swap Use a dex2oat swap file."
echo " --instruction-set-features [string]"
echo " Set instruction-set-features for compilation."
+ echo " --pic-image Use an image compiled with position independent code for the"
+ echo " boot class path."
+ echo " --pic-test Compile the test code position independent."
) 1>&2
exit 1
fi
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 9aed271..de27a6f 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -30,8 +30,6 @@
art="/data/local/tmp/system/bin/art"
art_debugee="sh /data/local/tmp/system/bin/art"
-# We use Quick's image on target because optimizing's image is not compiled debuggable.
-image="-Ximage:/data/art-test/core.art"
args=$@
debuggee_args="-Xcompiler-option --debuggable"
device_dir="--device-dir=/data/local/tmp"
@@ -41,6 +39,8 @@
image_compiler_option=""
debug="no"
verbose="no"
+image="-Ximage:/data/art-test/core-jit.art"
+vm_args=""
# By default, we run the whole JDWP test suite.
test="org.apache.harmony.jpda.tests.share.AllTests"
@@ -88,7 +88,10 @@
fi
done
-vm_args="--vm-arg $image --vm-arg -Xusejit:true"
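+# Only pass the boot image option when an image is configured, so an empty
+# $image does not add a stray --vm-arg.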
+if [[ "$image" != "" ]]; then
+ vm_args="--vm-arg $image"
+fi
+vm_args="$vm_args --vm-arg -Xusejit:true"
debuggee_args="$debuggee_args -Xusejit:true"
if [[ $debug == "yes" ]]; then
art="$art -d"