Diffstat:
-rw-r--r--  compiler/common_compiler_test.cc | 2
-rw-r--r--  compiler/driver/compiler_driver.cc | 8
-rw-r--r--  compiler/driver/compiler_options_map.def | 2
-rw-r--r--  dex2oat/dex2oat_test.cc | 38
-rw-r--r--  dex2oat/linker/elf_writer_test.cc | 29
-rw-r--r--  dex2oat/linker/image_writer.cc | 86
-rw-r--r--  dex2oat/linker/image_writer.h | 3
-rw-r--r--  libartbase/base/mem_map.h | 28
-rw-r--r--  libartbase/base/mem_map_test.cc | 148
-rw-r--r--  libartbase/base/zip_archive.cc | 5
-rw-r--r--  libdexfile/dex/modifiers.h | 10
-rw-r--r--  oatdump/oatdump.cc | 80
-rw-r--r--  openjdkjvmti/ti_class_definition.cc | 2
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 1
-rw-r--r--  runtime/art_method.h | 19
-rw-r--r--  runtime/base/mem_map_arena_pool.cc | 1
-rw-r--r--  runtime/class_linker.cc | 25
-rw-r--r--  runtime/class_linker_test.cc | 2
-rw-r--r--  runtime/debugger.cc | 1
-rw-r--r--  runtime/dexopt_test.cc | 4
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc | 2
-rw-r--r--  runtime/gc/accounting/atomic_stack.h | 3
-rw-r--r--  runtime/gc/accounting/bitmap.cc | 3
-rw-r--r--  runtime/gc/accounting/card_table.cc | 3
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc | 2
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h | 3
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 3
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 6
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 3
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 67
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc | 198
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 3
-rw-r--r--  runtime/gc/heap.cc | 15
-rw-r--r--  runtime/gc/heap.h | 3
-rw-r--r--  runtime/gc/heap_test.cc | 2
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 6
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 2
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 12
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 7
-rw-r--r--  runtime/gc/space/dlmalloc_space_random_test.cc | 10
-rw-r--r--  runtime/gc/space/dlmalloc_space_static_test.cc | 10
-rw-r--r--  runtime/gc/space/image_space.cc | 26
-rw-r--r--  runtime/gc/space/large_object_space.cc | 11
-rw-r--r--  runtime/gc/space/large_object_space.h | 2
-rw-r--r--  runtime/gc/space/large_object_space_test.cc | 4
-rw-r--r--  runtime/gc/space/malloc_space.cc | 6
-rw-r--r--  runtime/gc/space/malloc_space.h | 3
-rw-r--r--  runtime/gc/space/region_space.cc | 4
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 15
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 7
-rw-r--r--  runtime/gc/space/rosalloc_space_random_test.cc | 15
-rw-r--r--  runtime/gc/space/rosalloc_space_static_test.cc | 14
-rw-r--r--  runtime/gc/space/space_create_test.cc | 29
-rw-r--r--  runtime/gc/space/space_test.h | 8
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/image.h | 38
-rw-r--r--  runtime/indirect_reference_table.cc | 6
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 25
-rw-r--r--  runtime/interpreter/interpreter_common.h | 114
-rw-r--r--  runtime/interpreter/interpreter_switch_impl-inl.h | 9
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 26
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 1
-rw-r--r--  runtime/jit/jit_code_cache.cc | 3
-rw-r--r--  runtime/jni/jni_internal.cc | 1
-rw-r--r--  runtime/mirror/dex_cache-inl.h | 27
-rw-r--r--  runtime/mirror/dex_cache.cc | 15
-rw-r--r--  runtime/mirror/dex_cache.h | 73
-rw-r--r--  runtime/mirror/string-alloc-inl.h | 259
-rw-r--r--  runtime/mirror/string-inl.h | 216
-rw-r--r--  runtime/mirror/string.cc | 2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 3
-rw-r--r--  runtime/native/java_lang_Class.cc | 1
-rw-r--r--  runtime/native/java_lang_String.cc | 2
-rw-r--r--  runtime/native/java_lang_StringFactory.cc | 2
-rw-r--r--  runtime/native/sun_misc_Unsafe.cc | 31
-rw-r--r--  runtime/runtime.cc | 6
-rw-r--r--  runtime/runtime_callbacks_test.cc | 3
-rw-r--r--  runtime/thread.cc | 142
-rw-r--r--  runtime/thread.h | 7
-rw-r--r--  runtime/thread_pool.cc | 3
-rw-r--r--  runtime/well_known_classes.cc | 2
-rw-r--r--  runtime/well_known_classes.h | 1
-rw-r--r--  test/004-ThreadStress/src-art/Main.java | 36
-rw-r--r--  test/913-heaps/expected.txt | 52
84 files changed, 1318 insertions, 791 deletions
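
The change that recurs across the hunks below is the MemMap::MapAnonymous() API: the common overload drops the explicit address parameter, and a new overload takes a MemMap* reservation so callers carve mappings out of a previously reserved region instead of passing a raw address. A minimal sketch of the two call shapes, based on the libartbase/base/mem_map.h hunk below (the names and sizes are illustrative only, not taken from the patch; error handling is elided):

    std::string error_msg;
    // Common overload: no explicit address argument any more.
    MemMap reservation = MemMap::MapAnonymous("example reservation",
                                              /*byte_count=*/ 3 * kPageSize,
                                              PROT_READ | PROT_WRITE,
                                              /*low_4gb=*/ false,
                                              &error_msg);
    // Reservation-based overload: maps at the reservation's current begin and
    // consumes that part of it (the reservation becomes invalid once fully used).
    MemMap chunk = MemMap::MapAnonymous("example chunk",
                                        /*byte_count=*/ kPageSize,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        &reservation,
                                        &error_msg);

The CheckNoGaps and Reservation tests in libartbase/base/mem_map_test.cc below exercise this pattern against the real implementation.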
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 586891a3ff..fc8cd528fa 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -328,6 +328,8 @@ void CommonCompilerTest::ReserveImageSpace() {
(size_t)120 * 1024 * 1024, // 120MB
PROT_NONE,
false /* no need for 4gb flag with fixed mmap */,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
CHECK(image_reservation_.IsValid()) << error_msg;
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 864b215a90..df6e8a83e1 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -719,6 +719,10 @@ void CompilerDriver::ResolveConstStrings(const std::vector<const DexFile*>& dex_
for (const DexFile* dex_file : dex_files) {
dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file));
+ if (only_startup_strings) {
+ // When resolving startup strings, create the preresolved strings array.
+ dex_cache->AddPreResolvedStringsArray();
+ }
TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
for (ClassAccessor accessor : dex_file->GetClasses()) {
@@ -757,6 +761,10 @@ void CompilerDriver::ResolveConstStrings(const std::vector<const DexFile*>& dex_
: inst->VRegB_31c());
ObjPtr<mirror::String> string = class_linker->ResolveString(string_index, dex_cache);
CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
+ if (only_startup_strings) {
+ dex_cache->GetPreResolvedStrings()[string_index.index_] =
+ GcRoot<mirror::String>(string);
+ }
++num_instructions;
break;
}
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index a593240365..1ec34ec73a 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -52,7 +52,7 @@ COMPILER_OPTIONS_KEY (Unit, Baseline)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
-COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, false)
+COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, kIsDebugBuild)
COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
COMPILER_OPTIONS_KEY (std::string, DumpCFG)
COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 770b696d6b..10d2b6f5ce 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -2080,8 +2080,8 @@ TEST_F(Dex2oatTest, AppImageResolveStrings) {
ScratchFile profile_file;
std::vector<uint16_t> methods;
std::vector<dex::TypeIndex> classes;
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("StringLiterals"));
{
- std::unique_ptr<const DexFile> dex(OpenTestDexFile("StringLiterals"));
for (ClassAccessor accessor : dex->GetClasses()) {
if (accessor.GetDescriptor() == std::string("LStringLiterals$StartupClass;")) {
classes.push_back(accessor.GetClassIdx());
@@ -2141,15 +2141,43 @@ TEST_F(Dex2oatTest, AppImageResolveStrings) {
seen.insert(str.Read()->ToModifiedUtf8());
}
});
+ // Ensure that the dex cache has a preresolved string array.
+ std::set<std::string> preresolved_seen;
+ bool saw_dexcache = false;
+ space->GetLiveBitmap()->VisitAllMarked(
+ [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj->IsDexCache<kVerifyNone>()) {
+ ObjPtr<mirror::DexCache> dex_cache = obj->AsDexCache();
+ GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+ ASSERT_EQ(dex->NumStringIds(), dex_cache->NumPreResolvedStrings());
+ for (size_t i = 0; i < dex_cache->NumPreResolvedStrings(); ++i) {
+ ObjPtr<mirror::String> string = preresolved_strings[i].Read<kWithoutReadBarrier>();
+ if (string != nullptr) {
+ preresolved_seen.insert(string->ToModifiedUtf8());
+ }
+ }
+ saw_dexcache = true;
+ }
+ });
+ ASSERT_TRUE(saw_dexcache);
+ // Everything in the preresolved array should also be in the intern table.
+ for (const std::string& str : preresolved_seen) {
+ EXPECT_TRUE(seen.find(str) != seen.end());
+ }
// Normal methods
- EXPECT_TRUE(seen.find("Loading ") != seen.end());
- EXPECT_TRUE(seen.find("Starting up") != seen.end());
- EXPECT_TRUE(seen.find("abcd.apk") != seen.end());
+ EXPECT_TRUE(preresolved_seen.find("Loading ") != preresolved_seen.end());
+ EXPECT_TRUE(preresolved_seen.find("Starting up") != preresolved_seen.end());
+ EXPECT_TRUE(preresolved_seen.find("abcd.apk") != preresolved_seen.end());
EXPECT_TRUE(seen.find("Unexpected error") == seen.end());
EXPECT_TRUE(seen.find("Shutting down!") == seen.end());
+ EXPECT_TRUE(preresolved_seen.find("Unexpected error") == preresolved_seen.end());
+ EXPECT_TRUE(preresolved_seen.find("Shutting down!") == preresolved_seen.end());
// Classes initializers
- EXPECT_TRUE(seen.find("Startup init") != seen.end());
+ EXPECT_TRUE(preresolved_seen.find("Startup init") != preresolved_seen.end());
EXPECT_TRUE(seen.find("Other class init") == seen.end());
+ EXPECT_TRUE(preresolved_seen.find("Other class init") == preresolved_seen.end());
+ // Expect the sets match.
+ EXPECT_GE(seen.size(), preresolved_seen.size());
}
}
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index 1d578ab9d1..b381765fe2 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -68,9 +68,9 @@ TEST_F(ElfWriterTest, dlsym) {
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ false,
- /*low_4gb*/false,
+ /*writable=*/ false,
+ /*program_header_only=*/ false,
+ /*low_4gb=*/false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false);
@@ -80,9 +80,9 @@ TEST_F(ElfWriterTest, dlsym) {
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ false,
- /* low_4gb */ false,
+ /*writable=*/ false,
+ /*program_header_only=*/ false,
+ /*low_4gb=*/ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
@@ -92,24 +92,23 @@ TEST_F(ElfWriterTest, dlsym) {
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ true,
- /* low_4gb */ false,
+ /*writable=*/ false,
+ /*program_header_only=*/ true,
+ /*low_4gb=*/ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
size_t size;
bool success = ef->GetLoadedSize(&size, &error_msg);
CHECK(success) << error_msg;
MemMap reservation = MemMap::MapAnonymous("ElfWriterTest#dlsym reservation",
- /* addr */ nullptr,
RoundUp(size, kPageSize),
PROT_NONE,
- /* low_4gb */ true,
+ /*low_4gb=*/ true,
&error_msg);
CHECK(reservation.IsValid()) << error_msg;
uint8_t* base = reservation.Begin();
success =
- ef->Load(file.get(), /* executable */ false, /* low_4gb */ false, &reservation, &error_msg);
+ ef->Load(file.get(), /*executable=*/ false, /*low_4gb=*/ false, &reservation, &error_msg);
CHECK(success) << error_msg;
CHECK(!reservation.IsValid());
EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatdata) + reinterpret_cast<uintptr_t>(base),
@@ -131,9 +130,9 @@ TEST_F(ElfWriterTest, CheckBuildIdPresent) {
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- /* writable */ false,
- /* program_header_only */ false,
- /* low_4gb */ false,
+ /*writable=*/ false,
+ /*program_header_only=*/ false,
+ /*low_4gb=*/ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_TRUE(ef->HasSection(".note.gnu.build-id"));
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index be620587b0..fd10b6b23c 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -288,11 +288,16 @@ bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
for (const HeapReferencePointerInfo& ref_info : string_ref_info) {
uint32_t base_offset;
- if (HasDexCacheNativeRefTag(ref_info.first)) {
+ if (HasDexCacheStringNativeRefTag(ref_info.first)) {
++native_string_refs;
- auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTag(
+ auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTags(
ref_info.first));
- base_offset = SetDexCacheNativeRefTag(GetImageOffset(obj_ptr));
+ base_offset = SetDexCacheStringNativeRefTag(GetImageOffset(obj_ptr));
+ } else if (HasDexCachePreResolvedStringNativeRefTag(ref_info.first)) {
+ ++native_string_refs;
+ auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTags(
+ ref_info.first));
+ base_offset = SetDexCachePreResolvedStringNativeRefTag(GetImageOffset(obj_ptr));
} else {
++managed_string_refs;
base_offset = GetImageOffset(reinterpret_cast<mirror::Object*>(ref_info.first));
@@ -447,7 +452,19 @@ std::vector<ImageWriter::HeapReferencePointerInfo> ImageWriter::CollectStringRef
if (IsValidAppImageStringReference(referred_string)) {
++string_info_collected;
visitor.AddStringRefInfo(
- SetDexCacheNativeRefTag(reinterpret_cast<uintptr_t>(object.Ptr())), index);
+ SetDexCacheStringNativeRefTag(reinterpret_cast<uintptr_t>(object.Ptr())), index);
+ }
+ }
+
+ // Visit all of the preinitialized strings.
+ GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+ for (size_t index = 0; index < dex_cache->NumPreResolvedStrings(); ++index) {
+ ObjPtr<mirror::String> referred_string = preresolved_strings[index].Read();
+ if (IsValidAppImageStringReference(referred_string)) {
+ ++string_info_collected;
+ visitor.AddStringRefInfo(SetDexCachePreResolvedStringNativeRefTag(
+ reinterpret_cast<uintptr_t>(object.Ptr())),
+ index);
}
}
@@ -852,15 +869,27 @@ void ImageWriter::PrepareDexCacheArraySlots() {
DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr);
AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), oat_index);
- if (dex_cache->GetResolvedMethodTypes() != nullptr) {
- AddDexCacheArrayRelocation(dex_cache->GetResolvedMethodTypes(),
- start + layout.MethodTypesOffset(),
- oat_index);
- }
- if (dex_cache->GetResolvedCallSites() != nullptr) {
- AddDexCacheArrayRelocation(dex_cache->GetResolvedCallSites(),
- start + layout.CallSitesOffset(),
- oat_index);
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedMethodTypes(),
+ start + layout.MethodTypesOffset(),
+ oat_index);
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedCallSites(),
+ start + layout.CallSitesOffset(),
+ oat_index);
+
+ // Preresolved strings aren't part of the special layout.
+ GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+ if (preresolved_strings != nullptr) {
+ DCHECK(!IsInBootImage(preresolved_strings));
+ // Add the array to the metadata section.
+ const size_t count = dex_cache->NumPreResolvedStrings();
+ auto bin = BinTypeForNativeRelocationType(NativeObjectRelocationType::kGcRootPointer);
+ for (size_t i = 0; i < count; ++i) {
+ native_object_relocations_.emplace(&preresolved_strings[i],
+ NativeObjectRelocation { oat_index,
+ image_info.GetBinSlotSize(bin),
+ NativeObjectRelocationType::kGcRootPointer });
+ image_info.IncrementBinSlotSize(bin, sizeof(GcRoot<mirror::Object>));
+ }
}
}
}
@@ -1069,10 +1098,9 @@ bool ImageWriter::AllocMemory() {
std::string error_msg;
image_info.image_ = MemMap::MapAnonymous("image writer image",
- /* addr */ nullptr,
length,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!image_info.image_.IsValid())) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
@@ -2340,9 +2368,21 @@ std::pair<size_t, std::vector<ImageSection>> ImageWriter::ImageInfo::CreateImage
sizeof(typename decltype(string_reference_offsets_)::value_type) *
num_string_references_);
+ /*
+ * Metadata section.
+ */
+
+ // Round up to the alignment of the offsets we are going to store.
+ cur_pos = RoundUp(string_reference_offsets.End(),
+ mirror::DexCache::PreResolvedStringsAlignment());
+
+ const ImageSection& metadata_section =
+ sections[ImageHeader::kSectionMetadata] =
+ ImageSection(cur_pos, GetBinSlotSize(Bin::kMetadata));
+
// Return the number of bytes described by these sections, and the sections
// themselves.
- return make_pair(string_reference_offsets.End(), std::move(sections));
+ return make_pair(metadata_section.End(), std::move(sections));
}
void ImageWriter::CreateHeader(size_t oat_index) {
@@ -2546,6 +2586,12 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
break;
}
+ case NativeObjectRelocationType::kGcRootPointer: {
+ auto* orig_pointer = reinterpret_cast<GcRoot<mirror::Object>*>(pair.first);
+ auto* dest_pointer = reinterpret_cast<GcRoot<mirror::Object>*>(dest);
+ CopyAndFixupReference(dest_pointer->AddressWithoutBarrier(), orig_pointer->Read());
+ break;
+ }
}
}
// Fixup the image method roots.
@@ -2966,6 +3012,12 @@ void ImageWriter::FixupDexCache(DexCache* orig_dex_cache, DexCache* copy_dex_cac
copy_dex_cache,
DexCache::ResolvedCallSitesOffset(),
orig_dex_cache->NumResolvedCallSites());
+ if (orig_dex_cache->GetPreResolvedStrings() != nullptr) {
+ CopyAndFixupPointer(copy_dex_cache,
+ DexCache::PreResolvedStringsOffset(),
+ orig_dex_cache->GetPreResolvedStrings(),
+ PointerSize::k64);
+ }
// Remove the DexFile pointers. They will be fixed up when the runtime loads the oat file. Leaving
// compiler pointers in here will make the output non-deterministic.
@@ -3179,6 +3231,8 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
return Bin::kImTable;
case NativeObjectRelocationType::kIMTConflictTable:
return Bin::kIMTConflictTable;
+ case NativeObjectRelocationType::kGcRootPointer:
+ return Bin::kMetadata;
}
UNREACHABLE();
}
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index e019a501a2..06c694c793 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -209,6 +209,8 @@ class ImageWriter final {
kIMTConflictTable,
// Runtime methods (always clean, do not have a length prefix array).
kRuntimeMethod,
+ // Metadata bin for data that is temporary during image lifetime.
+ kMetadata,
// Dex cache arrays have a special slot for PC-relative addressing. Since they are
// huge, and as such their dirtiness is not important for the clean/dirty separation,
// we arbitrarily keep them at the end of the native data.
@@ -226,6 +228,7 @@ class ImageWriter final {
kArtMethodArrayClean,
kArtMethodDirty,
kArtMethodArrayDirty,
+ kGcRootPointer,
kRuntimeMethod,
kIMTable,
kIMTConflictTable,
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 4f92492643..525e622690 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -139,18 +139,32 @@ class MemMap {
/*out*/std::string* error_msg,
bool use_debug_name = true);
static MemMap MapAnonymous(const char* name,
- uint8_t* addr,
size_t byte_count,
int prot,
bool low_4gb,
/*out*/std::string* error_msg) {
return MapAnonymous(name,
- addr,
+ /*addr=*/ nullptr,
+ byte_count,
+ prot,
+ low_4gb,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
+ error_msg);
+ }
+ static MemMap MapAnonymous(const char* name,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ MemMap* reservation,
+ /*out*/std::string* error_msg) {
+ return MapAnonymous(name,
+ /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
byte_count,
prot,
low_4gb,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /*reuse=*/ false,
+ reservation,
error_msg);
}
@@ -178,10 +192,10 @@ class MemMap {
flags,
fd,
start,
- /* low_4gb */ low_4gb,
+ /*low_4gb=*/ low_4gb,
filename,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
error_msg);
}
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index e4e227f3b2..b419ddd941 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -54,7 +54,6 @@ class MemMapTest : public CommonArtTest {
// Find a valid map address and unmap it before returning.
std::string error_msg;
MemMap map = MemMap::MapAnonymous("temp",
- /* addr= */ nullptr,
size,
PROT_READ,
low_4gb,
@@ -69,7 +68,6 @@ class MemMapTest : public CommonArtTest {
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a two-page memory region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
- /* addr= */ nullptr,
2 * page_size,
PROT_READ | PROT_WRITE,
low_4gb,
@@ -166,17 +164,15 @@ TEST_F(MemMapTest, Start) {
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
kPageSize,
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
void* source_addr = source.Begin();
@@ -201,21 +197,19 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) {
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
5 * kPageSize, // Need to make it larger
// initially so we know
// there won't be mappings
// in the way we we move
// source.
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr= */ nullptr,
3 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -247,17 +241,15 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
3 * kPageSize,
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -286,11 +278,10 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
MemMap dest =
MemMap::MapAnonymous(
"MapAnonymousEmpty-atomic-replace-dest",
- /* addr= */ nullptr,
3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
// the way we we move source.
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
@@ -300,7 +291,9 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
dest.Begin() + kPageSize,
2 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(source.IsValid());
ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
@@ -333,20 +326,18 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr= */ nullptr,
- 0,
+ /*byte_count=*/ 0,
PROT_READ,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -359,7 +350,9 @@ TEST_F(MemMapTest, MapAnonymousFailNullError) {
reinterpret_cast<uint8_t*>(kPageSize),
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
nullptr);
ASSERT_FALSE(map.IsValid());
}
@@ -369,20 +362,18 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr= */ nullptr,
- 0,
+ /*byte_count=*/ 0,
PROT_READ,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -426,17 +417,18 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
valid_address,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_TRUE(map0.BaseBegin() == valid_address);
// Map at an unspecified address, which should succeed.
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -446,7 +438,9 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
reinterpret_cast<uint8_t*>(map1.BaseBegin()),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map2.IsValid()) << error_msg;
ASSERT_TRUE(!error_msg.empty());
@@ -530,6 +524,8 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
size,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
if (map.IsValid()) {
break;
@@ -550,7 +546,9 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
reinterpret_cast<uint8_t*>(ptr),
2 * kPageSize, // brings it over the top.
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -565,7 +563,9 @@ TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -578,7 +578,9 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
reinterpret_cast<uint8_t*>(0xF0000000),
0x20000000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -589,12 +591,9 @@ TEST_F(MemMapTest, MapAnonymousReuse) {
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
- nullptr,
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
- /* reuse= */ false,
- /* reservation= */ nullptr,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(map.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -602,9 +601,9 @@ TEST_F(MemMapTest, MapAnonymousReuse) {
reinterpret_cast<uint8_t*>(map.BaseBegin()),
0x10000,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
- /* reuse= */ true,
- /* reservation= */ nullptr,
+ /*low_4gb=*/ false,
+ /*reuse=*/ true,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(map2.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -615,45 +614,45 @@ TEST_F(MemMapTest, CheckNoGaps) {
std::string error_msg;
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
- MemMap map = MemMap::MapAnonymous("MapAnonymous0",
- /* addr= */ nullptr,
- kPageSize * kNumPages,
- PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
- &error_msg);
- ASSERT_TRUE(map.IsValid()) << error_msg;
+ MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
+ kPageSize * kNumPages,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ false,
+ &error_msg);
+ ASSERT_TRUE(reservation.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
// Record the base address.
- uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
- // Unmap it.
- map.Reset();
+ uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin());
- // Map at the same address, but in page-sized separate mem maps,
- // assuming the space at the address is still available.
+ // Map at the same address, taking from the `map` reservation.
MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
- map_base,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ &reservation,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map_base, map0.Begin());
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- map_base + kPageSize,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ &reservation,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map_base + kPageSize, map1.Begin());
MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
- map_base + kPageSize * 2,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
+ &reservation,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map_base + 2 * kPageSize, map2.Begin());
+ ASSERT_FALSE(reservation.IsValid()); // The entire reservation was used.
// One-map cases.
ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
@@ -679,10 +678,9 @@ TEST_F(MemMapTest, AlignBy) {
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
- /* addr= */ nullptr,
14 * page_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(m0.IsValid());
uint8_t* base0 = m0.Begin();
@@ -785,10 +783,9 @@ TEST_F(MemMapTest, Reservation) {
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
MemMap reservation = MemMap::MapAnonymous("Test reservation",
- /* addr= */ nullptr,
kMapSize,
PROT_NONE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(reservation.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -798,14 +795,14 @@ TEST_F(MemMapTest, Reservation) {
static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
uint8_t* addr1 = reservation.Begin();
MemMap map1 = MemMap::MapFileAtAddress(addr1,
- /* byte_count= */ kChunk1Size,
+ /*byte_count=*/ kChunk1Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start= */ 0,
- /* low_4gb= */ false,
+ /*start=*/ 0,
+ /*low_4gb=*/ false,
scratch_file.GetFilename().c_str(),
- /* reuse= */ false,
+ /*reuse=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
@@ -823,10 +820,10 @@ TEST_F(MemMapTest, Reservation) {
uint8_t* addr2 = reservation.Begin();
MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
addr2,
- /* byte_count= */ kChunk2Size,
+ /*byte_count=*/ kChunk2Size,
PROT_READ,
- /* low_4gb= */ false,
- /* reuse= */ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
@@ -840,14 +837,14 @@ TEST_F(MemMapTest, Reservation) {
const size_t kChunk3Size = reservation.Size() - 1u;
uint8_t* addr3 = reservation.Begin();
MemMap map3 = MemMap::MapFileAtAddress(addr3,
- /* byte_count= */ kChunk3Size,
+ /*byte_count=*/ kChunk3Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start= */ dchecked_integral_cast<size_t>(addr3 - addr1),
- /* low_4gb= */ false,
+ /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1),
+ /*low_4gb=*/ false,
scratch_file.GetFilename().c_str(),
- /* reuse= */ false,
+ /*reuse=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map3.IsValid()) << error_msg;
@@ -893,6 +890,7 @@ class DumpMapsOnFailListener : public testing::EmptyTestEventListener {
// TODO: Could consider logging on EXPECT failures.
case testing::TestPartResult::kNonFatalFailure:
+ case testing::TestPartResult::kSkip:
case testing::TestPartResult::kSuccess:
break;
}
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index f5761cfbec..8ceea83be4 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -75,10 +75,9 @@ MemMap ZipEntry::ExtractToMemMap(const char* zip_filename,
name += " extracted in memory from ";
name += zip_filename;
MemMap map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
GetUncompressedLength(),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
error_msg);
if (!map.IsValid()) {
DCHECK(!error_msg->empty());
@@ -138,7 +137,7 @@ MemMap ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* erro
MAP_PRIVATE,
zip_fd,
offset,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
name.c_str(),
error_msg);
diff --git a/libdexfile/dex/modifiers.h b/libdexfile/dex/modifiers.h
index 018b1419b1..c4ea2d39b4 100644
--- a/libdexfile/dex/modifiers.h
+++ b/libdexfile/dex/modifiers.h
@@ -86,9 +86,9 @@ static constexpr uint32_t kAccSingleImplementation = 0x08000000; // method (ru
static constexpr uint32_t kAccHiddenApiBits = 0x30000000; // field, method
-// Not currently used, except for intrinsic methods where these bits
-// are part of the intrinsic ordinal.
-static constexpr uint32_t kAccMayBeUnusedBits = 0x40000000;
+// Non-intrinsics: Caches whether we can use fast-path in the interpreter invokes.
+// Intrinsics: These bits are part of the intrinsic ordinal.
+static constexpr uint32_t kAccFastInterpreterToInterpreterInvoke = 0x40000000; // method.
// Set by the compiler driver when compiling boot classes with instrinsic methods.
static constexpr uint32_t kAccIntrinsic = 0x80000000; // method (runtime)
@@ -103,9 +103,9 @@ static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
// Continuous sequence of bits used to hold the ordinal of an intrinsic method. Flags
// which overlap are not valid when kAccIntrinsic is set.
-static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccHiddenApiBits |
+static constexpr uint32_t kAccIntrinsicBits = kAccHiddenApiBits |
kAccSingleImplementation | kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict |
- kAccPreviouslyWarm;
+ kAccPreviouslyWarm | kAccFastInterpreterToInterpreterInvoke;
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 358b7ba287..e26ec95512 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1950,6 +1950,8 @@ class ImageDumper {
const auto& dex_cache_arrays_section = image_header_.GetDexCacheArraysSection();
const auto& intern_section = image_header_.GetInternedStringsSection();
const auto& class_table_section = image_header_.GetClassTableSection();
+ const auto& sro_section = image_header_.GetImageStringReferenceOffsetsSection();
+ const auto& metadata_section = image_header_.GetMetadataSection();
const auto& bitmap_section = image_header_.GetImageBitmapSection();
stats_.header_bytes = header_bytes;
@@ -1996,6 +1998,9 @@ class ImageDumper {
stats_.dex_cache_arrays_bytes += dex_cache_arrays_section.Size();
stats_.interned_strings_bytes += intern_section.Size();
stats_.class_table_bytes += class_table_section.Size();
+ stats_.sro_offset_bytes += sro_section.Size();
+ stats_.metadata_bytes += metadata_section.Size();
+
stats_.Dump(os, indent_os);
os << "\n";
@@ -2412,55 +2417,38 @@ class ImageDumper {
public:
struct Stats {
- size_t oat_file_bytes;
- size_t file_bytes;
-
- size_t header_bytes;
- size_t object_bytes;
- size_t art_field_bytes;
- size_t art_method_bytes;
- size_t dex_cache_arrays_bytes;
- size_t interned_strings_bytes;
- size_t class_table_bytes;
- size_t bitmap_bytes;
- size_t alignment_bytes;
-
- size_t managed_code_bytes;
- size_t managed_code_bytes_ignoring_deduplication;
- size_t native_to_managed_code_bytes;
- size_t class_initializer_code_bytes;
- size_t large_initializer_code_bytes;
- size_t large_method_code_bytes;
-
- size_t vmap_table_bytes;
-
- size_t dex_instruction_bytes;
+ size_t oat_file_bytes = 0u;
+ size_t file_bytes = 0u;
+
+ size_t header_bytes = 0u;
+ size_t object_bytes = 0u;
+ size_t art_field_bytes = 0u;
+ size_t art_method_bytes = 0u;
+ size_t dex_cache_arrays_bytes = 0u;
+ size_t interned_strings_bytes = 0u;
+ size_t class_table_bytes = 0u;
+ size_t sro_offset_bytes = 0u;
+ size_t metadata_bytes = 0u;
+ size_t bitmap_bytes = 0u;
+ size_t alignment_bytes = 0u;
+
+ size_t managed_code_bytes = 0u;
+ size_t managed_code_bytes_ignoring_deduplication = 0u;
+ size_t native_to_managed_code_bytes = 0u;
+ size_t class_initializer_code_bytes = 0u;
+ size_t large_initializer_code_bytes = 0u;
+ size_t large_method_code_bytes = 0u;
+
+ size_t vmap_table_bytes = 0u;
+
+ size_t dex_instruction_bytes = 0u;
std::vector<ArtMethod*> method_outlier;
std::vector<size_t> method_outlier_size;
std::vector<double> method_outlier_expansion;
std::vector<std::pair<std::string, size_t>> oat_dex_file_sizes;
- Stats()
- : oat_file_bytes(0),
- file_bytes(0),
- header_bytes(0),
- object_bytes(0),
- art_field_bytes(0),
- art_method_bytes(0),
- dex_cache_arrays_bytes(0),
- interned_strings_bytes(0),
- class_table_bytes(0),
- bitmap_bytes(0),
- alignment_bytes(0),
- managed_code_bytes(0),
- managed_code_bytes_ignoring_deduplication(0),
- native_to_managed_code_bytes(0),
- class_initializer_code_bytes(0),
- large_initializer_code_bytes(0),
- large_method_code_bytes(0),
- vmap_table_bytes(0),
- dex_instruction_bytes(0) {}
+ Stats() {}
struct SizeAndCount {
SizeAndCount(size_t bytes_in, size_t count_in) : bytes(bytes_in), count(count_in) {}
@@ -2614,6 +2602,8 @@ class ImageDumper {
"dex_cache_arrays_bytes = %8zd (%2.0f%% of art file bytes)\n"
"interned_string_bytes = %8zd (%2.0f%% of art file bytes)\n"
"class_table_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "sro_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "metadata_bytes = %8zd (%2.0f%% of art file bytes)\n"
"bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
"alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
header_bytes, PercentOfFileBytes(header_bytes),
@@ -2625,13 +2615,15 @@ class ImageDumper {
interned_strings_bytes,
PercentOfFileBytes(interned_strings_bytes),
class_table_bytes, PercentOfFileBytes(class_table_bytes),
+ sro_offset_bytes, PercentOfFileBytes(sro_offset_bytes),
+ metadata_bytes, PercentOfFileBytes(metadata_bytes),
bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
alignment_bytes, PercentOfFileBytes(alignment_bytes))
<< std::flush;
CHECK_EQ(file_bytes,
header_bytes + object_bytes + art_field_bytes + art_method_bytes +
dex_cache_arrays_bytes + interned_strings_bytes + class_table_bytes +
- bitmap_bytes + alignment_bytes);
+ sro_offset_bytes + metadata_bytes + bitmap_bytes + alignment_bytes);
}
os << "object_bytes breakdown:\n";
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 9e8288f997..2a565127f6 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -246,14 +246,12 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
mmap_name += name_;
std::string error;
dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr= */ nullptr,
dequick_size,
PROT_NONE,
/*low_4gb=*/ false,
&error);
mmap_name += "-TEMP";
temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr= */ nullptr,
dequick_size,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index f3f45d9c50..7cd10394d5 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -309,7 +309,6 @@ art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location,
std::string* error_msg) {
art::MemMap map = art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
- /* addr= */ nullptr,
data.size(),
PROT_READ|PROT_WRITE,
/*low_4gb=*/ false,
diff --git a/runtime/art_method.h b/runtime/art_method.h
index e56f3fd6bc..4e3ef5366a 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -285,6 +285,23 @@ class ArtMethod final {
bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool UseFastInterpreterToInterpreterInvoke() {
+ // The bit is applicable only if the method is not intrinsic.
+ constexpr uint32_t mask = kAccFastInterpreterToInterpreterInvoke | kAccIntrinsic;
+ return (GetAccessFlags() & mask) == kAccFastInterpreterToInterpreterInvoke;
+ }
+
+ void SetFastInterpreterToInterpreterInvokeFlag() {
+ DCHECK(!IsIntrinsic());
+ AddAccessFlags(kAccFastInterpreterToInterpreterInvoke);
+ }
+
+ void ClearFastInterpreterToInterpreterInvokeFlag() {
+ if (!IsIntrinsic()) {
+ ClearAccessFlags(kAccFastInterpreterToInterpreterInvoke);
+ }
+ }
+
bool SkipAccessChecks() {
// The kAccSkipAccessChecks flag value is used with a different meaning for native methods,
// so we need to check the kAccNative flag as well.
@@ -422,6 +439,8 @@ class ArtMethod final {
SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
entry_point_from_quick_compiled_code,
pointer_size);
+ // We might want to invoke compiled code, so don't use the fast path.
+ ClearFastInterpreterToInterpreterInvokeFlag();
}
// Registers the native method and returns the new entry point. NB The returned entry point might
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 50b42d4f7b..ae7db45024 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -58,7 +58,6 @@ MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
size = RoundUp(size, kPageSize);
std::string error_msg;
MemMap map = MemMap::MapAnonymous(name,
- /* addr= */ nullptr,
size,
PROT_READ | PROT_WRITE,
low_4gb,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 151f7b8565..f43791ab06 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1415,9 +1415,9 @@ void AppImageLoadingHelper::UpdateInternStrings(
for (size_t offset_index = 0; offset_index < num_string_offsets; ++offset_index) {
uint32_t base_offset = sro_base[offset_index].first;
- if (HasDexCacheNativeRefTag(base_offset)) {
- base_offset = ClearDexCacheNativeRefTag(base_offset);
- DCHECK_ALIGNED(base_offset, 2);
+ if (HasDexCacheStringNativeRefTag(base_offset)) {
+ base_offset = ClearDexCacheNativeRefTags(base_offset);
+ DCHECK_ALIGNED(base_offset, 2);
ObjPtr<mirror::DexCache> dex_cache =
reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
@@ -1437,10 +1437,27 @@ void AppImageLoadingHelper::UpdateInternStrings(
dex_cache->GetStrings()[string_index].store(
mirror::StringDexCachePair(it->second, source.index));
}
+ } else if (HasDexCachePreResolvedStringNativeRefTag(base_offset)) {
+ base_offset = ClearDexCacheNativeRefTags(base_offset);
+ DCHECK_ALIGNED(base_offset, 2);
+ ObjPtr<mirror::DexCache> dex_cache =
+ reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
+ uint32_t string_index = sro_base[offset_index].second;
+
+ ObjPtr<mirror::String> referred_string =
+ dex_cache->GetPreResolvedStrings()[string_index].Read();
+ DCHECK(referred_string != nullptr);
+
+ auto it = intern_remap.find(referred_string.Ptr());
+ if (it != intern_remap.end()) {
+ // Because we are not using a helper function we need to mark the GC card manually.
+ WriteBarrier::ForEveryFieldWrite(dex_cache);
+ dex_cache->GetPreResolvedStrings()[string_index] = GcRoot<mirror::String>(it->second);
+ }
} else {
uint32_t raw_member_offset = sro_base[offset_index].second;
- DCHECK_ALIGNED(base_offset, 2);
+ DCHECK_ALIGNED(base_offset, 2);
DCHECK_ALIGNED(raw_member_offset, 2);
ObjPtr<mirror::Object> obj_ptr =
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 91636dc7e9..56fdd06ff2 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -662,12 +662,14 @@ struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_preresolved_strings_), "numPreResolvedStrings");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_call_sites_), "numResolvedCallSites");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_fields_), "numResolvedFields");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_method_types_), "numResolvedMethodTypes");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_methods_), "numResolvedMethods");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_types_), "numResolvedTypes");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_strings_), "numStrings");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, preresolved_strings_), "preResolvedStrings");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_call_sites_), "resolvedCallSites");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_method_types_), "resolvedMethodTypes");
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 328a9bba40..3ad7fc92a2 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -60,6 +60,7 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "nativehelper/scoped_local_ref.h"
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 13f5fcb20e..ed3a18db28 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -206,7 +206,9 @@ void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
reinterpret_cast<uint8_t*>(start),
end - start,
PROT_NONE,
- /* low_4gb=*/ false,
+ /*low_4gb=*/ false,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 257cd41bc8..abefa4a6a1 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -25,7 +25,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "mirror/string-inl.h"
+#include "mirror/string-alloc-inl.h"
namespace art {
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 313b2b4fe4..9431f80a10 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,9 @@ class AtomicStack {
void Init() {
std::string error_msg;
mem_map_ = MemMap::MapAnonymous(name_.c_str(),
- /* addr= */ nullptr,
capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 80c4c76bd3..8a15af2fbc 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,9 @@ MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 9a5bde86b1..fdf1615f5e 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,9 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("card table",
- /* addr= */ nullptr,
capacity + 256,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index a6177896e1..b39628b1dc 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -185,7 +185,7 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
ResetClass();
// Create another space that we can put references in.
std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create(
- "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false));
+ "other space", 128 * KB, 4 * MB, 4 * MB, /*can_move_objects=*/ false));
ASSERT_TRUE(other_space.get() != nullptr);
{
ScopedThreadSuspension sts(self, kSuspended);
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index b369a6685e..7eca792063 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,9 @@ class ReadBarrierTable {
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
mem_map_ = MemMap::MapAnonymous("read barrier table",
- /* addr= */ nullptr,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 76d5d9de7e..dc223dbb04 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,9 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 6a3faefe08..fcc3007acd 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -151,6 +151,12 @@ class SpaceBitmap {
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
+ // Visit all of the set bits in HeapBegin(), HeapLimit().
+ template <typename Visitor>
+ void VisitAllMarked(Visitor&& visitor) const {
+ VisitMarkedRange(HeapBegin(), HeapLimit(), visitor);
+ }
+
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
// max during the traversal.
template <typename Visitor>
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 4e2cf2bf8c..b90a95d802 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -92,10 +92,9 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
- /* addr= */ nullptr,
RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 1c9d051989..e0bbf43622 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,9 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"concurrent copying sweep array free buffer",
- /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -1651,36 +1650,15 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc
inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
- auto find_space_from_ref = [this] (mirror::Object* ref)
- REQUIRES_SHARED(Locks::mutator_lock_) -> space::Space* {
- for (const auto& space : heap_->GetContinuousSpaces()) {
- if (space->Contains(ref)) {
- return space;
- }
- }
- for (const auto& space : heap_->GetDiscontinuousSpaces()) {
- if (space->Contains(ref)) {
- return space;
- }
- }
- return nullptr;
- };
- if (kUseBakerReadBarrier &&
- kIsDebugBuild &&
- to_ref->GetReadBarrierState() != ReadBarrier::GrayState()) {
- space::Space* space = find_space_from_ref(to_ref);
- LOG(FATAL_WITHOUT_ABORT) << " " << to_ref
- << " " << to_ref->GetReadBarrierState()
- << " is_marked=" << IsMarked(to_ref)
- << " type=" << to_ref->PrettyTypeOf()
- << " is_young_gc=" << young_gen_;
- if (space == region_space_) {
- LOG(FATAL) << " region_type=" << rtype;
- } else if (space != nullptr) {
- LOG(FATAL) << " space=" << space->GetName();
- } else {
- LOG(FATAL) << "no space";
- }
+ if (kUseBakerReadBarrier) {
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " to_ref=" << to_ref
+ << " rb_state=" << to_ref->GetReadBarrierState()
+ << " is_marked=" << IsMarked(to_ref)
+ << " type=" << to_ref->PrettyTypeOf()
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+ << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+ << " region_type=" << rtype;
}
bool add_to_live_bytes = false;
// Invariant: There should be no object from a newly-allocated
@@ -1716,22 +1694,15 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Scan<false>(to_ref);
}
}
- if (kUseBakerReadBarrier &&
- kIsDebugBuild &&
- to_ref->GetReadBarrierState() != ReadBarrier::GrayState()) {
- space::Space* space = find_space_from_ref(to_ref);
- LOG(FATAL_WITHOUT_ABORT) << " " << to_ref
- << " " << to_ref->GetReadBarrierState()
- << " is_marked=" << IsMarked(to_ref)
- << " type=" << to_ref->PrettyTypeOf()
- << " is_young_gc=" << young_gen_;
- if (space == region_space_) {
- LOG(FATAL) << " region_type=" << rtype;
- } else if (space != nullptr) {
- LOG(FATAL) << " space=" << space->GetName();
- } else {
- LOG(FATAL) << "no space";
- }
+ if (kUseBakerReadBarrier) {
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " to_ref=" << to_ref
+ << " rb_state=" << to_ref->GetReadBarrierState()
+ << " is_marked=" << IsMarked(to_ref)
+ << " type=" << to_ref->PrettyTypeOf()
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+ << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
+ << " region_type=" << rtype;
}
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
mirror::Object* referent = nullptr;
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 0e5fac123e..c2a67bf9f6 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -78,18 +78,20 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
}
// Create an image space, the oat file is optional.
- DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
- size_t image_size,
- uint8_t* oat_begin,
- size_t oat_size) {
+ DummyImageSpace* CreateImageSpace(size_t image_size,
+ size_t oat_size,
+ MemMap* image_reservation,
+ MemMap* oat_reservation) {
+ DCHECK(image_reservation != nullptr);
+ DCHECK(oat_reservation != nullptr);
std::string error_str;
- MemMap map = MemMap::MapAnonymous("DummyImageSpace",
- image_begin,
- image_size,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/true,
- &error_str);
- if (!map.IsValid()) {
+ MemMap image_map = MemMap::MapAnonymous("DummyImageSpace",
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ /*reservation=*/ image_reservation,
+ &error_str);
+ if (!image_map.IsValid()) {
LOG(ERROR) << error_str;
return nullptr;
}
@@ -97,10 +99,10 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
live_bitmaps_.pop_back();
MemMap oat_map = MemMap::MapAnonymous("OatMap",
- oat_begin,
oat_size,
PROT_READ | PROT_WRITE,
- /*low_4gb=*/true,
+ /*low_4gb=*/ true,
+ /*reservation=*/ oat_reservation,
&error_str);
if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
@@ -109,17 +111,17 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
- new (map.Begin()) ImageHeader(
- /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
- /*image_size=*/map.Size(),
+ new (image_map.Begin()) ImageHeader(
+ /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
+ /*image_size=*/ image_map.Size(),
sections,
- /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
- /*oat_checksum=*/0u,
+ /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
+ /*oat_checksum=*/ 0u,
// The oat file data in the header is always right after the image space.
- /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+ /*oat_data_begin=*/PointerToLowMemUInt32(oat_map.Begin()),
+ /*oat_data_end=*/PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+ /*oat_file_end=*/PointerToLowMemUInt32(oat_map.Begin() + oat_size),
/*boot_image_begin=*/0u,
/*boot_image_size=*/0u,
/*boot_oat_begin=*/0u,
@@ -127,29 +129,12 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
/*pointer_size=*/sizeof(void*),
ImageHeader::kStorageModeUncompressed,
/*data_size=*/0u);
- return new DummyImageSpace(std::move(map),
+ return new DummyImageSpace(std::move(image_map),
std::move(live_bitmap),
std::move(oat_file),
std::move(oat_map));
}
- // Does not reserve the memory, the caller needs to be sure no other threads will map at the
- // returned address.
- static uint8_t* GetContinuousMemoryRegion(size_t size) {
- std::string error_str;
- MemMap map = MemMap::MapAnonymous("reserve",
- /* addr= */ nullptr,
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/ true,
- &error_str);
- if (!map.IsValid()) {
- LOG(ERROR) << "Failed to allocate memory region " << error_str;
- return nullptr;
- }
- return map.Begin();
- }
-
private:
// Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
// them to randomly get placed somewhere where we want an image space.
@@ -206,13 +191,25 @@ TEST_F(ImmuneSpacesTest, AppendAfterImage) {
constexpr size_t kImageOatSize = 321 * kPageSize;
constexpr size_t kOtherSpaceSize = 100 * kPageSize;
- uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize);
-
- std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory,
- kImageSize,
- memory + kImageSize,
- kImageOatSize));
+ std::string error_str;
+ MemMap reservation = MemMap::MapAnonymous("reserve",
+ kImageSize + kImageOatSize + kOtherSpaceSize,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+
+ std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize,
+ kImageOatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(image_space != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+
const ImageHeader& image_header = image_space->GetImageHeader();
DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
@@ -257,36 +254,44 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
constexpr size_t kImage3OatSize = kPageSize;
constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
- uint8_t* memory = GetContinuousMemoryRegion(kMemorySize);
- uint8_t* space1_begin = memory;
- memory += kImage1Size;
- uint8_t* space2_begin = memory;
- memory += kImage2Size;
- uint8_t* space1_oat_begin = memory;
- memory += kImage1OatSize;
- uint8_t* space2_oat_begin = memory;
- memory += kImage2OatSize;
- uint8_t* space3_begin = memory;
-
- std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin,
- kImage1Size,
- space1_oat_begin,
- kImage1OatSize));
+ std::string error_str;
+ MemMap reservation = MemMap::MapAnonymous("reserve",
+ kMemorySize,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+
+ std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size,
+ kImage1OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space1 != nullptr);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
-
- std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin,
- kImage2Size,
- space2_oat_begin,
- kImage2OatSize));
+ std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size,
+ kImage2OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space2 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
// Finally put a 3rd image space.
- std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin,
- kImage3Size,
- space3_begin + kImage3Size,
- kImage3OatSize));
+ image_reservation = reservation.TakeReservedMemory(kImage3Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size,
+ kImage3OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space3 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_FALSE(reservation.IsValid());
// Check that we do not include the oat if there is no space after.
ImmuneSpaces spaces;
@@ -323,12 +328,29 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
constexpr size_t kGuardSize = kPageSize;
constexpr size_t kImage4Size = kImageBytes - kPageSize;
constexpr size_t kImage4OatSize = kPageSize;
- uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2);
- std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize,
- kImage4Size,
- memory2 + kGuardSize + kImage4Size,
- kImage4OatSize));
+
+ reservation = MemMap::MapAnonymous("reserve",
+ kImage4Size + kImage4OatSize + kGuardSize * 2,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ MemMap guard = reservation.TakeReservedMemory(kGuardSize);
+ ASSERT_TRUE(guard.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ guard.Reset(); // Release the guard memory.
+ image_reservation = reservation.TakeReservedMemory(kImage4Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size,
+ kImage4OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space4 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_EQ(reservation.Size(), kGuardSize);
+ reservation.Reset(); // Release the guard memory.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
@@ -346,12 +368,28 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
// Layout: [guard page][image][oat][guard page]
constexpr size_t kImage5Size = kImageBytes + kPageSize;
constexpr size_t kImage5OatSize = kPageSize;
- uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2);
- std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize,
- kImage5Size,
- memory3 + kGuardSize + kImage5Size,
- kImage5OatSize));
+ reservation = MemMap::MapAnonymous("reserve",
+ kImage5Size + kImage5OatSize + kGuardSize * 2,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ true,
+ &error_str);
+ ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
+ guard = reservation.TakeReservedMemory(kGuardSize);
+ ASSERT_TRUE(guard.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ guard.Reset(); // Release the guard memory.
+ image_reservation = reservation.TakeReservedMemory(kImage5Size);
+ ASSERT_TRUE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size,
+ kImage5OatSize,
+ &image_reservation,
+ &reservation));
ASSERT_TRUE(space5 != nullptr);
+ ASSERT_FALSE(image_reservation.IsValid());
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_EQ(reservation.Size(), kGuardSize);
+ reservation.Reset(); // Release the guard memory.
{
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 399f9ff301..9e5cb9c314 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,9 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"mark sweep sweep array free buffer",
- /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a31cbe755f..f0f81fc67e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -505,11 +505,11 @@ Heap::Heap(size_t initial_size,
// Create bump pointer spaces instead of a backup space.
main_mem_map_2.Reset();
bump_pointer_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
} else if (main_mem_map_2.IsValid()) {
@@ -529,8 +529,7 @@ Heap::Heap(size_t initial_size,
CHECK(!non_moving_space_->CanMoveObjects());
// Allocate the large object space.
if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
- large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
- capacity_);
+ large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
} else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
@@ -696,7 +695,9 @@ MemMap Heap::MapAnonymousPreferredAddress(const char* name,
request_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb=*/ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
out_error_str);
if (map.IsValid() || request_begin == nullptr) {
return map;
@@ -1270,6 +1271,10 @@ space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
return nullptr;
}
+std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
+ space::Space* space = FindSpaceFromAddress(addr);
+ return (space != nullptr) ? space->GetName() : "no space";
+}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
// If we're in a stack overflow, do not create a new exception. It would require running the
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6c4b9367d1..c3ee5267b5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -554,6 +554,9 @@ class Heap {
space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string DumpSpaceNameFromAddress(const void* addr) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
// Do a pending collector transition.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index f6db070fac..fa10150d46 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -39,6 +39,8 @@ class HeapTest : public CommonRuntimeTest {
16 * KB,
PROT_READ,
/*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 497a0c2e5f..609ccee7b4 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -24,15 +24,13 @@ namespace art {
namespace gc {
namespace space {
-BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
- uint8_t* requested_begin) {
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 59d4d27626..383bf7abaa 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -46,7 +46,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// Create a bump pointer space with the requested sizes. The requested base address is not
// guaranteed to be granted, if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
- static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+ static BumpPointerSpace* Create(const std::string& name, size_t capacity);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
// Allocate num_bytes, returns null if the space is full.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 73582a00c0..7955ff92e6 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -108,8 +108,10 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map,
}
}
-DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, uint8_t* requested_begin,
+DlMallocSpace* DlMallocSpace::Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -117,8 +119,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
LOG(INFO) << "DlMallocSpace::Create entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
- << " capacity=" << PrettySize(capacity)
- << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+ << " capacity=" << PrettySize(capacity);
}
// Memory we promise to dlmalloc before it asks for morecore.
@@ -126,8 +127,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = kPageSize;
- MemMap mem_map =
- CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index c63ff71849..e91602f607 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -46,8 +46,11 @@ class DlMallocSpace : public MallocSpace {
// base address is not guaranteed to be granted, if it is required,
// the caller should call Begin on the returned space to confirm the
// request was granted.
- static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin, bool can_move_objects);
+ static DlMallocSpace* Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool can_move_objects);
// Virtual to allow MemoryToolMallocSpace to intercept.
mirror::Object* AllocWithGrowth(Thread* self,
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index f9b41daad8..92b56bda22 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -22,14 +22,16 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return DlMallocSpace::Create(
+ name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index 5758e0cde9..550d1bbe77 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -22,14 +22,16 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
+MallocSpace* CreateDlMallocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return DlMallocSpace::Create(
+ name, initial_size, growth_limit, capacity, /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9e679573bd..96a2cea39f 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -622,9 +622,9 @@ class ImageSpace::Loader {
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg) {
TimingLogger::ScopedTiming timing("MapImageFile", logger);
- uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+ uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
return MemMap::MapFileAtAddress(address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
@@ -649,11 +649,9 @@ class ImageSpace::Loader {
// Reserve output and decompress into it.
MemMap map = MemMap::MapAnonymous(image_location,
- address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
- /*reuse=*/ false,
image_reservation,
error_msg);
if (map.IsValid()) {
@@ -1172,6 +1170,19 @@ class ImageSpace::Loader {
}
dex_cache->FixupResolvedCallSites<kWithoutReadBarrier>(new_call_sites, fixup_adapter);
}
+
+ GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+ if (preresolved_strings != nullptr) {
+ GcRoot<mirror::String>* new_array = fixup_adapter.ForwardObject(preresolved_strings);
+ if (preresolved_strings != new_array) {
+ dex_cache->SetPreResolvedStrings(new_array);
+ }
+ const size_t num_preresolved_strings = dex_cache->NumPreResolvedStrings();
+ for (size_t j = 0; j < num_preresolved_strings; ++j) {
+ new_array[j] = GcRoot<mirror::String>(
+ fixup_adapter(new_array[j].Read<kWithoutReadBarrier>()));
+ }
+ }
}
}
{
@@ -1731,6 +1742,10 @@ class ImageSpace::BootImageLoader {
dex_cache,
mirror::DexCache::ResolvedCallSitesOffset(),
dex_cache->NumResolvedCallSites<kVerifyNone>());
+ FixupDexCacheArray<GcRoot<mirror::String>>(
+ dex_cache,
+ mirror::DexCache::PreResolvedStringsOffset(),
+ dex_cache->NumPreResolvedStrings<kVerifyNone>());
}
private:
@@ -1775,6 +1790,11 @@ class ImageSpace::BootImageLoader {
PatchGcRoot(diff_, &array[index]);
}
+ void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PatchGcRoot(diff_, &array[index]);
+ }
+
template <typename EntryType>
void FixupDexCacheArray(mirror::DexCache* dex_cache,
MemberOffset array_offset,
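When an app image is relocated, the new pre-resolved string arrays have to be forwarded like the other native DexCache arrays: the Loader hunk above forwards the array pointer and each element, and the BootImageLoader hunks patch every GcRoot slot by the boot-image relocation offset. A tiny sketch of the per-element patching (array and num are stand-in names for the pre-resolved array and its length):

    // Sketch: adjust each stored String pointer by the relocation offset diff_.
    for (uint32_t i = 0; i != num; ++i) {
      PatchGcRoot(diff_, &array[i]);
    }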
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a7f82f6e36..1658dba413 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
- /* addr= */ nullptr,
num_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -346,14 +345,13 @@ inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}
-FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +370,9 @@ FreeListSpace::FreeListSpace(const std::string& name,
std::string error_msg;
allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- /* addr= */ nullptr,
alloc_info_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 47167faccc..a4d6a24263 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -184,7 +184,7 @@ class FreeListSpace final : public LargeObjectSpace {
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
- static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
+ static FreeListSpace* Create(const std::string& name, size_t capacity);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index d55ccd6e40..62bc26e09d 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -42,7 +42,7 @@ void LargeObjectSpaceTest::LargeObjectTest() {
if (i == 0) {
los = space::LargeObjectMapSpace::Create("large object space");
} else {
- los = space::FreeListSpace::Create("large object space", nullptr, capacity);
+ los = space::FreeListSpace::Create("large object space", capacity);
}
// Make sure the bitmap is not empty and actually covers at least how much we expect.
@@ -157,7 +157,7 @@ void LargeObjectSpaceTest::RaceTest() {
if (los_type == 0) {
los = space::LargeObjectMapSpace::Create("large object space");
} else {
- los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
+ los = space::FreeListSpace::Create("large object space", 128 * MB);
}
Thread* self = Thread::Current();
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 189aeb5297..b5e6b62bcd 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -82,8 +82,7 @@ MemMap MallocSpace::CreateMemMap(const std::string& name,
size_t starting_size,
size_t* initial_size,
size_t* growth_limit,
- size_t* capacity,
- uint8_t* requested_begin) {
+ size_t* capacity) {
// Sanity check arguments
if (starting_size > *initial_size) {
*initial_size = starting_size;
@@ -107,10 +106,9 @@ MemMap MallocSpace::CreateMemMap(const std::string& name,
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- requested_begin,
*capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6bf2d71c7c..5dd8136dcb 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -157,8 +157,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
size_t starting_size,
size_t* initial_size,
size_t* growth_limit,
- size_t* capacity,
- uint8_t* requested_begin);
+ size_t* capacity);
// When true the low memory mode argument specifies that the heap wishes the created allocator to
// be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 31bbfb8f00..2774e26acd 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,9 @@ MemMap RegionSpace::CreateMemMap(const std::string& name,
requested_begin,
capacity + kRegionSize,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
if (mem_map.IsValid() || requested_begin == nullptr) {
break;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 10ff1c15b1..36fd864bf3 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -133,17 +133,19 @@ RosAllocSpace::~RosAllocSpace() {
delete rosalloc_;
}
-RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, uint8_t* requested_begin,
- bool low_memory_mode, bool can_move_objects) {
+RosAllocSpace* RosAllocSpace::Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
+ bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
VLOG(startup) << "RosAllocSpace::Create entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
- << " capacity=" << PrettySize(capacity)
- << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+ << " capacity=" << PrettySize(capacity);
}
// Memory we promise to rosalloc before it asks for morecore.
@@ -151,8 +153,7 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = Heap::kDefaultStartingSize;
- MemMap mem_map =
- CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 5162a064d1..9e95c16cb3 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -38,8 +38,11 @@ class RosAllocSpace : public MallocSpace {
// base address is not guaranteed to be granted, if it is required,
// the caller should call Begin on the returned space to confirm the
// request was granted.
- static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
+ static RosAllocSpace* Create(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
bool can_move_objects);
static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
const std::string& name,
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index b50859b8e6..f0b3231b3a 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -22,15 +22,20 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return RosAllocSpace::Create(name,
+ initial_size,
+ growth_limit,
+ capacity,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+ /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index 5e7ced6e23..d7e7e90188 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -22,15 +22,19 @@ namespace art {
namespace gc {
namespace space {
-MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin) {
- return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
+MallocSpace* CreateRosAllocSpace(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity) {
+ return RosAllocSpace::Create(name, initial_size,
+ growth_limit,
+ capacity,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(),
+ /*can_move_objects=*/ false);
}
TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace)
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/space_create_test.cc b/runtime/gc/space/space_create_test.cc
index ca5f306264..d3db679c29 100644
--- a/runtime/gc/space/space_create_test.cc
+++ b/runtime/gc/space/space_create_test.cc
@@ -34,25 +34,22 @@ class SpaceCreateTest : public SpaceTest<CommonRuntimeTestWithParam<MallocSpaceT
MallocSpace* CreateSpace(const std::string& name,
size_t initial_size,
size_t growth_limit,
- size_t capacity,
- uint8_t* requested_begin) {
+ size_t capacity) {
const MallocSpaceType type = GetParam();
if (type == kMallocSpaceDlMalloc) {
return DlMallocSpace::Create(name,
initial_size,
growth_limit,
capacity,
- requested_begin,
- false);
+ /*can_move_objects=*/ false);
}
DCHECK_EQ(static_cast<uint32_t>(type), static_cast<uint32_t>(kMallocSpaceRosAlloc));
return RosAllocSpace::Create(name,
initial_size,
growth_limit,
capacity,
- requested_begin,
Runtime::Current()->GetHeap()->IsLowMemoryMode(),
- false);
+ /*can_move_objects=*/ false);
}
};
@@ -62,25 +59,25 @@ TEST_P(SpaceCreateTest, InitTestBody) {
{
// Init < max == growth
- std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+ std::unique_ptr<Space> space(CreateSpace("test", 16 * MB, 32 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Init == max == growth
- space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 16 * MB, 16 * MB));
EXPECT_TRUE(space != nullptr);
// Init > max == growth
- space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 32 * MB, 16 * MB, 16 * MB));
EXPECT_TRUE(space == nullptr);
// Growth == init < max
- space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 16 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Growth < init < max
- space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 16 * MB, 8 * MB, 32 * MB));
EXPECT_TRUE(space == nullptr);
// Init < growth < max
- space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+ space.reset(CreateSpace("test", 8 * MB, 16 * MB, 32 * MB));
EXPECT_TRUE(space != nullptr);
// Init < max < growth
- space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+ space.reset(CreateSpace("test", 8 * MB, 32 * MB, 16 * MB));
EXPECT_TRUE(space == nullptr);
}
}
@@ -91,7 +88,7 @@ TEST_P(SpaceCreateTest, InitTestBody) {
// the GC works with the ZygoteSpace.
TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
size_t dummy;
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -225,7 +222,7 @@ TEST_P(SpaceCreateTest, ZygoteSpaceTestBody) {
TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
size_t dummy = 0;
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -301,7 +298,7 @@ TEST_P(SpaceCreateTest, AllocAndFreeTestBody) {
}
TEST_P(SpaceCreateTest, AllocAndFreeListTestBody) {
- MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ MallocSpace* space(CreateSpace("test", 4 * MB, 16 * MB, 16 * MB));
ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 5aac21721f..1b111e3496 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -123,8 +123,10 @@ class SpaceTest : public Super {
return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
}
- typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, uint8_t* requested_begin);
+ typedef MallocSpace* (*CreateSpaceFn)(const std::string& name,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity);
void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
int round, size_t growth_limit);
@@ -323,7 +325,7 @@ void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
size_t initial_size = 4 * MB;
size_t growth_limit = 8 * MB;
size_t capacity = 16 * MB;
- MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+ MallocSpace* space(create_space("test", initial_size, growth_limit, capacity));
ASSERT_TRUE(space != nullptr);
// Basic sanity
diff --git a/runtime/image.cc b/runtime/image.cc
index e7f44864b2..376742afbc 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '5', '\0' }; // Remove relocation section.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '6', '\0' }; // Add metadata section.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 0dec5f71ab..d925956f7d 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -236,6 +236,7 @@ class PACKED(4) ImageHeader {
kSectionInternedStrings,
kSectionClassTable,
kSectionStringReferenceOffsets,
+ kSectionMetadata,
kSectionImageBitmap,
kSectionCount, // Number of elements in enum.
};
@@ -293,6 +294,10 @@ class PACKED(4) ImageHeader {
return GetImageSection(kSectionStringReferenceOffsets);
}
+ const ImageSection& GetMetadataSection() const {
+ return GetImageSection(kSectionMetadata);
+ }
+
const ImageSection& GetImageBitmapSection() const {
return GetImageSection(kSectionImageBitmap);
}
@@ -462,22 +467,45 @@ typedef std::pair<uint32_t, uint32_t> AppImageReferenceOffsetInfo;
* to managed objects and pointers to native reference arrays.
*/
template<typename T>
-T SetDexCacheNativeRefTag(T val) {
+T SetDexCacheStringNativeRefTag(T val) {
static_assert(std::is_integral<T>::value, "Expected integral type.");
return val | 1u;
}
/*
+ * Tags the second last bit. Used by AppImage logic to differentiate between pointers
+ * to managed objects and pointers to native reference arrays.
+ */
+template<typename T>
+T SetDexCachePreResolvedStringNativeRefTag(T val) {
+ static_assert(std::is_integral<T>::value, "Expected integral type.");
+
+ return val | 2u;
+}
+
+/*
* Retrieves the value of the last bit. Used by AppImage logic to
* differentiate between pointers to managed objects and pointers to native
* reference arrays.
*/
template<typename T>
-bool HasDexCacheNativeRefTag(T val) {
+bool HasDexCacheStringNativeRefTag(T val) {
+ static_assert(std::is_integral<T>::value, "Expected integral type.");
+
+ return (val & 1u) != 0u;
+}
+
+/*
+ * Retrieves the value of the second last bit. Used by AppImage logic to
+ * differentiate between pointers to managed objects and pointers to native
+ * reference arrays.
+ */
+template<typename T>
+bool HasDexCachePreResolvedStringNativeRefTag(T val) {
static_assert(std::is_integral<T>::value, "Expected integral type.");
- return (val & 1u) == 1u;
+ return (val & 2u) != 0u;
}
/*
@@ -486,10 +514,10 @@ bool HasDexCacheNativeRefTag(T val) {
* reference arrays.
*/
template<typename T>
-T ClearDexCacheNativeRefTag(T val) {
+T ClearDexCacheNativeRefTags(T val) {
static_assert(std::is_integral<T>::value, "Expected integral type.");
- return val & ~1u;
+ return val & ~3u;
}
std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageMethod& policy);
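With the second tag bit, a native array pointer stored by the AppImage logic can be marked either as a resolved-strings array (bit 0) or as a pre-resolved-strings array (bit 1), and ClearDexCacheNativeRefTags strips both. A small worked example using the helpers declared above (the address value is illustrative):

    uintptr_t ptr = 0x1000u;  // some aligned native array address
    uintptr_t s  = SetDexCacheStringNativeRefTag(ptr);             // 0x1001, bit 0 set
    uintptr_t ps = SetDexCachePreResolvedStringNativeRefTag(ptr);  // 0x1002, bit 1 set
    CHECK(HasDexCacheStringNativeRefTag(s));
    CHECK(HasDexCachePreResolvedStringNativeRefTag(ps));
    CHECK_EQ(ClearDexCacheNativeRefTags(s), ptr);   // mask is ~3u, so either tag is removed
    CHECK_EQ(ClearDexCacheNativeRefTags(ps), ptr);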
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 6db47903b2..361dccbd13 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -80,10 +80,9 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
const size_t table_bytes = max_count * sizeof(IrtEntry);
table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
- /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
error_msg);
if (!table_mem_map_.IsValid() && error_msg->empty()) {
*error_msg = "Unable to map memory for indirect ref table";
@@ -223,10 +222,9 @@ bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
const size_t table_bytes = new_size * sizeof(IrtEntry);
MemMap new_map = MemMap::MapAnonymous("indirect ref table",
- /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
error_msg);
if (!new_map.IsValid()) {
return false;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index d5902ecbec..fe6154792c 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -64,6 +64,31 @@ bool CheckStackOverflow(Thread* self, size_t frame_size)
return true;
}
+bool UseFastInterpreterToInterpreterInvoke(ArtMethod* method) {
+ Runtime* runtime = Runtime::Current();
+ const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
+ if (!runtime->GetClassLinker()->IsQuickToInterpreterBridge(quick_code)) {
+ return false;
+ }
+ if (!method->SkipAccessChecks() || method->IsNative() || method->IsProxyMethod()) {
+ return false;
+ }
+ if (method->IsIntrinsic()) {
+ return false;
+ }
+ if (method->GetDeclaringClass()->IsStringClass() && method->IsConstructor()) {
+ return false;
+ }
+ if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
+ return false;
+ }
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
+ if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
+ return false;
+ }
+ return true;
+}
+
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
bool transaction_active>
bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
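UseFastInterpreterToInterpreterInvoke is now an out-of-line predicate so that DoInvoke (next file) can cache its verdict on the ArtMethod and skip the full set of checks on later calls. A condensed sketch of that caching step as it appears in the new DoInvoke body (method stands for the resolved callee):

    // Fast-path bit cached on the ArtMethod; it must be cleared again if any of
    // the checked conditions stops holding.
    bool use_fast_path = method->UseFastInterpreterToInterpreterInvoke();
    if (!use_fast_path) {
      use_fast_path = UseFastInterpreterToInterpreterInvoke(method);  // full recheck
      if (use_fast_path) {
        method->SetFastInterpreterToInterpreterInvokeFlag();          // remember for next time
      }
    }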
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 9f4403ed2b..bf84227560 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -124,31 +124,8 @@ template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
-template<InvokeType type>
-static ALWAYS_INLINE bool UseInterpreterToInterpreterFastPath(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Runtime* runtime = Runtime::Current();
- const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
- DCHECK(runtime->IsStarted());
- if (!runtime->GetClassLinker()->IsQuickToInterpreterBridge(quick_code)) {
- return false;
- }
- if (!method->SkipAccessChecks() || method->IsNative() || method->IsProxyMethod()) {
- return false;
- }
- if (method->GetDeclaringClass()->IsStringClass() && method->IsConstructor()) {
- return false;
- }
- if (type == kStatic && !method->GetDeclaringClass()->IsInitialized()) {
- return false;
- }
- DCHECK(!runtime->IsActiveTransaction());
- ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
- if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
- return false;
- }
- return true;
-}
+bool UseFastInterpreterToInterpreterInvoke(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Throws exception if we are getting close to the end of the stack.
NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
@@ -156,7 +133,7 @@ NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp>
+template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp, bool is_quick = false>
static ALWAYS_INLINE bool DoInvoke(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
@@ -177,7 +154,9 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
InterpreterCache* tls_cache = self->GetInterpreterCache();
size_t tls_value;
ArtMethod* resolved_method;
- if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+ if (is_quick) {
+ resolved_method = nullptr; // We don't know/care what the original method was.
+ } else if (LIKELY(tls_cache->Get(inst, &tls_value))) {
resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
} else {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -196,8 +175,20 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
// Null pointer check and virtual method resolution.
ObjPtr<mirror::Object> receiver =
(type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
- ArtMethod* const called_method = FindMethodToCall<type, do_access_check>(
- method_idx, resolved_method, &receiver, sf_method, self);
+ ArtMethod* called_method;
+ if (is_quick) {
+ if (UNLIKELY(receiver == nullptr)) {
+ // We lost the reference to the method index so we cannot get a more precise exception.
+ ThrowNullPointerExceptionFromDexPC();
+ return false;
+ }
+ DCHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
+ called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
+ /*vtable_idx=*/ method_idx, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ } else {
+ called_method = FindMethodToCall<type, do_access_check>(
+ method_idx, resolved_method, &receiver, sf_method, self);
+ }
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
@@ -224,7 +215,31 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
}
}
- if (is_mterp && self->UseMterp() && UseInterpreterToInterpreterFastPath<type>(called_method)) {
+ // Check whether we can use the fast path. The result is cached in the ArtMethod.
+ // If the bit is not set, we explicitly recheck all the conditions.
+ // If any of the conditions get falsified, it is important to clear the bit.
+ bool use_fast_path = false;
+ if (is_mterp && self->UseMterp()) {
+ use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke();
+ if (!use_fast_path) {
+ use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method);
+ if (use_fast_path) {
+ called_method->SetFastInterpreterToInterpreterInvokeFlag();
+ }
+ }
+ }
+
+ if (use_fast_path) {
+ DCHECK(Runtime::Current()->IsStarted());
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ DCHECK(called_method->SkipAccessChecks());
+ DCHECK(!called_method->IsNative());
+ DCHECK(!called_method->IsProxyMethod());
+ DCHECK(!called_method->IsIntrinsic());
+ DCHECK(!(called_method->GetDeclaringClass()->IsStringClass() &&
+ called_method->IsConstructor()));
+ DCHECK(type != kStatic || called_method->GetDeclaringClass()->IsInitialized());
+
const uint16_t number_of_inputs =
(is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
CodeItemDataAccessor accessor(called_method->DexInstructionData());
@@ -353,45 +368,6 @@ bool DoInvokeCustom(Thread* self,
}
}
-// Handles invoke-virtual-quick and invoke-virtual-quick-range instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-template<bool is_range>
-static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data,
- JValue* result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- ObjPtr<mirror::Object> const receiver = shadow_frame.GetVRegReference(vregC);
- if (UNLIKELY(receiver == nullptr)) {
- // We lost the reference to the method index so we cannot get a more
- // precised exception message.
- ThrowNullPointerExceptionFromDexPC();
- return false;
- }
- const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
- ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
- vtable_idx, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
- if (UNLIKELY(called_method == nullptr)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- } else if (UNLIKELY(!called_method->IsInvokable())) {
- called_method->ThrowInvocationTimeError();
- result->SetJ(0);
- return false;
- } else {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- jit->InvokeVirtualOrInterface(
- receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
- jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/false);
- }
- // No need to check since we've been quickened.
- return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
- }
-}
-
// Handles iget-XXX and sget-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
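The new is_quick template parameter lets DoInvoke serve invoke-virtual-quick[/range] directly: after quickening, the instruction's B operand holds a vtable index rather than a method index, so the callee is read straight from the receiver's embedded vtable, exactly as the deleted DoInvokeVirtualQuick did. A trimmed sketch of that branch:

    // Sketch of the is_quick path; with quickened instructions the B operand is a vtable index.
    const uint32_t vtable_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
    const uint32_t vregC = is_range ? inst->VRegC_3rc() : inst->VRegC_35c();
    ObjPtr<mirror::Object> receiver = shadow_frame.GetVRegReference(vregC);
    if (UNLIKELY(receiver == nullptr)) {
      ThrowNullPointerExceptionFromDexPC();  // the original method index is no longer known
      return false;
    }
    DCHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
    ArtMethod* called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
        vtable_idx, Runtime::Current()->GetClassLinker()->GetImagePointerSize());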
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index c430de2b85..48e1728c8b 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -27,6 +27,7 @@
#include "interpreter_common.h"
#include "jit/jit.h"
#include "jvalue-inl.h"
+#include "mirror/string-alloc-inl.h"
#include "nth_caller_visitor.h"
#include "safe_math.h"
#include "shadow_frame-inl.h"
@@ -1749,15 +1750,15 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
}
case Instruction::INVOKE_VIRTUAL_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<false>(
- self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false,
+ /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<true>(
- self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false,
+ /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index ba109bc2b7..878d9215dd 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -25,6 +25,7 @@
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
+#include "mirror/string-alloc-inl.h"
namespace art {
namespace interpreter {
@@ -321,25 +322,8 @@ extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- const uint32_t vregC = inst->VRegC_35c();
- const uint32_t vtable_idx = inst->VRegB_35c();
- ObjPtr<mirror::Object> const receiver = shadow_frame->GetVRegReference(vregC);
- if (receiver != nullptr) {
- ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
- vtable_idx, kRuntimePointerSize);
- if ((called_method != nullptr) && called_method->IsIntrinsic()) {
- if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- jit->InvokeVirtualOrInterface(
- receiver, shadow_frame->GetMethod(), shadow_frame->GetDexPC(), called_method);
- }
- return !self->IsExceptionPending();
- }
- }
- }
- return DoInvokeVirtualQuick<false>(
- self, *shadow_frame, inst, inst_data, result_register);
+ return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true,
+ /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
}
extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
@@ -349,8 +333,8 @@ extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokeVirtualQuick<true>(
- self, *shadow_frame, inst, inst_data, result_register);
+ return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true,
+ /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
}
extern "C" void MterpThreadFenceForConstructor() {
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 5def395007..9bc2179b63 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -52,6 +52,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "nth_caller_visitor.h"
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 8239602b50..082b311fbe 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -304,12 +304,9 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
base_flags = MAP_PRIVATE | MAP_ANON;
data_pages = MemMap::MapAnonymous(
"data-code-cache",
- /* addr= */ nullptr,
data_capacity + exec_capacity,
kProtRW,
/* low_4gb= */ true,
- /* reuse= */ false,
- /* reservation= */ nullptr,
&error_str);
}
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index b07d2c2690..5e01b7941f 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -53,6 +53,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "nativehelper/scoped_local_ref.h"
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 13eaf3da45..47b621ad1a 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -84,6 +84,15 @@ inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) {
}
inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+ const uint32_t num_preresolved_strings = NumPreResolvedStrings();
+ if (num_preresolved_strings != 0u) {
+ DCHECK_LT(string_idx.index_, num_preresolved_strings);
+ DCHECK_EQ(num_preresolved_strings, GetDexFile()->NumStringIds());
+ mirror::String* string = GetPreResolvedStrings()[string_idx.index_].Read();
+ if (LIKELY(string != nullptr)) {
+ return string;
+ }
+ }
return GetStrings()[StringSlotIndex(string_idx)].load(
std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
}
@@ -101,6 +110,18 @@ inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<Stri
WriteBarrier::ForEveryFieldWrite(this);
}
+inline void DexCache::SetPreResolvedString(dex::StringIndex string_idx,
+ ObjPtr<String> resolved) {
+ DCHECK(resolved != nullptr);
+ DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
+ GetPreResolvedStrings()[string_idx.index_] = GcRoot<mirror::String>(resolved);
+ Runtime* const runtime = Runtime::Current();
+ CHECK(runtime->IsAotCompiler());
+ CHECK(!runtime->IsActiveTransaction());
+ // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
+ WriteBarrier::ForEveryFieldWrite(this);
+}
+
inline void DexCache::ClearString(dex::StringIndex string_idx) {
DCHECK(Runtime::Current()->IsAotCompiler());
uint32_t slot_idx = StringSlotIndex(string_idx);
@@ -344,6 +365,12 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
for (size_t i = 0; i != num_call_sites; ++i) {
visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
}
+
+ GcRoot<mirror::String>* const preresolved_strings = GetPreResolvedStrings();
+ const size_t num_preresolved_strings = NumPreResolvedStrings();
+ for (size_t i = 0; i != num_preresolved_strings; ++i) {
+ visitor.VisitRootIfNonNull(preresolved_strings[i].AddressWithoutBarrier());
+ }
}
}
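GetResolvedString now consults two arrays: the optional pre-resolved array, which has one GcRoot slot per string id of the dex file and is indexed directly, and the existing fixed-size strings_ cache, which is indexed through StringSlotIndex. A sketch of the lookup order, written as in DexCache member context with the names used above:

    if (NumPreResolvedStrings() != 0u) {
      // Direct index: the pre-resolved array covers every string id.
      mirror::String* s = GetPreResolvedStrings()[string_idx.index_].Read();
      if (s != nullptr) {
        return s;
      }
    }
    // Otherwise fall back to the hash-indexed cache slot, as before this change.
    return GetStrings()[StringSlotIndex(string_idx)].load(
        std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);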
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 661f954ef5..8d2b838cb2 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -172,6 +172,21 @@ void DexCache::InitializeDexCache(Thread* self,
dex_file->NumCallSiteIds());
}
+void DexCache::AddPreResolvedStringsArray() {
+ DCHECK_EQ(NumPreResolvedStrings(), 0u);
+ Thread* const self = Thread::Current();
+ LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
+ const size_t num_strings = GetDexFile()->NumStringIds();
+ SetField32<false>(NumPreResolvedStringsOffset(), num_strings);
+ GcRoot<mirror::String>* strings =
+ linear_alloc->AllocArray<GcRoot<mirror::String>>(self, num_strings);
+ CHECK(strings != nullptr);
+ SetPreResolvedStrings(strings);
+ for (size_t i = 0; i < GetDexFile()->NumStringIds(); ++i) {
+ CHECK(GetPreResolvedStrings()[i].Read() == nullptr);
+ }
+}
+
void DexCache::Init(const DexFile* dex_file,
ObjPtr<String> location,
StringDexCacheType* strings,
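
AddPreResolvedStringsArray() only reserves the LinearAlloc-backed array (and checks that every slot starts out null); individual entries are filled later through SetPreResolvedString(). A hedged sketch of that calling pattern; the loop and the ResolveStringSomehow() helper are hypothetical, only AddPreResolvedStringsArray() and SetPreResolvedString() come from this patch:

    // Sketch only: pre-resolve every string of a dex file at AOT-compile time.
    void PreResolveAllStringsSketch(Thread* self, ObjPtr<mirror::DexCache> dex_cache)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      dex_cache->AddPreResolvedStringsArray();  // Allocates NumStringIds() null GcRoots.
      const DexFile& dex_file = *dex_cache->GetDexFile();
      for (uint32_t i = 0; i != dex_file.NumStringIds(); ++i) {
        dex::StringIndex idx(i);
        // Hypothetical helper standing in for class-linker string resolution.
        ObjPtr<mirror::String> s = ResolveStringSomehow(self, dex_cache, idx);
        if (s != nullptr) {
          dex_cache->SetPreResolvedString(idx, s);
        }
      }
    }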
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 6149f9c0be..58b199d639 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -217,6 +217,10 @@ class MANAGED DexCache final : public Object {
return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
}
+ static constexpr MemberOffset PreResolvedStringsOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(DexCache, preresolved_strings_);
+ }
+
static constexpr MemberOffset ResolvedTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
}
@@ -241,6 +245,10 @@ class MANAGED DexCache final : public Object {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
}
+ static constexpr MemberOffset NumPreResolvedStringsOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(DexCache, num_preresolved_strings_);
+ }
+
static constexpr MemberOffset NumResolvedTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
}
@@ -261,12 +269,20 @@ class MANAGED DexCache final : public Object {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
}
+ static constexpr size_t PreResolvedStringsAlignment() {
+ return alignof(GcRoot<mirror::String>);
+ }
+
String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetPreResolvedString(dex::StringIndex string_idx,
+ ObjPtr<mirror::String> resolved)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
+
// Clear a string for a string_idx, used to undo string intern transactions to make sure
// the string isn't kept live.
void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -318,10 +334,21 @@ class MANAGED DexCache final : public Object {
return GetFieldPtr64<StringDexCacheType*, kVerifyFlags>(StringsOffset());
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ GcRoot<mirror::String>* GetPreResolvedStrings() ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr64<GcRoot<mirror::String>*, kVerifyFlags>(PreResolvedStringsOffset());
+ }
+
void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(StringsOffset(), strings);
}
+ void SetPreResolvedStrings(GcRoot<mirror::String>* strings)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ SetFieldPtr<false>(PreResolvedStringsOffset(), strings);
+ }
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<TypeDexCacheType*, kVerifyFlags>(ResolvedTypesOffset());
@@ -384,6 +411,11 @@ class MANAGED DexCache final : public Object {
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ size_t NumPreResolvedStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetField32<kVerifyFlags>(NumPreResolvedStringsOffset());
+ }
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(NumResolvedTypesOffset());
}
@@ -429,12 +461,18 @@ class MANAGED DexCache final : public Object {
NativeDexCachePair<T> pair,
PointerSize ptr_size);
+ static size_t PreResolvedStringsSize(size_t num_strings) {
+ return sizeof(GcRoot<mirror::String>) * num_strings;
+ }
+
uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t MethodTypeSlotIndex(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ void AddPreResolvedStringsArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
void Init(const DexFile* dex_file,
ObjPtr<String> location,
@@ -516,22 +554,25 @@ class MANAGED DexCache final : public Object {
#endif
HeapReference<String> location_;
- // Number of elements in the call_sites_ array. Note that this appears here
- // because of our packing logic for 32 bit fields.
- uint32_t num_resolved_call_sites_;
-
- uint64_t dex_file_; // const DexFile*
- uint64_t resolved_call_sites_; // GcRoot<CallSite>* array with num_resolved_call_sites_
- // elements.
- uint64_t resolved_fields_; // std::atomic<FieldDexCachePair>*, array with
- // num_resolved_fields_ elements.
- uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
- // num_resolved_method_types_ elements.
- uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
- uint64_t resolved_types_; // TypeDexCacheType*, array with num_resolved_types_ elements.
- uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_
- // elements.
-
+ // Number of elements in the preresolved_strings_ array. Note that this appears here because of
+ // our packing logic for 32 bit fields.
+ uint32_t num_preresolved_strings_;
+
+ uint64_t dex_file_; // const DexFile*
+ uint64_t preresolved_strings_; // GcRoot<mirror::String>* array with num_preresolved_strings_
+ // elements.
+ uint64_t resolved_call_sites_; // GcRoot<CallSite>* array with num_resolved_call_sites_
+ // elements.
+ uint64_t resolved_fields_; // std::atomic<FieldDexCachePair>*, array with
+ // num_resolved_fields_ elements.
+ uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
+ // num_resolved_method_types_ elements.
+ uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
+ uint64_t resolved_types_; // TypeDexCacheType*, array with num_resolved_types_ elements.
+ uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_
+ // elements.
+
+ uint32_t num_resolved_call_sites_; // Number of elements in the call_sites_ array.
uint32_t num_resolved_fields_; // Number of elements in the resolved_fields_ array.
uint32_t num_resolved_method_types_; // Number of elements in the resolved_method_types_ array.
uint32_t num_resolved_methods_; // Number of elements in the resolved_methods_ array.
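
The field reorder at the end of the class follows the packing rule already noted in the comments: 32-bit counts are grouped so that every uint64_t pointer field starts at a naturally aligned offset. num_preresolved_strings_ takes the slot next to location_, and num_resolved_call_sites_ moves down to join the other counts. A minimal, standalone illustration of why the grouping matters (not the real DexCache layout; sizes quoted assume a typical LP64 ABI):

    // Splitting 32-bit fields around a 64-bit member forces padding on both sides of it.
    struct Unpacked {
      uint32_t count_a;   // like the old position of num_resolved_call_sites_
      uint64_t pointer;   // compiler inserts 4 bytes of padding before this
      uint32_t count_b;   // plus 4 bytes of tail padding after this (24 bytes total)
    };
    // Pairing the 32-bit counts keeps the 64-bit member naturally aligned (16 bytes total).
    struct Packed {
      uint32_t count_a;
      uint32_t count_b;
      uint64_t pointer;
    };
    static_assert(sizeof(Packed) <= sizeof(Unpacked), "pairing 32-bit fields avoids padding");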
diff --git a/runtime/mirror/string-alloc-inl.h b/runtime/mirror/string-alloc-inl.h
new file mode 100644
index 0000000000..c026c676ff
--- /dev/null
+++ b/runtime/mirror/string-alloc-inl.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_MIRROR_STRING_ALLOC_INL_H_
+#define ART_RUNTIME_MIRROR_STRING_ALLOC_INL_H_
+
+#include "string-inl.h"
+
+#include "android-base/stringprintf.h"
+
+#include "array.h"
+#include "base/bit_utils.h"
+#include "base/globals.h"
+#include "base/utils.h"
+#include "class.h"
+#include "class_root.h"
+#include "gc/heap-inl.h"
+#include "runtime.h"
+#include "thread.h"
+
+namespace art {
+namespace mirror {
+
+// Sets string count in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountVisitor {
+ public:
+ explicit SetStringCountVisitor(int32_t count) : count_(count) {
+ }
+
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+ string->SetCount(count_);
+ DCHECK(!string->IsCompressed() || kUseStringCompression);
+ }
+
+ private:
+ const int32_t count_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndBytesVisitor {
+ public:
+ SetStringCountAndBytesVisitor(int32_t count, Handle<ByteArray> src_array, int32_t offset,
+ int32_t high_byte)
+ : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
+ }
+
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+ string->SetCount(count_);
+ DCHECK(!string->IsCompressed() || kUseStringCompression);
+ int32_t length = String::GetLengthFromCount(count_);
+ const uint8_t* const src = reinterpret_cast<uint8_t*>(src_array_->GetData()) + offset_;
+ if (string->IsCompressed()) {
+ uint8_t* valueCompressed = string->GetValueCompressed();
+ for (int i = 0; i < length; i++) {
+ valueCompressed[i] = (src[i] & 0xFF);
+ }
+ } else {
+ uint16_t* value = string->GetValue();
+ for (int i = 0; i < length; i++) {
+ value[i] = high_byte_ + (src[i] & 0xFF);
+ }
+ }
+ }
+
+ private:
+ const int32_t count_;
+ Handle<ByteArray> src_array_;
+ const int32_t offset_;
+ const int32_t high_byte_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndValueVisitorFromCharArray {
+ public:
+ SetStringCountAndValueVisitorFromCharArray(int32_t count, Handle<CharArray> src_array,
+ int32_t offset) :
+ count_(count), src_array_(src_array), offset_(offset) {
+ }
+
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+ string->SetCount(count_);
+ const uint16_t* const src = src_array_->GetData() + offset_;
+ const int32_t length = String::GetLengthFromCount(count_);
+ if (kUseStringCompression && String::IsCompressed(count_)) {
+ for (int i = 0; i < length; ++i) {
+ string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
+ }
+ } else {
+ memcpy(string->GetValue(), src, length * sizeof(uint16_t));
+ }
+ }
+
+ private:
+ const int32_t count_;
+ Handle<CharArray> src_array_;
+ const int32_t offset_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndValueVisitorFromString {
+ public:
+ SetStringCountAndValueVisitorFromString(int32_t count,
+ Handle<String> src_string,
+ int32_t offset) :
+ count_(count), src_string_(src_string), offset_(offset) {
+ }
+
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
+ string->SetCount(count_);
+ const int32_t length = String::GetLengthFromCount(count_);
+ bool compressible = kUseStringCompression && String::IsCompressed(count_);
+ if (src_string_->IsCompressed()) {
+ const uint8_t* const src = src_string_->GetValueCompressed() + offset_;
+ memcpy(string->GetValueCompressed(), src, length * sizeof(uint8_t));
+ } else {
+ const uint16_t* const src = src_string_->GetValue() + offset_;
+ if (compressible) {
+ for (int i = 0; i < length; ++i) {
+ string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
+ }
+ } else {
+ memcpy(string->GetValue(), src, length * sizeof(uint16_t));
+ }
+ }
+ }
+
+ private:
+ const int32_t count_;
+ Handle<String> src_string_;
+ const int32_t offset_;
+};
+
+template <bool kIsInstrumented, typename PreFenceVisitor>
+inline String* String::Alloc(Thread* self,
+ int32_t utf16_length_with_flag,
+ gc::AllocatorType allocator_type,
+ const PreFenceVisitor& pre_fence_visitor) {
+ constexpr size_t header_size = sizeof(String);
+ const bool compressible = kUseStringCompression && String::IsCompressed(utf16_length_with_flag);
+ const size_t block_size = (compressible) ? sizeof(uint8_t) : sizeof(uint16_t);
+ size_t length = String::GetLengthFromCount(utf16_length_with_flag);
+ static_assert(sizeof(length) <= sizeof(size_t),
+ "static_cast<size_t>(utf16_length) must not lose bits.");
+ size_t data_size = block_size * length;
+ size_t size = header_size + data_size;
+ // String.equals() intrinsics assume zero-padding up to kObjectAlignment,
+ // so make sure the allocator clears the padding as well.
+ // http://b/23528461
+ size_t alloc_size = RoundUp(size, kObjectAlignment);
+
+ Runtime* runtime = Runtime::Current();
+ ObjPtr<Class> string_class = GetClassRoot<String>(runtime->GetClassLinker());
+ // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
+ // Do this by comparing with the maximum length that will _not_ cause an overflow.
+ const size_t overflow_length = (-header_size) / block_size; // Unsigned arithmetic.
+ const size_t max_alloc_length = overflow_length - 1u;
+ static_assert(IsAligned<sizeof(uint16_t)>(kObjectAlignment),
+ "kObjectAlignment must be at least as big as Java char alignment");
+ const size_t max_length = RoundDown(max_alloc_length, kObjectAlignment / block_size);
+ if (UNLIKELY(length > max_length)) {
+ self->ThrowOutOfMemoryError(
+ android::base::StringPrintf("%s of length %d would overflow",
+ Class::PrettyDescriptor(string_class).c_str(),
+ static_cast<int>(length)).c_str());
+ return nullptr;
+ }
+
+ gc::Heap* heap = runtime->GetHeap();
+ return down_cast<String*>(
+ heap->AllocObjectWithAllocator<kIsInstrumented, true>(self,
+ string_class,
+ alloc_size,
+ allocator_type,
+ pre_fence_visitor));
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
+ const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible= */ true);
+ SetStringCountVisitor visitor(length_with_flag);
+ return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromByteArray(Thread* self,
+ int32_t byte_length,
+ Handle<ByteArray> array,
+ int32_t offset,
+ int32_t high_byte,
+ gc::AllocatorType allocator_type) {
+ const uint8_t* const src = reinterpret_cast<uint8_t*>(array->GetData()) + offset;
+ high_byte &= 0xff; // Extract the relevant bits before determining `compressible`.
+ const bool compressible =
+ kUseStringCompression && String::AllASCII<uint8_t>(src, byte_length) && (high_byte == 0);
+ const int32_t length_with_flag = String::GetFlaggedCount(byte_length, compressible);
+ SetStringCountAndBytesVisitor visitor(length_with_flag, array, offset, high_byte << 8);
+ String* string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+ return string;
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromCharArray(Thread* self,
+ int32_t count,
+ Handle<CharArray> array,
+ int32_t offset,
+ gc::AllocatorType allocator_type) {
+ // It is a caller error to pass a count greater than the actual array's length.
+ DCHECK_GE(array->GetLength(), count);
+ const bool compressible = kUseStringCompression &&
+ String::AllASCII<uint16_t>(array->GetData() + offset, count);
+ const int32_t length_with_flag = String::GetFlaggedCount(count, compressible);
+ SetStringCountAndValueVisitorFromCharArray visitor(length_with_flag, array, offset);
+ String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+ return new_string;
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromString(Thread* self,
+ int32_t string_length,
+ Handle<String> string,
+ int32_t offset,
+ gc::AllocatorType allocator_type) {
+ const bool compressible = kUseStringCompression &&
+ ((string->IsCompressed()) ? true : String::AllASCII<uint16_t>(string->GetValue() + offset,
+ string_length));
+ const int32_t length_with_flag = String::GetFlaggedCount(string_length, compressible);
+ SetStringCountAndValueVisitorFromString visitor(length_with_flag, string, offset);
+ String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
+ return new_string;
+}
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_STRING_ALLOC_INL_H_
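
String::Alloc() above sizes the object from the compression flag carried in the count: one 16-bit code unit per character for uncompressed strings, a single byte per character for compressed ones, plus the header, rounded up to kObjectAlignment so the zero padding assumed by the String.equals() intrinsic is really cleared. A small worked example of that arithmetic; the 16-byte header and 8-byte alignment are assumed stand-ins, not values taken from this patch:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAssumedHeaderSize = 16;      // stand-in for sizeof(String)
    constexpr size_t kAssumedObjectAlignment = 8;  // stand-in for kObjectAlignment

    constexpr size_t AllocSizeSketch(size_t length, bool compressed) {
      const size_t block_size = compressed ? sizeof(uint8_t) : sizeof(uint16_t);
      const size_t size = kAssumedHeaderSize + block_size * length;
      // RoundUp(size, kObjectAlignment), as in Alloc().
      return (size + kAssumedObjectAlignment - 1) & ~(kAssumedObjectAlignment - 1);
    }

    // "hello" compressed: 16 + 5 = 21 bytes, rounded to 24; the 3 padding bytes
    // must be zero for the String.equals() fast path.
    static_assert(AllocSizeSketch(5, /*compressed=*/ true) == 24, "");
    // The same text uncompressed: 16 + 2 * 5 = 26 bytes, rounded to 32.
    static_assert(AllocSizeSketch(5, /*compressed=*/ false) == 32, "");

The overflow check works the same way: (-header_size) / block_size is the smallest length whose total size wraps around size_t, so Alloc() caps the accepted length one below that, rounded down to an alignment-friendly bound, before asking the heap for memory.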
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 3752d6dde9..d89ef1ecd9 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -20,17 +20,11 @@
#include "android-base/stringprintf.h"
-#include "array.h"
-#include "base/bit_utils.h"
#include "base/globals.h"
#include "base/utils.h"
-#include "class.h"
-#include "class_root.h"
+#include "class-inl.h"
#include "common_throws.h"
#include "dex/utf.h"
-#include "gc/heap-inl.h"
-#include "runtime.h"
-#include "thread.h"
namespace art {
namespace mirror {
@@ -49,127 +43,6 @@ inline uint32_t String::ClassSize(PointerSize pointer_size) {
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 2, pointer_size);
}
-// Sets string count in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountVisitor {
- public:
- explicit SetStringCountVisitor(int32_t count) : count_(count) {
- }
-
- void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Avoid AsString as object is not yet in live bitmap or allocation stack.
- ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
- string->SetCount(count_);
- DCHECK(!string->IsCompressed() || kUseStringCompression);
- }
-
- private:
- const int32_t count_;
-};
-
-// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountAndBytesVisitor {
- public:
- SetStringCountAndBytesVisitor(int32_t count, Handle<ByteArray> src_array, int32_t offset,
- int32_t high_byte)
- : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
- }
-
- void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Avoid AsString as object is not yet in live bitmap or allocation stack.
- ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
- string->SetCount(count_);
- DCHECK(!string->IsCompressed() || kUseStringCompression);
- int32_t length = String::GetLengthFromCount(count_);
- const uint8_t* const src = reinterpret_cast<uint8_t*>(src_array_->GetData()) + offset_;
- if (string->IsCompressed()) {
- uint8_t* valueCompressed = string->GetValueCompressed();
- for (int i = 0; i < length; i++) {
- valueCompressed[i] = (src[i] & 0xFF);
- }
- } else {
- uint16_t* value = string->GetValue();
- for (int i = 0; i < length; i++) {
- value[i] = high_byte_ + (src[i] & 0xFF);
- }
- }
- }
-
- private:
- const int32_t count_;
- Handle<ByteArray> src_array_;
- const int32_t offset_;
- const int32_t high_byte_;
-};
-
-// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountAndValueVisitorFromCharArray {
- public:
- SetStringCountAndValueVisitorFromCharArray(int32_t count, Handle<CharArray> src_array,
- int32_t offset) :
- count_(count), src_array_(src_array), offset_(offset) {
- }
-
- void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Avoid AsString as object is not yet in live bitmap or allocation stack.
- ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
- string->SetCount(count_);
- const uint16_t* const src = src_array_->GetData() + offset_;
- const int32_t length = String::GetLengthFromCount(count_);
- if (kUseStringCompression && String::IsCompressed(count_)) {
- for (int i = 0; i < length; ++i) {
- string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
- }
- } else {
- memcpy(string->GetValue(), src, length * sizeof(uint16_t));
- }
- }
-
- private:
- const int32_t count_;
- Handle<CharArray> src_array_;
- const int32_t offset_;
-};
-
-// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
-class SetStringCountAndValueVisitorFromString {
- public:
- SetStringCountAndValueVisitorFromString(int32_t count,
- Handle<String> src_string,
- int32_t offset) :
- count_(count), src_string_(src_string), offset_(offset) {
- }
-
- void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Avoid AsString as object is not yet in live bitmap or allocation stack.
- ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
- string->SetCount(count_);
- const int32_t length = String::GetLengthFromCount(count_);
- bool compressible = kUseStringCompression && String::IsCompressed(count_);
- if (src_string_->IsCompressed()) {
- const uint8_t* const src = src_string_->GetValueCompressed() + offset_;
- memcpy(string->GetValueCompressed(), src, length * sizeof(uint8_t));
- } else {
- const uint16_t* const src = src_string_->GetValue() + offset_;
- if (compressible) {
- for (int i = 0; i < length; ++i) {
- string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
- }
- } else {
- memcpy(string->GetValue(), src, length * sizeof(uint16_t));
- }
- }
- }
-
- private:
- const int32_t count_;
- Handle<String> src_string_;
- const int32_t offset_;
-};
-
inline uint16_t String::CharAt(int32_t index) {
int32_t count = GetLength();
if (UNLIKELY((index < 0) || (index >= count))) {
@@ -195,93 +68,6 @@ int32_t String::FastIndexOf(MemoryType* chars, int32_t ch, int32_t start) {
return -1;
}
-template <bool kIsInstrumented, typename PreFenceVisitor>
-inline String* String::Alloc(Thread* self, int32_t utf16_length_with_flag,
- gc::AllocatorType allocator_type,
- const PreFenceVisitor& pre_fence_visitor) {
- constexpr size_t header_size = sizeof(String);
- const bool compressible = kUseStringCompression && String::IsCompressed(utf16_length_with_flag);
- const size_t block_size = (compressible) ? sizeof(uint8_t) : sizeof(uint16_t);
- size_t length = String::GetLengthFromCount(utf16_length_with_flag);
- static_assert(sizeof(length) <= sizeof(size_t),
- "static_cast<size_t>(utf16_length) must not lose bits.");
- size_t data_size = block_size * length;
- size_t size = header_size + data_size;
- // String.equals() intrinsics assume zero-padding up to kObjectAlignment,
- // so make sure the allocator clears the padding as well.
- // http://b/23528461
- size_t alloc_size = RoundUp(size, kObjectAlignment);
-
- Runtime* runtime = Runtime::Current();
- ObjPtr<Class> string_class = GetClassRoot<String>(runtime->GetClassLinker());
- // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
- // Do this by comparing with the maximum length that will _not_ cause an overflow.
- const size_t overflow_length = (-header_size) / block_size; // Unsigned arithmetic.
- const size_t max_alloc_length = overflow_length - 1u;
- static_assert(IsAligned<sizeof(uint16_t)>(kObjectAlignment),
- "kObjectAlignment must be at least as big as Java char alignment");
- const size_t max_length = RoundDown(max_alloc_length, kObjectAlignment / block_size);
- if (UNLIKELY(length > max_length)) {
- self->ThrowOutOfMemoryError(
- android::base::StringPrintf("%s of length %d would overflow",
- Class::PrettyDescriptor(string_class).c_str(),
- static_cast<int>(length)).c_str());
- return nullptr;
- }
-
- gc::Heap* heap = runtime->GetHeap();
- return down_cast<String*>(
- heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, string_class, alloc_size,
- allocator_type, pre_fence_visitor));
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
- const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible= */ true);
- SetStringCountVisitor visitor(length_with_flag);
- return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocFromByteArray(Thread* self, int32_t byte_length,
- Handle<ByteArray> array, int32_t offset,
- int32_t high_byte, gc::AllocatorType allocator_type) {
- const uint8_t* const src = reinterpret_cast<uint8_t*>(array->GetData()) + offset;
- high_byte &= 0xff; // Extract the relevant bits before determining `compressible`.
- const bool compressible =
- kUseStringCompression && String::AllASCII<uint8_t>(src, byte_length) && (high_byte == 0);
- const int32_t length_with_flag = String::GetFlaggedCount(byte_length, compressible);
- SetStringCountAndBytesVisitor visitor(length_with_flag, array, offset, high_byte << 8);
- String* string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
- return string;
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocFromCharArray(Thread* self, int32_t count,
- Handle<CharArray> array, int32_t offset,
- gc::AllocatorType allocator_type) {
- // It is a caller error to have a count less than the actual array's size.
- DCHECK_GE(array->GetLength(), count);
- const bool compressible = kUseStringCompression &&
- String::AllASCII<uint16_t>(array->GetData() + offset, count);
- const int32_t length_with_flag = String::GetFlaggedCount(count, compressible);
- SetStringCountAndValueVisitorFromCharArray visitor(length_with_flag, array, offset);
- String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
- return new_string;
-}
-
-template <bool kIsInstrumented>
-inline String* String::AllocFromString(Thread* self, int32_t string_length, Handle<String> string,
- int32_t offset, gc::AllocatorType allocator_type) {
- const bool compressible = kUseStringCompression &&
- ((string->IsCompressed()) ? true : String::AllASCII<uint16_t>(string->GetValue() + offset,
- string_length));
- const int32_t length_with_flag = String::GetFlaggedCount(string_length, compressible);
- SetStringCountAndValueVisitorFromString visitor(length_with_flag, string, offset);
- String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
- return new_string;
-}
-
inline int32_t String::GetHashCode() {
int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_));
if (UNLIKELY(result == 0)) {
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index ae4c7cc13c..01315e745c 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "string-inl.h"
+#include "string-alloc-inl.h"
#include "arch/memcmp16.h"
#include "array-alloc-inl.h"
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index f9f87d83f6..203d200be3 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -175,10 +175,9 @@ static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
std::string error_message;
size_t length = static_cast<size_t>(end - start);
MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
- /* addr= */ nullptr,
length,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_message);
if (!dex_mem_map.IsValid()) {
ScopedObjectAccess soa(env);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index e78c245a30..c7b8ad4392 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -39,6 +39,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 78ec859ef9..4be2086ec9 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -23,8 +23,8 @@
#include "jni/jni_internal.h"
#include "mirror/array.h"
#include "mirror/object-inl.h"
+#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
-#include "mirror/string.h"
#include "native_util.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_fast_native_object_access-inl.h"
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index c6ad4e40d1..13f8d5be8e 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -20,7 +20,7 @@
#include "handle_scope-inl.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
-#include "mirror/string-inl.h"
+#include "mirror/string-alloc-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
#include "nativehelper/scoped_local_ref.h"
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index e021b77dae..a739c2d16e 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -31,8 +31,10 @@
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "art_field-inl.h"
#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
+#include "well_known_classes.h"
namespace art {
@@ -504,6 +506,33 @@ static void Unsafe_fullFence(JNIEnv*, jobject) {
std::atomic_thread_fence(std::memory_order_seq_cst);
}
+static void Unsafe_park(JNIEnv* env, jobject, jboolean isAbsolute, jlong time) {
+ ScopedObjectAccess soa(env);
+ Thread::Current()->Park(isAbsolute, time);
+}
+
+static void Unsafe_unpark(JNIEnv* env, jobject, jobject jthread) {
+ art::ScopedFastNativeObjectAccess soa(env);
+ if (jthread == nullptr || !env->IsInstanceOf(jthread, WellKnownClasses::java_lang_Thread)) {
+ ThrowIllegalArgumentException("Argument to unpark() was not a Thread");
+ return;
+ }
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
+ if (thread != nullptr) {
+ thread->Unpark();
+ } else {
+ // If thread is null, that means that either the thread is not started yet,
+ // or the thread has already terminated. Setting the field to true will be
+ // respected when the thread does start, and is harmless if the thread has
+ // already terminated.
+ ArtField* unparked =
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+ // JNI must use non transactional mode.
+ unparked->SetBoolean<false>(soa.Decode<mirror::Object>(jthread), JNI_TRUE);
+ }
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Unsafe, compareAndSwapInt, "(Ljava/lang/Object;JII)Z"),
FAST_NATIVE_METHOD(Unsafe, compareAndSwapLong, "(Ljava/lang/Object;JJJ)Z"),
@@ -546,6 +575,8 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Unsafe, putShort, "(Ljava/lang/Object;JS)V"),
FAST_NATIVE_METHOD(Unsafe, putFloat, "(Ljava/lang/Object;JF)V"),
FAST_NATIVE_METHOD(Unsafe, putDouble, "(Ljava/lang/Object;JD)V"),
+ FAST_NATIVE_METHOD(Unsafe, unpark, "(Ljava/lang/Object;)V"),
+ NATIVE_METHOD(Unsafe, park, "(ZJ)V"),
// Each of the getFoo variants is overloaded with a call that operates
// directly on a native pointer.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a152692897..c3121269c4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1161,8 +1161,10 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
reinterpret_cast<uint8_t*>(kSentinelAddr),
kPageSize,
PROT_NONE,
- /* low_4gb= */ true,
- /* error_msg= */ nullptr);
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
+ /*error_msg=*/ nullptr);
if (!protected_fault_page_.IsValid()) {
LOG(WARNING) << "Could not reserve sentinel fault page";
} else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 20b33277b3..f2e5012991 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -191,10 +191,9 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava)
TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
std::string error_msg;
MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
- /* addr= */ nullptr,
128 * kPageSize, // Just some small stack.
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(stack.IsValid()) << error_msg;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b5d214def4..4291709fb0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -44,6 +44,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/atomic.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/file_utils.h"
@@ -285,6 +286,115 @@ void Thread::AssertHasDeoptimizationContext() {
<< "No deoptimization context for thread " << *this;
}
+enum {
+ kPermitAvailable = 0, // Incrementing (0 -> 1) consumes the available permit.
+ kNoPermit = 1, // Incrementing (1 -> 2) registers the thread as a waiting parker.
+ kNoPermitWaiterWaiting = 2 // A parked thread is (about to be) blocked on the futex.
+};
+
+void Thread::Park(bool is_absolute, int64_t time) {
+ DCHECK(this == Thread::Current());
+#if ART_USE_FUTEXES
+ // Consume the permit, or mark as waiting. This cannot cause park_state to go
+ // outside of its valid range (0, 1, 2), because in all cases where 2 is
+ // assigned it is set back to 1 before returning, and this method cannot run
+ // concurrently with itself since it operates on the current thread.
+ int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
+ if (old_state == kNoPermit) {
+ // No permit was available; block the thread until it is unparked or the wait times out.
+ // TODO: Call to signal jvmti here
+ int result = 0;
+ if (!is_absolute && time == 0) {
+ // Thread.getState() is documented to return waiting for untimed parks.
+ ScopedThreadSuspension sts(this, ThreadState::kWaiting);
+ DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_PRIVATE,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ /* timeout */ nullptr,
+ nullptr,
+ 0);
+ } else if (time > 0) {
+ // Only actually suspend and futex_wait if we're going to wait for some
+ // positive amount of time - the kernel will reject negative times with
+ // EINVAL, and a zero time will just noop.
+
+ // Thread.getState() is documented to return timed wait for timed parks.
+ ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
+ DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+ timespec timespec;
+ if (is_absolute) {
+ // Time is millis when scheduled for an absolute time
+ timespec.tv_nsec = (time % 1000) * 1000000;
+ timespec.tv_sec = time / 1000;
+ // This odd looking pattern is recommended by futex documentation to
+ // wait until an absolute deadline, with otherwise identical behavior to
+ // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
+ // correct time when the system clock changes.
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ &timespec,
+ nullptr,
+ FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Time is nanos when scheduled for a relative time
+ timespec.tv_sec = time / 1000000000;
+ timespec.tv_nsec = time % 1000000000;
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_PRIVATE,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ &timespec,
+ nullptr,
+ 0);
+ }
+ }
+ if (result == -1) {
+ switch (errno) {
+ case EAGAIN:
+ case ETIMEDOUT:
+ case EINTR: break; // park() is allowed to spuriously return
+ default: PLOG(FATAL) << "Failed to park";
+ }
+ }
+ // Mark as no longer waiting, and consume permit if there is one.
+ tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
+ // TODO: Call to signal jvmti here
+ } else {
+ // the fetch_add has consumed the permit. immediately return.
+ DCHECK_EQ(old_state, kPermitAvailable);
+ }
+#else
+ #pragma clang diagnostic push
+ #pragma clang diagnostic warning "-W#warnings"
+ #warning "LockSupport.park/unpark implemented as noops without FUTEX support."
+ #pragma clang diagnostic pop
+ UNIMPLEMENTED(WARNING);
+ sched_yield();
+#endif
+}
+
+void Thread::Unpark() {
+#if ART_USE_FUTEXES
+ // Set permit available; will be consumed either by fetch_add (when the thread
+ // tries to park) or store (when the parked thread is woken up)
+ if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
+ == kNoPermitWaiterWaiting) {
+ int result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAKE_PRIVATE,
+ /* number of waiters = */ 1,
+ nullptr,
+ nullptr,
+ 0);
+ if (result == -1) {
+ PLOG(FATAL) << "Failed to unpark";
+ }
+ }
+#else
+ UNIMPLEMENTED(WARNING);
+#endif
+}
+
void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
sf, type, tlsPtr_.stacked_shadow_frame_record);
@@ -489,6 +599,22 @@ void* Thread::CreateCallback(void* arg) {
runtime->GetRuntimeCallbacks()->ThreadStart(self);
+ // Unpark ourselves if the java peer was unparked before it started (see
+ // b/28845097#comment49 for more information)
+
+ ArtField* unparkedField = jni::DecodeArtField(
+ WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+ bool should_unpark = false;
+ {
+ // Hold the lock here, so that if another thread calls unpark before the thread starts
+ // we don't observe the unparkedBeforeStart field before the unparker writes to it,
+ // which could cause a lost unpark.
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
+ }
+ if (should_unpark) {
+ self->Unpark();
+ }
// Invoke the 'run' method of our java.lang.Thread.
ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
jmethodID mid = WellKnownClasses::java_lang_Thread_run;
@@ -2133,6 +2259,9 @@ Thread::Thread(bool daemon)
tls32_.state_and_flags.as_struct.flags = 0;
tls32_.state_and_flags.as_struct.state = kNative;
tls32_.interrupted.store(false, std::memory_order_relaxed);
+ // Initialize with no permit; if the java Thread was unparked before being
+ // started, it will unpark itself before calling into java code.
+ tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
std::fill(tlsPtr_.rosalloc_runs,
tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
@@ -2449,12 +2578,15 @@ bool Thread::IsInterrupted() {
}
void Thread::Interrupt(Thread* self) {
- MutexLock mu(self, *wait_mutex_);
- if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
- return;
+ {
+ MutexLock mu(self, *wait_mutex_);
+ if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
+ return;
+ }
+ tls32_.interrupted.store(true, std::memory_order_seq_cst);
+ NotifyLocked(self);
}
- tls32_.interrupted.store(true, std::memory_order_seq_cst);
- NotifyLocked(self);
+ Unpark();
}
void Thread::Notify() {
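
Park() and Unpark() above implement a single-permit protocol on one atomic word: kPermitAvailable (0) means a permit is waiting to be consumed, kNoPermit (1) means none is, and kNoPermitWaiterWaiting (2) means the thread is (about to be) blocked on the futex. Park()'s fetch_add(1) either consumes the permit (0 -> 1) or registers the thread as a waiter (1 -> 2); Unpark()'s exchange(0) publishes a permit and wakes the futex only if it saw state 2. A portable sketch of the same state machine, using std::atomic plus a condition variable in place of the futex word (illustration only; the timed and absolute variants are omitted):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    class ParkPermitSketch {
     public:
      void Park() {
        // 0 -> 1 consumes the permit and returns; 1 -> 2 means we must wait.
        if (state_.fetch_add(1, std::memory_order_acquire) == 1) {
          std::unique_lock<std::mutex> lock(mu_);
          cv_.wait(lock, [this] { return state_.load(std::memory_order_relaxed) == 0; });
        }
        // Back to "no permit", consuming the permit if one was published while waiting.
        state_.store(1, std::memory_order_relaxed);
      }

      void Unpark() {
        // Publish a permit; wake only if a waiter had registered itself (state was 2).
        if (state_.exchange(0, std::memory_order_release) == 2) {
          std::lock_guard<std::mutex> lock(mu_);
          cv_.notify_one();
        }
      }

     private:
      std::atomic<int> state_{1};  // Start with no permit, as Thread::Thread() does.
      std::mutex mu_;
      std::condition_variable cv_;
    };

The unparkedBeforeStart field handled in CreateCallback() covers the remaining gap: an unpark that arrives before the native thread exists cannot touch park_state_, so it is recorded on the Java peer (under thread_list_lock_) and replayed by the new thread before it runs any Java code.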
diff --git a/runtime/thread.h b/runtime/thread.h
index 941867ce2d..b304cef74d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -581,6 +581,11 @@ class Thread {
return poison_object_cookie_;
}
+ // Parking for 0 ns of relative time means an untimed park; a negative time (which
+ // should already be handled in java code) returns immediately.
+ void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Unpark();
+
private:
void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
@@ -1543,6 +1548,8 @@ class Thread {
// Thread "interrupted" status; stays raised until queried or thrown.
Atomic<bool32_t> interrupted;
+ AtomicInteger park_state_;
+
// True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
// weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
// processing of the CC collector only. This is thread local so that we can enable/disable weak
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index a245f659d7..8723c99706 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -47,10 +47,9 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
stack_size += kPageSize;
std::string error_msg;
stack_ = MemMap::MapAnonymous(name.c_str(),
- /* addr= */ nullptr,
stack_size,
PROT_READ | PROT_WRITE,
- /* low_4gb= */ false,
+ /*low_4gb=*/ false,
&error_msg);
CHECK(stack_.IsValid()) << error_msg;
CHECK_ALIGNED(stack_.Begin(), kPageSize);
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 206418fbc6..94faa626f6 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -128,6 +128,7 @@ jfieldID WellKnownClasses::java_lang_Thread_lock;
jfieldID WellKnownClasses::java_lang_Thread_name;
jfieldID WellKnownClasses::java_lang_Thread_priority;
jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
+jfieldID WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_ngroups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
@@ -376,6 +377,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Thread_name = CacheField(env, java_lang_Thread, false, "name", "Ljava/lang/String;");
java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
+ java_lang_Thread_unparkedBeforeStart = CacheField(env, java_lang_Thread, false, "unparkedBeforeStart", "Z");
java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "[Ljava/lang/ThreadGroup;");
java_lang_ThreadGroup_ngroups = CacheField(env, java_lang_ThreadGroup, false, "ngroups", "I");
java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index ce5ab1df84..8c85228dfc 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -137,6 +137,7 @@ struct WellKnownClasses {
static jfieldID java_lang_Thread_name;
static jfieldID java_lang_Thread_priority;
static jfieldID java_lang_Thread_nativePeer;
+ static jfieldID java_lang_Thread_unparkedBeforeStart;
static jfieldID java_lang_ThreadGroup_groups;
static jfieldID java_lang_ThreadGroup_ngroups;
static jfieldID java_lang_ThreadGroup_mainThreadGroup;
diff --git a/test/004-ThreadStress/src-art/Main.java b/test/004-ThreadStress/src-art/Main.java
index 3a89f4f166..b8bfafb2d3 100644
--- a/test/004-ThreadStress/src-art/Main.java
+++ b/test/004-ThreadStress/src-art/Main.java
@@ -26,6 +26,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Semaphore;
+import java.util.concurrent.locks.LockSupport;
// Run on host with:
// javac ThreadTest.java && java ThreadStress && rm *.class
@@ -251,6 +252,31 @@ public class Main implements Runnable {
}
}
+ private final static class TimedPark extends Operation {
+ private final static int SLEEP_TIME = 100;
+
+ public TimedPark() {}
+
+ @Override
+ public boolean perform() {
+ LockSupport.parkNanos(this, SLEEP_TIME * 1000000);
+ return true;
+ }
+ }
+
+ private final static class UnparkAllThreads extends Operation {
+ public UnparkAllThreads() {}
+
+ @Override
+ public boolean perform() {
+ Set<Thread> threads = Thread.getAllStackTraces().keySet();
+ for (Thread candidate : threads) {
+ LockSupport.unpark(candidate);
+ }
+ return true;
+ }
+ }
+
private final static class SyncAndWork extends Operation {
private final Object lock;
@@ -320,7 +346,9 @@ public class Main implements Runnable {
frequencyMap.put(new NonMovingAlloc(), 0.025); // 5/200
frequencyMap.put(new StackTrace(), 0.1); // 20/200
frequencyMap.put(new Exit(), 0.225); // 45/200
- frequencyMap.put(new Sleep(), 0.125); // 25/200
+ frequencyMap.put(new Sleep(), 0.075); // 15/200
+ frequencyMap.put(new TimedPark(), 0.025); // 5/200
+ frequencyMap.put(new UnparkAllThreads(), 0.025); // 5/200
frequencyMap.put(new TimedWait(lock), 0.05); // 10/200
frequencyMap.put(new Wait(lock), 0.075); // 15/200
frequencyMap.put(new QueuedWait(semaphore), 0.05); // 10/200
@@ -341,9 +369,11 @@ public class Main implements Runnable {
private final static Map<Operation, Double> createLockFrequencyMap(Object lock) {
Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
frequencyMap.put(new Sleep(), 0.2); // 40/200
- frequencyMap.put(new TimedWait(lock), 0.2); // 40/200
- frequencyMap.put(new Wait(lock), 0.2); // 40/200
+ frequencyMap.put(new TimedWait(lock), 0.1); // 20/200
+ frequencyMap.put(new Wait(lock), 0.1); // 20/200
frequencyMap.put(new SyncAndWork(lock), 0.4); // 80/200
+ frequencyMap.put(new TimedPark(), 0.1); // 20/200
+ frequencyMap.put(new UnparkAllThreads(), 0.1); // 20/200
return frequencyMap;
}
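
Each frequency map is a probability distribution, so the operation weights have to keep summing to 1.0. In the lock map, the 20/200 taken from TimedWait and the 20/200 taken from Wait are handed to the two new park operations:

    0.2 + 0.1 + 0.1 + 0.4 + 0.1 + 0.1 = 1.0   (40 + 20 + 20 + 80 + 20 + 20 = 200/200)

The main map follows the same bookkeeping: Sleep drops from 25/200 to 15/200 (0.075) to make room for TimedPark and UnparkAllThreads at 5/200 each.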
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 065b854a6a..1bd56d1a53 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -1,9 +1,9 @@
---
true true
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780000, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780004, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780001, length=-1]
@@ -44,14 +44,14 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780005, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780009, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780006, length=-1]
@@ -90,18 +90,18 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
5@1002 --(field@9)--> 6@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123456780005, length=-1]
---
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
3@1001 --(class)--> 1001@0 [size=123456780011, length=-1]
---
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
3@1001 --(class)--> 1001@0 [size=123456780016, length=-1]
---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123456780020, length=-1]
3@1001 --(class)--> 1001@0 [size=123456780021, length=-1]
@@ -110,14 +110,14 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123456780025, length=-1]
3@1001 --(class)--> 1001@0 [size=123456780026, length=-1]
@@ -198,10 +198,10 @@ root@root --(thread)--> 1@1000 [size=16, length=-1]
---
---
---- untagged objects
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780050, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780054, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780051, length=-1]
@@ -242,14 +242,14 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780055, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780059, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780056, length=-1]
@@ -289,9 +289,9 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
6@1000 --(class)--> 1000@0 [size=123456780055, length=-1]
---
---- tagged classes
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780060, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780064, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780061, length=-1]
@@ -316,9 +316,9 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
5@1002 --(field@8)--> 500@0 [size=20, length=2]
6@1000 --(class)--> 1000@0 [size=123456780060, length=-1]
---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
-root@root --(thread)--> 3000@0 [size=132, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
+root@root --(thread)--> 3000@0 [size=124, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780065, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780069, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780066, length=-1]