For 32-bit ARM devices, align method headers and code to 64-byte
boundaries.
For other architectures, move back to instruction set alignment.
Rename kJitCodeAlignment to kJitCodeAccountingBytes since it's
now only used for accounting rather than alignment.
Bug: 132205399
Test: art/test.py --host --jit
Test: manual (described in bug)
Change-Id: I88f5f39381bf0331ce8540a929c6a68b3b5e0c75
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index e7ef03e..37646b3 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -97,7 +97,7 @@
}
template class MemoryRangeBitmap<CardTable::kCardSize>;
-template class MemoryRangeBitmap<jit::kJitCodeAlignment>;
+template class MemoryRangeBitmap<jit::kJitCodeAccountingBytes>;
} // namespace accounting
} // namespace gc
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d90bdcb..100b399 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -372,8 +372,19 @@
return in_collection;
}
+static size_t GetJitCodeAlignment() {
+ if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kThumb2) {
+ // Some devices with 32-bit ARM kernels need additional JIT code alignment when using dual
+ // view JIT (b/132205399). The alignment returned here coincides with the typical ARM d-cache
+ // line (though the value should be probed ideally). Both the method header and code in the
+ // cache are aligned to this size.
+ return 64;
+ }
+ return GetInstructionSetAlignment(kRuntimeISA);
+}
+
static uintptr_t FromCodeToAllocation(const void* code) {
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ size_t alignment = GetJitCodeAlignment();
return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}
@@ -703,14 +714,14 @@
{
ScopedCodeCacheWrite scc(*region);
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ size_t alignment = GetJitCodeAlignment();
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
size_t total_size = header_size + code_size;
// AllocateCode allocates memory in non-executable region for alignment header and code. The
// header size may include alignment padding.
- uint8_t* nox_memory = region->AllocateCode(total_size);
+ uint8_t* nox_memory = region->AllocateCode(total_size, alignment);
if (nox_memory == nullptr) {
return nullptr;
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 9683b48..8a9c010 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -74,7 +74,9 @@
class MarkCodeClosure;
-using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
+// Type of bitmap used for tracking live functions in the JIT code cache for the purposes
+// of garbage collecting code.
+using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;
class JitCodeCache {
public:
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index df61aaa..ab9a817 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -296,15 +296,14 @@
}
}
-uint8_t* JitMemoryRegion::AllocateCode(size_t code_size) {
+uint8_t* JitMemoryRegion::AllocateCode(size_t code_size, size_t alignment) {
// Each allocation should be on its own set of cache lines.
// `code_size` covers the OatQuickMethodHeader, the JIT generated machine code,
// and any alignment padding.
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
DCHECK_GT(code_size, header_size);
uint8_t* result = reinterpret_cast<uint8_t*>(
- mspace_memalign(exec_mspace_, kJitCodeAlignment, code_size));
+ mspace_memalign(exec_mspace_, alignment, code_size));
// Ensure the header ends up at expected instruction alignment.
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
used_memory_for_code_ += mspace_usable_size(result);
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index 1512cad..dda9fc2 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -28,12 +28,9 @@
class TestZygoteMemory;
-// Alignment in bytes that will suit all architectures for JIT code cache allocations. The
-// allocated block is used for method header followed by generated code. Allocations should be
-// aligned to avoid sharing cache lines between different allocations. The alignment should be
-// determined from the hardware, but this isn't readily exposed in userland plus some hardware
-// misreports.
-static constexpr int kJitCodeAlignment = 64;
+// Number of bytes represented by a bit in the CodeCacheBitmap. Value is reasonable for all
+// architectures.
+static constexpr int kJitCodeAccountingBytes = 16;
// Represents a memory region for the JIT, where code and data are stored. This class
// provides allocation and deallocation primitives.
@@ -65,7 +62,7 @@
// Set the footprint limit of the code cache.
void SetFootprintLimit(size_t new_footprint) REQUIRES(Locks::jit_lock_);
- uint8_t* AllocateCode(size_t code_size) REQUIRES(Locks::jit_lock_);
+ uint8_t* AllocateCode(size_t code_size, size_t alignment) REQUIRES(Locks::jit_lock_);
void FreeCode(uint8_t* code) REQUIRES(Locks::jit_lock_);
uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
void FreeData(uint8_t* data) REQUIRES(Locks::jit_lock_);