author Orion Hodson <oth@google.com> 2019-06-18 11:18:08 +0100
committer TreeHugger Robot <treehugger-gerrit@google.com> 2019-06-18 21:47:58 +0000
commit 8eefb246019019f8081d11ba932bbabc04ca428b (patch)
tree 091a6e416471c1e0a89486f22b2c9ec3a28eb487 /runtime/jit/jit_code_cache.cc
parent fb5eba3545b0c083ceead9b139c0759a9252bf84 (diff)
For 32-bit ARM devices align method headers and code to 64-byte
boundaries. For other architectures, move back to instruction set
alignment.

Rename kJitCodeCacheAlignment to kJitCodeAccountingBytes since it's now
only used for accounting rather than alignment.

Bug: 132205399
Test: manual (described in bug)
Merged-In: I88f5f39381bf0331ce8540a929c6a68b3b5e0c75
Change-Id: I88f5f39381bf0331ce8540a929c6a68b3b5e0c75
Diffstat (limited to 'runtime/jit/jit_code_cache.cc')
-rw-r--r--  runtime/jit/jit_code_cache.cc | 19
1 file changed, 15 insertions(+), 4 deletions(-)
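At a glance, the change funnels every alignment decision in the JIT code cache through one helper: 64 bytes on 32-bit ARM (kArm/kThumb2), the instruction set's own alignment everywhere else. A minimal standalone sketch of that policy follows; the InstructionSet enum and the 16-byte non-ARM fallback are simplified stand-ins for ART's real definitions, not its actual values.

    #include <cstddef>

    enum class InstructionSet { kArm, kThumb2, kArm64, kX86, kX86_64 };

    // Sketch of the selection the diff below introduces (requires C++14).
    constexpr size_t GetJitCodeAlignmentSketch(InstructionSet isa) {
      if (isa == InstructionSet::kArm || isa == InstructionSet::kThumb2) {
        return 64;  // typical ARM d-cache line size; works around b/132205399
      }
      return 16;  // stand-in for GetInstructionSetAlignment(isa)
    }

    static_assert(GetJitCodeAlignmentSketch(InstructionSet::kArm) == 64,
                  "32-bit ARM gets 64-byte alignment");
    static_assert(GetJitCodeAlignmentSketch(InstructionSet::kX86) == 16,
                  "other ISAs keep their instruction set alignment");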
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 20c35c3109..fe2d309333 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -645,8 +645,19 @@ bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
return in_collection;
}
+static size_t GetJitCodeAlignment() {
+  if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kThumb2) {
+    // Some devices with 32-bit ARM kernels need additional JIT code alignment when using
+    // dual-view JIT (b/132205399). The alignment returned here matches the typical ARM d-cache
+    // line size (ideally the value would be probed at runtime). Both the method header and the
+    // code in the cache are aligned to this size; anything less than 64 bytes exhibits the problem.
+    return 64;
+  }
+  return GetInstructionSetAlignment(kRuntimeISA);
+}
+
static uintptr_t FromCodeToAllocation(const void* code) {
-  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+  size_t alignment = GetJitCodeAlignment();
return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}
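The hunk above relies on a simple layout invariant: the method header is padded out to the alignment and placed immediately before the code, so FromCodeToAllocation can recover the allocation base with a single subtraction. A small self-contained illustration of that round trip; the 24-byte header size and the base address are invented for the example.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Round x up to the next multiple of n (n must be a power of two).
    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    int main() {
      const size_t alignment = 64;                   // GetJitCodeAlignment() on 32-bit ARM
      const size_t header = RoundUp(24, alignment);  // 24 stands in for sizeof(OatQuickMethodHeader)
      const uintptr_t allocation = 0x40000;          // hypothetical 64-byte aligned allocation base
      const uintptr_t code = allocation + header;    // entry point directly after the padded header
      assert(code - header == allocation);           // FromCodeToAllocation's subtraction, inverted
      return 0;
    }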
@@ -990,7 +1001,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
{
ScopedCodeCacheWrite scc(this);
-    size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+    size_t alignment = GetJitCodeAlignment();
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
size_t total_size = header_size + code_size;
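CommitCodeInternal sizes one reservation to hold the padded header followed by the code. A compile-time sketch of that arithmetic, with a 24-byte header and a 200-byte method body standing in for the real sizes:

    #include <cstddef>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    constexpr size_t kAlignment = 64;
    constexpr size_t kHeaderSize = RoundUp(24, kAlignment);  // padded method header
    constexpr size_t kCodeSize = 200;                        // invented method size
    constexpr size_t kTotalSize = kHeaderSize + kCodeSize;   // one block: header, then code

    // With an aligned base, the code entry at base + kHeaderSize keeps the alignment.
    static_assert(kHeaderSize % kAlignment == 0, "header padding preserves code alignment");
    static_assert(kTotalSize == 264, "64-byte padded header + 200-byte code");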
@@ -2160,9 +2171,9 @@ void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
uint8_t* JitCodeCache::AllocateCode(size_t allocation_size) {
// Each allocation should be on its own set of cache lines. The allocation must be large enough
// for header, code, and any padding.
+  size_t alignment = GetJitCodeAlignment();
uint8_t* result = reinterpret_cast<uint8_t*>(
-      mspace_memalign(exec_mspace_, kJitCodeAlignment, allocation_size));
-  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+      mspace_memalign(exec_mspace_, alignment, allocation_size));
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
// Ensure the header ends up at expected instruction alignment.
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
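AllocateCode pairs the aligned allocation with a DCHECK that the code entry point (base plus padded header) lands on the same boundary. A hedged sketch of that pattern, with POSIX posix_memalign standing in for dlmalloc's mspace_memalign on exec_mspace_, which is not reproduced here:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    int main() {
      const size_t alignment = 64;     // GetJitCodeAlignment() on 32-bit ARM
      const size_t header_size = 64;   // assumed already padded: RoundUp(sizeof(header), alignment)
      const size_t total_size = header_size + 200;  // header plus a 200-byte method, invented
      void* base = nullptr;
      if (posix_memalign(&base, alignment, total_size) != 0) {
        return 1;  // allocation failed
      }
      // Mirrors DCHECK_ALIGNED_PARAM: the code entry after the header stays aligned.
      assert((reinterpret_cast<uintptr_t>(base) + header_size) % alignment == 0);
      free(base);
      return 0;
    }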