Remove unnecessary indirection from MemMap.
Avoid passing raw MemMap pointers around by making MemMap
moveable and returning MemMap objects by value. Previously
we could have a valid zero-size MemMap, but this is now
forbidden.
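
The new shape, roughly (a minimal, hypothetical sketch; the real
class also tracks details such as the mapping name and protection
flags, and unmaps in its destructor):

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    // Simplified sketch of a moveable MemMap. A default-constructed
    // (or moved-from) map is the single invalid state, replacing the
    // old "valid but zero-size" case.
    class MemMap {
     public:
      MemMap() = default;  // Invalid map.
      MemMap(uint8_t* begin, size_t size) : begin_(begin), size_(size) {}

      MemMap(const MemMap&) = delete;  // Ownership is unique: no copies.
      MemMap& operator=(const MemMap&) = delete;

      MemMap(MemMap&& other) noexcept { Swap(other); }
      MemMap& operator=(MemMap&& other) noexcept {
        MemMap(std::move(other)).Swap(*this);  // Current state moves into
        return *this;                          // the dying temporary;
      }                                        // 'other' is left invalid.

      bool IsValid() const { return begin_ != nullptr; }

     private:
      void Swap(MemMap& other) {
        std::swap(begin_, other.begin_);
        std::swap(size_, other.size_);
      }

      uint8_t* begin_ = nullptr;
      size_t size_ = 0u;
    };

Factory functions such as MapAnonymous() can then return MemMap by
value, and callers check IsValid() instead of testing a pointer for
null.
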
MemMap::RemapAtEnd() no longer calls munmap() explicitly:
mmap() with MAP_FIXED atomically replaces any existing
mappings in the overlapping region.
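
This is safe because POSIX guarantees that MAP_FIXED discards the
overlapped part of existing mappings in one step, so there is no
window in which another thread could map the just-freed range. A
minimal sketch of the idea (RemapTail and its parameters are
hypothetical, not the actual RemapAtEnd() code):

    #include <cstddef>
    #include <cstdint>
    #include <sys/mman.h>

    // Carve the tail of an existing anonymous mapping into a fresh,
    // independent mapping. MAP_FIXED replaces the overlapped pages in
    // place, so no prior munmap() of [tail_begin, tail_begin + tail_size)
    // is needed.
    void* RemapTail(uint8_t* tail_begin, size_t tail_size, int prot) {
      return mmap(tail_begin,
                  tail_size,
                  prot,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                  /*fd=*/ -1,
                  /*offset=*/ 0);
    }
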
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: m test-art-target-gtest
Test: testrunner.py --target --optimizing
Change-Id: I12bd453c26a396edc20eb141bfd4dad20923f170
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 632b45b..a4a0f8f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -28,6 +28,7 @@
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
@@ -39,7 +40,6 @@
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
-class MemMap;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
@@ -279,8 +279,8 @@
private:
// Take ownership of maps.
- JitCodeCache(MemMap* code_map,
- MemMap* data_map,
+ JitCodeCache(MemMap&& code_map,
+ MemMap&& data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -396,9 +396,9 @@
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(lock_);
// Mem map which holds code.
- std::unique_ptr<MemMap> code_map_;
+ MemMap code_map_;
// Mem map which holds data (stack maps and profiling info).
- std::unique_ptr<MemMap> data_map_;
+ MemMap data_map_;
// The opaque mspace for allocating code.
void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
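
For reference, a caller-side sketch of the ownership transfer the
diff introduces (a hypothetical stand-in class reusing the MemMap
sketch above; the real constructor takes more parameters):

    #include <utility>

    // Take MemMap by rvalue reference and store it by value,
    // removing the unique_ptr indirection.
    class CacheSketch {
     public:
      CacheSketch(MemMap&& code_map, MemMap&& data_map)
          : code_map_(std::move(code_map)),    // Steal ownership; the
            data_map_(std::move(data_map)) {}  // caller's maps become
                                               // invalid.
     private:
      MemMap code_map_;
      MemMap data_map_;
    };
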