Prevent IndirectReferenceTable from having a non-page-aligned size
Due to how we resized the IndirectReferenceTable, its backing allocation
could end up with a size that was not page-aligned. On some architectures
this could lead us to madvise a region with a negative length, causing
crashes. To fix this we changed the IRT to never have a non-page-aligned
length and slightly changed how we calculate the region to madvise.
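For illustration, a minimal sketch of the arithmetic the fix relies on.
kPageSize, IrtEntry, and RoundUp below are simplified stand-ins for the
ART definitions, and the sizes are assumed example values, not taken from
the runtime:

  // Sketch only (not the ART code): shows why rounding the table size up
  // to a whole number of pages keeps the trimmed region non-negative and
  // page-aligned.
  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  constexpr size_t kPageSize = 4096;  // assumed page size for the example

  constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

  struct IrtEntry { uint32_t serial; uint32_t reference; };  // stand-in entry

  int main() {
    const size_t max_count = 1000;                            // requested capacity
    const size_t used_bytes = max_count * sizeof(IrtEntry);   // 8000 bytes

    // Before the fix the mapping ended at an unaligned 8000 bytes. With every
    // entry in use, Trim() aligned the release start up to 8192, which is past
    // the end of the mapping, so (end - start) went negative.
    const size_t old_end = used_bytes;                             // 8000
    const size_t release_start = RoundUp(used_bytes, kPageSize);   // 8192
    assert(release_start > old_end);                               // the bug

    // After the fix the mapping size is rounded up to whole pages, so the end
    // is page-aligned and (end - start) is always >= 0 and page-aligned, which
    // is what Trim() hands to madvise().
    const size_t new_end = RoundUp(used_bytes, kPageSize);         // 8192
    assert(new_end >= release_start);
    assert((new_end - release_start) % kPageSize == 0);
    return 0;
  }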
Test: ./test.py --host
Test: ./test.py --host --debuggable --64 --with-agent $ANDROID_HOST_OUT/lib64/libtifastd.so=ClassFileLoadHook
Test: Manual checks on emulator
Bug: 152421535
Change-Id: I9eb99c750e6b6230998bf8ba314be426ad8c228c
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 361dccb..0deb917 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -14,11 +14,14 @@
* limitations under the License.
*/
+#include "base/bit_utils.h"
+#include "base/globals.h"
#include "indirect_reference_table-inl.h"
#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
+#include "indirect_reference_table.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
@@ -78,7 +81,7 @@
// Overflow and maximum check.
CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
- const size_t table_bytes = max_count * sizeof(IrtEntry);
+ const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), kPageSize);
table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
table_bytes,
PROT_READ | PROT_WRITE,
@@ -95,6 +98,8 @@
}
segment_state_ = kIRTFirstSegment;
last_known_previous_state_ = kIRTFirstSegment;
+ // Take into account the actual length.
+ max_entries_ = table_bytes / sizeof(IrtEntry);
}
IndirectReferenceTable::~IndirectReferenceTable() {
@@ -220,7 +225,7 @@
}
// Note: the above check also ensures that there is no overflow below.
- const size_t table_bytes = new_size * sizeof(IrtEntry);
+ const size_t table_bytes = RoundUp(new_size * sizeof(IrtEntry), kPageSize);
MemMap new_map = MemMap::MapAnonymous("indirect ref table",
table_bytes,
PROT_READ | PROT_WRITE,
@@ -233,7 +238,9 @@
memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
table_mem_map_ = std::move(new_map);
table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
- max_entries_ = new_size;
+ const size_t real_new_size = table_bytes / sizeof(IrtEntry);
+ DCHECK_GE(real_new_size, new_size);
+ max_entries_ = real_new_size;
return true;
}
@@ -445,8 +452,11 @@
void IndirectReferenceTable::Trim() {
ScopedTrace trace(__PRETTY_FUNCTION__);
const size_t top_index = Capacity();
- auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
- uint8_t* release_end = table_mem_map_.End();
+ uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
+ uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
+ DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
+ DCHECK_ALIGNED(release_end, kPageSize);
+ DCHECK_ALIGNED(release_end - release_start, kPageSize);
madvise(release_start, release_end - release_start, MADV_DONTNEED);
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index b46435d..86b92cf 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -381,7 +381,8 @@
return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
}
- // Resize the backing table. Currently must be larger than the current size.
+ // Resize the backing table to be at least new_size elements long. Currently
+ // must be larger than the current size. After this returns, max_entries_ >= new_size.
bool Resize(size_t new_size, std::string* error_msg);
void RecoverHoles(IRTSegmentState from);