Diffstat (limited to 'runtime/jni/local_reference_table.cc')
 runtime/jni/local_reference_table.cc | 564 ++++++++++++++++++++++++++++++++++
 1 file changed, 564 insertions(+)
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
new file mode 100644
index 0000000000..6cbbde7a05
--- /dev/null
+++ b/runtime/jni/local_reference_table.cc
@@ -0,0 +1,564 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "local_reference_table-inl.h"
+
+#include "base/globals.h"
+#include "base/mutator_locked_dumpable.h"
+#include "base/systrace.h"
+#include "base/utils.h"
+#include "indirect_reference_table.h"
+#include "jni/java_vm_ext.h"
+#include "jni/jni_internal.h"
+#include "mirror/object-inl.h"
+#include "nth_caller_visitor.h"
+#include "reference_table.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread.h"
+
+#include <cstdlib>
+
+namespace art {
+namespace jni {
+
+static constexpr bool kDumpStackOnNonLocalReference = false;
+static constexpr bool kDebugLRT = false;
+
+// Maximum table size we allow.
+static constexpr size_t kMaxTableSizeInBytes = 128 * MB;
+
+void LocalReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
+ // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
+ JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+ if (!vm->IsCheckJniEnabled()) {
+ // Otherwise, we want to abort rather than hand back a bad reference.
+ LOG(FATAL) << msg;
+ } else {
+ LOG(ERROR) << msg;
+ }
+}
+
+// Mmap a "local ref table" region. The table_bytes argument must be a multiple of the page size.
+static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
+ MemMap result = MemMap::MapAnonymous("local ref table",
+ table_bytes,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ false,
+ error_msg);
+ if (!result.IsValid() && error_msg->empty()) {
+ *error_msg = "Unable to map memory for indirect ref table";
+ }
+ return result;
+}
+
+SmallLrtAllocator::SmallLrtAllocator()
+ : small_lrt_freelist_(nullptr), lock_("Small LRT table lock", LockLevel::kGenericBottomLock) {
+}
+
+// Allocate an LRT table with space for kSmallLrtEntries entries.
+LrtEntry* SmallLrtAllocator::Allocate(std::string* error_msg) {
+ MutexLock lock(Thread::Current(), lock_);
+ if (small_lrt_freelist_ == nullptr) {
+ // Refill.
+ MemMap map = NewLRTMap(kPageSize, error_msg);
+ if (map.IsValid()) {
+ small_lrt_freelist_ = reinterpret_cast<LrtEntry*>(map.Begin());
+ for (uint8_t* p = map.Begin(); p + kInitialLrtBytes < map.End(); p += kInitialLrtBytes) {
+ *reinterpret_cast<LrtEntry**>(p) = reinterpret_cast<LrtEntry*>(p + kInitialLrtBytes);
+ }
+ shared_lrt_maps_.emplace_back(std::move(map));
+ }
+ }
+ if (small_lrt_freelist_ == nullptr) {
+ return nullptr;
+ }
+ LrtEntry* result = small_lrt_freelist_;
+ small_lrt_freelist_ = *reinterpret_cast<LrtEntry**>(small_lrt_freelist_);
+  // Re-initialize the first entry to clear the freelist pointer stored there.
+ new(result) LrtEntry();
+ return result;
+}
+
+void SmallLrtAllocator::Deallocate(LrtEntry* unneeded) {
+ MutexLock lock(Thread::Current(), lock_);
+ *reinterpret_cast<LrtEntry**>(unneeded) = small_lrt_freelist_;
+ small_lrt_freelist_ = unneeded;
+}
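+
+// The freelist above is intrusive: each free small table stores the pointer to
+// the next free table in its own first word. A minimal standalone sketch of the
+// idea (illustration only; `FreeNode` is a hypothetical name, not part of the
+// runtime):
+//
+//   struct FreeNode { FreeNode* next; };  // overlays the first LrtEntry
+//   FreeNode* head = nullptr;
+//   void Push(FreeNode* n) { n->next = head; head = n; }  // cf. Deallocate()
+//   FreeNode* Pop() {                                     // cf. Allocate()
+//     FreeNode* n = head;
+//     if (n != nullptr) { head = n->next; }               // advance freelist
+//     return n;
+//   }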
+
+LocalReferenceTable::LocalReferenceTable()
+ : segment_state_(kLRTFirstSegment),
+ table_(nullptr),
+ max_entries_(0u),
+ current_num_holes_(0) {
+}
+
+bool LocalReferenceTable::Initialize(size_t max_count, std::string* error_msg) {
+ CHECK(error_msg != nullptr);
+
+ // Overflow and maximum check.
+ CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(LrtEntry));
+
+ if (max_count <= kSmallLrtEntries) {
+ table_ = Runtime::Current()->GetSmallLrtAllocator()->Allocate(error_msg);
+ if (table_ != nullptr) {
+ max_entries_ = kSmallLrtEntries;
+ // table_mem_map_ remains invalid.
+ }
+ }
+ if (table_ == nullptr) {
+ const size_t table_bytes = RoundUp(max_count * sizeof(LrtEntry), kPageSize);
+ table_mem_map_ = NewLRTMap(table_bytes, error_msg);
+ if (!table_mem_map_.IsValid()) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+
+ table_ = reinterpret_cast<LrtEntry*>(table_mem_map_.Begin());
+ // Take into account the actual length.
+ max_entries_ = table_bytes / sizeof(LrtEntry);
+ }
+ segment_state_ = kLRTFirstSegment;
+ last_known_previous_state_ = kLRTFirstSegment;
+ return true;
+}
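+
+// Example of the two paths above (illustrative, assuming 8-byte entries and
+// 4 KiB pages): a request with max_count <= kSmallLrtEntries takes the
+// small-table path and ends with max_entries_ == kSmallLrtEntries, while
+// Initialize(1000, ...) maps RoundUp(1000 * 8, 4096) == 8192 bytes and ends
+// with max_entries_ == 1024.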
+
+LocalReferenceTable::~LocalReferenceTable() {
+ if (table_ != nullptr && !table_mem_map_.IsValid()) {
+ Runtime::Current()->GetSmallLrtAllocator()->Deallocate(table_);
+ }
+}
+
+void LocalReferenceTable::ConstexprChecks() {
+ // Use this for some assertions. They can't be put into the header as C++ wants the class
+ // to be complete.
+
+ // Check kind.
+ static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
+ "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
+ "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
+ "Kind encoding error");
+
+ // Check serial.
+ static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");
+
+ // Table index.
+ static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
+}
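+
+// For reference, the encoding verified above is, schematically (the exact bit
+// widths are defined in the header):
+//
+//   +----------------------------+--------+------+
+//   |           index            | serial | kind |   <- one IndirectRef
+//   +----------------------------+--------+------+
+//
+// so Decode*(Encode*(x)) == x must hold for each field, which is what the
+// static_asserts check for a few small values.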
+
+bool LocalReferenceTable::IsValid() const {
+ return table_ != nullptr;
+}
+
+// Holes:
+//
+// To keep the LRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
+// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
+// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
+// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
+// scans when there are no holes, the number of known holes should be tracked.
+//
+// A previous implementation stored the top index and the number of holes as the segment state.
+// This constrains the maximum number of references to a 16-bit count. We want to relax this, as it
+// is easy to require more references (e.g., to list all classes in large applications). Thus,
+// the implicitly stack-stored state, the LRTSegmentState, is only the top index.
+//
+// Thus, hole count is a local property of the current segment, and needs to be recovered when
+// (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable), we
+// cannot do work when the segment changes. Thus, Add and Remove need to ensure the current
+// hole count is correct.
+//
+// To be able to detect segment changes, we require an additional local field that can describe
+// the known segment. This is last_known_previous_state_. The requirement becomes clear with
+// the following cases (some of them non-trivial) that have to be supported:
+//
+// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+// reference
+// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+// reference
+//
+// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
+// segment changes above. The condition is simply that the last known state is greater than or
+// equal to the current previous state, and smaller than the current state (top index). The
+// condition is conservative as it adds O(1) overhead to operations on an empty segment.
+
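+// At the JNI level, segments correspond to local reference frames. A sketch of
+// the stack discipline this bookkeeping supports (standard JNI calls, shown for
+// illustration only):
+//
+//   env->PushLocalFrame(16);          // new segment; previous top saved as cookie
+//   jobject a = env->NewLocalRef(x);  // Add() at the segment top
+//   jobject b = env->NewLocalRef(y);
+//   env->DeleteLocalRef(a);           // Remove() below the top: leaves a hole
+//   env->PopLocalFrame(nullptr);      // top reset; hole count recovered lazily
+//                                     // by the next Add()/Remove()
+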
+static size_t CountNullEntries(const LrtEntry* table, size_t from, size_t to) {
+ size_t count = 0;
+ for (size_t index = from; index != to; ++index) {
+ if (table[index].GetReference()->IsNull()) {
+ count++;
+ }
+ }
+ return count;
+}
+
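+// Example of the recovery trigger below, with illustrative numbers: suppose
+// last_known_previous_state_.top_index == 8. If a frame was popped so that the
+// current top index is now 6, then 8 >= 6 and the hole count is recomputed. If
+// a frame was pushed so that the new previous top is 10, then 8 < 10 and we
+// also recount. Otherwise the cached current_num_holes_ is still valid.
+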
+void LocalReferenceTable::RecoverHoles(LRTSegmentState prev_state) {
+ if (last_known_previous_state_.top_index >= segment_state_.top_index ||
+ last_known_previous_state_.top_index < prev_state.top_index) {
+ const size_t top_index = segment_state_.top_index;
+ size_t count = CountNullEntries(table_, prev_state.top_index, top_index);
+
+ if (kDebugLRT) {
+ LOG(INFO) << "+++ Recovered holes: "
+ << " Current prev=" << prev_state.top_index
+ << " Current top_index=" << top_index
+ << " Old num_holes=" << current_num_holes_
+ << " New num_holes=" << count;
+ }
+
+ current_num_holes_ = count;
+ last_known_previous_state_ = prev_state;
+ } else if (kDebugLRT) {
+ LOG(INFO) << "No need to recover holes";
+ }
+}
+
+ALWAYS_INLINE
+static inline void CheckHoleCount(LrtEntry* table,
+ size_t exp_num_holes,
+ LRTSegmentState prev_state,
+ LRTSegmentState cur_state) {
+ if (kIsDebugBuild) {
+ size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
+ CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
+ << " topIndex=" << cur_state.top_index;
+ }
+}
+
+bool LocalReferenceTable::Resize(size_t new_size, std::string* error_msg) {
+ CHECK_GT(new_size, max_entries_);
+
+ constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(LrtEntry);
+ if (new_size > kMaxEntries) {
+ *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
+ return false;
+ }
+ // Note: the above check also ensures that there is no overflow below.
+
+ const size_t table_bytes = RoundUp(new_size * sizeof(LrtEntry), kPageSize);
+
+ MemMap new_map = NewLRTMap(table_bytes, error_msg);
+ if (!new_map.IsValid()) {
+ return false;
+ }
+
+ memcpy(new_map.Begin(), table_, max_entries_ * sizeof(LrtEntry));
+ if (!table_mem_map_.IsValid()) {
+ // Didn't have its own map; deallocate old table.
+ Runtime::Current()->GetSmallLrtAllocator()->Deallocate(table_);
+ }
+ table_mem_map_ = std::move(new_map);
+ table_ = reinterpret_cast<LrtEntry*>(table_mem_map_.Begin());
+ const size_t real_new_size = table_bytes / sizeof(LrtEntry);
+ DCHECK_GE(real_new_size, new_size);
+ max_entries_ = real_new_size;
+
+ return true;
+}
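+
+// Example of the rounding above (assuming 8-byte entries and 4 KiB pages):
+// Resize(1280, ...) maps RoundUp(1280 * 8, 4096) == 12288 bytes, so
+// real_new_size == 1536 and max_entries_ can exceed the requested new_size.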
+
+IndirectRef LocalReferenceTable::Add(LRTSegmentState previous_state,
+ ObjPtr<mirror::Object> obj,
+ std::string* error_msg) {
+ if (kDebugLRT) {
+ LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
+ << " top_index=" << segment_state_.top_index
+ << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+ << " holes=" << current_num_holes_;
+ }
+
+ size_t top_index = segment_state_.top_index;
+
+ CHECK(obj != nullptr);
+ VerifyObject(obj);
+ DCHECK(table_ != nullptr);
+
+ if (top_index == max_entries_) {
+    // Try to double the table size.
+ if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
+ std::ostringstream oss;
+ oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
+ << "(max=" << max_entries_ << ")" << std::endl
+ << MutatorLockedDumpable<LocalReferenceTable>(*this)
+ << " Resizing failed: exceeds size_t";
+ *error_msg = oss.str();
+ return nullptr;
+ }
+
+ std::string inner_error_msg;
+ if (!Resize(max_entries_ * 2, &inner_error_msg)) {
+ std::ostringstream oss;
+ oss << "JNI ERROR (app bug): " << kLocal << " table overflow "
+ << "(max=" << max_entries_ << ")" << std::endl
+ << MutatorLockedDumpable<LocalReferenceTable>(*this)
+ << " Resizing failed: " << inner_error_msg;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ }
+
+ RecoverHoles(previous_state);
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+ // We know there's enough room in the table. Now we just need to find
+ // the right spot. If there's a hole, find it and fill it; otherwise,
+ // add to the end of the list.
+ IndirectRef result;
+ size_t index;
+ if (current_num_holes_ > 0) {
+ DCHECK_GT(top_index, 1U);
+ // Find the first hole; likely to be near the end of the list.
+ LrtEntry* p_scan = &table_[top_index - 1];
+ DCHECK(!p_scan->GetReference()->IsNull());
+ --p_scan;
+ while (!p_scan->GetReference()->IsNull()) {
+ DCHECK_GE(p_scan, table_ + previous_state.top_index);
+ --p_scan;
+ }
+ index = p_scan - table_;
+ current_num_holes_--;
+ } else {
+ // Add to the end.
+ index = top_index++;
+ segment_state_.top_index = top_index;
+ }
+ table_[index].Add(obj);
+ result = ToIndirectRef(index);
+ if (kDebugLRT) {
+ LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
+ << " holes=" << current_num_holes_;
+ }
+
+ DCHECK(result != nullptr);
+ return result;
+}
+
+void LocalReferenceTable::AssertEmpty() {
+ for (size_t i = 0; i < Capacity(); ++i) {
+ if (!table_[i].GetReference()->IsNull()) {
+ LOG(FATAL) << "Internal Error: non-empty local reference table\n"
+ << MutatorLockedDumpable<LocalReferenceTable>(*this);
+ UNREACHABLE();
+ }
+ }
+}
+
+// Removes an object. We extract the table offset bits from "iref"
+// and zap the corresponding entry, leaving a hole if it's not at the top.
+// If the entry is not between the current top index and the bottom index
+// specified by the cookie, we don't remove anything. This is the behavior
+// required by JNI's DeleteLocalRef function.
+// This method is not called when a local frame is popped; this is only used
+// for explicit single removals.
+// Returns "false" if nothing was removed.
+bool LocalReferenceTable::Remove(LRTSegmentState previous_state, IndirectRef iref) {
+ if (kDebugLRT) {
+ LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
+ << " top_index=" << segment_state_.top_index
+ << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+ << " holes=" << current_num_holes_;
+ }
+
+ const uint32_t top_index = segment_state_.top_index;
+ const uint32_t bottom_index = previous_state.top_index;
+
+ DCHECK(table_ != nullptr);
+
+ // TODO: We should eagerly check the ref kind against the `kLocal` kind instead of
+ // relying on this weak check and postponing the rest until `CheckEntry()` below.
+  // Passing the wrong kind currently results in misleading warnings.
+ if (GetIndirectRefKind(iref) == kJniTransition) {
+ auto* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
+ auto* env = self->GetJniEnv();
+ DCHECK(env != nullptr);
+ if (env->IsCheckJniEnabled()) {
+ LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
+ if (kDumpStackOnNonLocalReference) {
+ self->Dump(LOG_STREAM(WARNING));
+ }
+ }
+ return true;
+ }
+ }
+
+ const uint32_t idx = ExtractIndex(iref);
+ if (idx < bottom_index) {
+ // Wrong segment.
+ LOG(WARNING) << "Attempt to remove index outside index area (" << idx
+ << " vs " << bottom_index << "-" << top_index << ")";
+ return false;
+ }
+ if (idx >= top_index) {
+ // Bad --- stale reference?
+ LOG(WARNING) << "Attempt to remove invalid index " << idx
+ << " (bottom=" << bottom_index << " top=" << top_index << ")";
+ return false;
+ }
+
+ RecoverHoles(previous_state);
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+ if (idx == top_index - 1) {
+    // Top-most entry. Scan down and consume holes.
+
+ if (!CheckEntry("remove", iref, idx)) {
+ return false;
+ }
+
+ *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
+ if (current_num_holes_ != 0) {
+ uint32_t collapse_top_index = top_index;
+ while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
+ if (kDebugLRT) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
+ << " (previous_state=" << bottom_index << ") val="
+ << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
+ }
+ if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
+ break;
+ }
+ if (kDebugLRT) {
+ LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
+ }
+ current_num_holes_--;
+ }
+ segment_state_.top_index = collapse_top_index;
+
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+ } else {
+ segment_state_.top_index = top_index - 1;
+ if (kDebugLRT) {
+ LOG(INFO) << "+++ ate last entry " << top_index - 1;
+ }
+ }
+ } else {
+ // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
+ // from deleting it twice and screwing up the hole count.
+ if (table_[idx].GetReference()->IsNull()) {
+ LOG(INFO) << "--- WEIRD: removing null entry " << idx;
+ return false;
+ }
+ if (!CheckEntry("remove", iref, idx)) {
+ return false;
+ }
+
+ *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
+ current_num_holes_++;
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+ if (kDebugLRT) {
+ LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
+ }
+ }
+
+ return true;
+}
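+
+// Example of the segment check above (standard JNI, illustration only): a local
+// reference created before PushLocalFrame cannot be deleted from inside the
+// frame, because its index is below the frame's bottom index:
+//
+//   jobject outer = env->NewLocalRef(obj);
+//   env->PushLocalFrame(16);
+//   env->DeleteLocalRef(outer);   // idx < bottom_index: warns, nothing removed
+//   env->PopLocalFrame(nullptr);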
+
+void LocalReferenceTable::Trim() {
+ ScopedTrace trace(__PRETTY_FUNCTION__);
+ if (!table_mem_map_.IsValid()) {
+ // Small table; nothing to do here.
+ return;
+ }
+ const size_t top_index = Capacity();
+ uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
+ uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
+ DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
+ DCHECK_ALIGNED(release_end, kPageSize);
+ DCHECK_ALIGNED(release_end - release_start, kPageSize);
+ if (release_start != release_end) {
+ madvise(release_start, release_end - release_start, MADV_DONTNEED);
+ }
+}
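+
+// Example (assuming 8-byte entries and 4 KiB pages): with max_entries_ == 1536
+// and top_index == 100, live entries end at byte offset 800; release_start is
+// aligned up to the 4096-byte boundary and the final two pages are returned to
+// the kernel via madvise(MADV_DONTNEED) while the mapping itself stays valid.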
+
+void LocalReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
+ BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
+ for (size_t i = 0, capacity = Capacity(); i != capacity; ++i) {
+ GcRoot<mirror::Object>* ref = table_[i].GetReference();
+ if (!ref->IsNull()) {
+ root_visitor.VisitRoot(*ref);
+ DCHECK(!ref->IsNull());
+ }
+ }
+}
+
+void LocalReferenceTable::Dump(std::ostream& os) const {
+ os << kLocal << " table dump:\n";
+ ReferenceTable::Table entries;
+ for (size_t i = 0; i < Capacity(); ++i) {
+ ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
+ if (obj != nullptr) {
+ obj = table_[i].GetReference()->Read();
+ entries.push_back(GcRoot<mirror::Object>(obj));
+ }
+ }
+ ReferenceTable::Dump(os, entries);
+}
+
+void LocalReferenceTable::SetSegmentState(LRTSegmentState new_state) {
+ if (kDebugLRT) {
+ LOG(INFO) << "Setting segment state: "
+ << segment_state_.top_index
+ << " -> "
+ << new_state.top_index;
+ }
+ segment_state_ = new_state;
+}
+
+bool LocalReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
+ DCHECK_GE(free_capacity, static_cast<size_t>(1));
+ if (free_capacity > kMaxTableSizeInBytes) {
+ // Arithmetic might even overflow.
+ *error_msg = "Requested table size implausibly large";
+ return false;
+ }
+ size_t top_index = segment_state_.top_index;
+ if (top_index + free_capacity <= max_entries_) {
+ return true;
+ }
+
+ // Try to increase the table size.
+ if (!Resize(top_index + free_capacity, error_msg)) {
+ LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
+ << "): " << std::endl
+ << MutatorLockedDumpable<LocalReferenceTable>(*this)
+ << " Resizing failed: " << *error_msg;
+ return false;
+ }
+ return true;
+}
+
+size_t LocalReferenceTable::FreeCapacity() const {
+ return max_entries_ - segment_state_.top_index;
+}
+
+} // namespace jni
+} // namespace art