author Lokesh Gidra <lokeshgidra@google.com> 2022-09-12 13:02:05 -0700
committer Lokesh Gidra <lokeshgidra@google.com> 2022-09-14 22:17:07 +0000
commit 28443a7075e64adee1cc19c3bb065cc5c9ecb1d0 (patch)
tree 880b3243b7946a0a855ec3f01019326e55166b2a /openjdkjvmti/jvmti_weak_table.h
parent 03ac3eb0fc36be97f301ac60e85e1bb7ca52fa12 (diff)
Use side vector while sweeping jvmti-weak-table
Previously, while sweeping we erased every <K,V> pair whose object reference (the key) had changed and emplaced the moved ones back as we went. This is not compatible with in-place compaction algorithms, because a to-space reference being added to the map may already be present in it as a from-space reference. This CL changes the sweep to collect all updated <K,V> pairs in a side vector until the map traversal finishes, and only then insert them back into the map.

Bug: 160737021
Test: ART_USE_READ_BARRIER=false art/test/testrunner/testrunner.py --host --debug -t 905-object-free
Change-Id: Ib416ca0b21d4a9fc44721495b798f82be9387900
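For readers unfamiliar with the pattern, below is a minimal, self-contained sketch of a sweep that parks updated entries in a side vector. It is not the ART code: ObjRef, Tag, IsMarkedUpdated, and SweepWithSideVector are hypothetical stand-ins for the GcRoot-keyed TagMap and the GC's is-marked/update callback used by the real JvmtiWeakTable.

#include <cstdint>
#include <unordered_map>
#include <utility>
#include <vector>

using ObjRef = std::uintptr_t;   // stand-in for art::GcRoot<art::mirror::Object>
using Tag = std::uint64_t;       // stand-in for the table's value type T
using TagMap = std::unordered_map<ObjRef, Tag>;

// visitor(ref) returns 0 if the object is dead, otherwise the (possibly
// updated) reference of the live object. Purely illustrative signature.
using IsMarkedUpdated = ObjRef (*)(ObjRef);

void SweepWithSideVector(TagMap& tagged_objects, IsMarkedUpdated visitor) {
  // Side vector: holds updated <key, value> pairs until traversal finishes.
  std::vector<std::pair<ObjRef, Tag>> updated;
  for (auto it = tagged_objects.begin(); it != tagged_objects.end();) {
    const ObjRef new_ref = visitor(it->first);
    if (new_ref == 0) {
      // Dead object: drop its tag.
      it = tagged_objects.erase(it);
    } else if (new_ref != it->first) {
      // Moved object: park the updated pair; do NOT insert it yet, because
      // with in-place compaction new_ref may still be present in the map
      // as a from-space key that the loop has not visited.
      updated.emplace_back(new_ref, std::move(it->second));
      it = tagged_objects.erase(it);
    } else {
      ++it;
    }
  }
  // Traversal is done: every stale from-space key has been erased or left
  // unchanged, so re-inserting the updated pairs is now safe.
  for (auto& pair : updated) {
    tagged_objects.insert_or_assign(pair.first, std::move(pair.second));
  }
}

The design point is simply that no insertion happens while the map is being iterated; any collision between a to-space key and a not-yet-visited from-space key is resolved only after the whole table has been swept.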
Diffstat (limited to 'openjdkjvmti/jvmti_weak_table.h')
-rw-r--r--  openjdkjvmti/jvmti_weak_table.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/openjdkjvmti/jvmti_weak_table.h b/openjdkjvmti/jvmti_weak_table.h
index afa2d1da0a..674b2a3d52 100644
--- a/openjdkjvmti/jvmti_weak_table.h
+++ b/openjdkjvmti/jvmti_weak_table.h
@@ -211,13 +211,13 @@ class JvmtiWeakTable : public art::gc::SystemWeakHolder {
};
using TagAllocator = JvmtiAllocator<std::pair<const art::GcRoot<art::mirror::Object>, T>>;
- std::unordered_map<art::GcRoot<art::mirror::Object>,
- T,
- HashGcRoot,
- EqGcRoot,
- TagAllocator> tagged_objects_
- GUARDED_BY(allow_disallow_lock_)
- GUARDED_BY(art::Locks::mutator_lock_);
+ using TagMap = std::unordered_map<art::GcRoot<art::mirror::Object>,
+ T,
+ HashGcRoot,
+ EqGcRoot,
+ TagAllocator>;
+
+ TagMap tagged_objects_ GUARDED_BY(allow_disallow_lock_) GUARDED_BY(art::Locks::mutator_lock_);
// To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
bool update_since_last_sweep_;
};