/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_

#include "base/allocator.h"
#include "base/hash_map.h"
#include "gc_root.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "object_callbacks.h"

#include <stdint.h>

namespace art {

class ArtMethod;  // forward declaration

namespace mirror {
class Object;  // forward declaration
}  // namespace mirror

namespace lambda {
/*
 * Store a table of boxed lambdas. This is required to maintain object referential equality
 * when a lambda is re-boxed.
 *
 * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
 * When too many objects get GCd, we shrink the underlying table to use less space.
 */
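//
// Usage sketch (illustrative only): given some ClosureType 'closure' and a pointer
// 'box_table' to the runtime's BoxTable instance (how that pointer is obtained is outside
// the scope of this header):
//
//   mirror::Object* boxed = box_table->BoxLambda(closure);  // Re-boxing the same closure
//                                                           // yields the same object.
//   BoxTable::ClosureType unboxed;
//   if (box_table->UnboxLambda(boxed, &unboxed)) {
//     // unboxed == closure
//   }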
class BoxTable FINAL {
 public:
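  // A closure is currently represented by the lambda's ArtMethod; this is expected to change
  // once closures get a richer representation (see the HashFn TODO below).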
  using ClosureType = art::ArtMethod*;

  // Boxes a closure into an object. Returns null and throws an exception on failure.
  mirror::Object* BoxLambda(const ClosureType& closure)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
  // moved, and delete them from the table if the objects have been cleaned up.
  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Temporarily block anyone from touching the map.
  void DisallowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Unblock any readers who have been queued waiting to touch the map.
  void AllowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Wake up any readers queued on the condition variable so they can re-check
  // whether touching the map is allowed again.
  void BroadcastForNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  BoxTable();
  ~BoxTable() = default;

 private:
  // FIXME: This needs to be a GcRoot.
  // Explanation:
  // - After all threads are suspended (exclusive mutator lock),
  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
  //   If an object is moved at that time and *before* SweepSystemWeaks is called, then
  //   we don't know if the move has happened yet.
  //   Successive reads will then (incorrectly) look at the objects in the "from" space,
  //   which is a problem since the objects have been already forwarded and mutations
  //   would not be visible in the right space.
  //   Instead, use a GcRoot here which will be automatically updated by the GC.
  //
  //   Also, any reads should be protected by a read barrier to always give us the
  //   "to" space address.
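  //   (GcRoot<>::Read() applies that read barrier when the stored reference is loaded.)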
  using ValueType = GcRoot<mirror::Object>;

  // Attempt to look up the lambda in the map, or return null if it's not there yet.
  ValueType FindBoxedLambda(const ClosureType& closure) const
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
  void BlockUntilWeaksAllowed()
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // EmptyFn implementation for art::HashMap
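  // (A null closure key marks an unused slot in the table; that is what MakeEmpty() and
  //  IsEmpty() below encode for the map.)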
  struct EmptyFn {
    void MakeEmpty(std::pair<ClosureType, ValueType>& item) const {
      item.first = nullptr;
    }

    bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const {
      return item.first == nullptr;
    }
  };

  // HashFn implementation for art::HashMap
  struct HashFn {
    size_t operator()(const ClosureType& key) const {
      // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod*
      return static_cast<size_t>(reinterpret_cast<uintptr_t>(key));
    }
  };

  // EqualsFn implementation for art::HashMap
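  // (Declared here and defined out of line; while ClosureType is an ArtMethod* this is
  //  expected to be a plain pointer comparison.)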
  struct EqualsFn {
    bool operator()(const ClosureType& lhs, const ClosureType& rhs) const;
  };

  using UnorderedMap = art::HashMap<ClosureType,
                                    ValueType,
                                    EmptyFn,
                                    HashFn,
                                    EqualsFn,
                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                      kAllocatorTagLambdaBoxTable>>;

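  // The underlying closure -> boxed-lambda-object map; entries are held weakly and swept by
  // SweepWeakBoxedLambdas().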
  UnorderedMap map_                              GUARDED_BY(Locks::lambda_table_lock_);
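  // Whether the map may currently be touched; toggled by the Disallow/Allow GC callbacks above.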
  bool allow_new_weaks_                          GUARDED_BY(Locks::lambda_table_lock_);
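  // Signalled when touching the map becomes allowed again; BlockUntilWeaksAllowed() waits on it.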
  ConditionVariable new_weaks_condition_         GUARDED_BY(Locks::lambda_table_lock_);

  // Shrink the map when we get below this load factor.
  // (This is an arbitrary value that should be large enough to prevent aggressive map erases
  // from shrinking the table too often.)
  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;

  DISALLOW_COPY_AND_ASSIGN(BoxTable);
};

}  // namespace lambda
}  // namespace art

#endif  // ART_RUNTIME_LAMBDA_BOX_TABLE_H_