Revert "Revert "lambda: Experimental support for capture-variable and liberate-variable""
This reverts commit 7bbb80ab52c203e44d2ded2c947b3b03b4b31ec4.
Change-Id: If806ce5c6c5e96fdb2c3761dee096f74e7e5b001
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
index 892d8c6..ea13eb7 100644
--- a/runtime/lambda/art_lambda_method.h
+++ b/runtime/lambda/art_lambda_method.h
@@ -35,7 +35,7 @@
// (Ownership of strings is retained by the caller and the lifetime should exceed this class).
ArtLambdaMethod(ArtMethod* target_method,
const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty_,
+ const char* captured_variables_shorty,
bool innate_lambda = true);
// Get the target method for this lambda that would be used by the invoke-lambda dex instruction.
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 26575fd..8eef10b 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,6 +18,8 @@
#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
+#include "lambda/closure.h"
+#include "lambda/leaking_allocator.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"
@@ -26,11 +28,53 @@
namespace art {
namespace lambda {
+// Temporarily represent the lambda Closure as its raw bytes in an array.
+// TODO: Generate a proxy class for the closure when boxing the first time.
+using BoxedClosurePointerType = mirror::ByteArray*;
+
+static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return mirror::ByteArray::GetArrayClass();
+}
+
+namespace {
+ // Convenience functions for allocating/deleting the box table's copies of the closures.
+ struct ClosureAllocator {
+ // Deletes a Closure that was allocated through ::Allocate.
+ static void Delete(Closure* ptr) {
+ delete[] reinterpret_cast<char*>(ptr);
+ }
+
+ // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
+ static Closure* Allocate(size_t size) {
+ DCHECK_GE(size, sizeof(Closure));
+
+ // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
+ Closure* closure = reinterpret_cast<Closure*>(new char[size]);
+ DCHECK_ALIGNED(closure, alignof(Closure));
+ return closure;
+ }
+ };
+} // namespace
BoxTable::BoxTable()
: allow_new_weaks_(true),
new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
+BoxTable::~BoxTable() {
+ // Free all the copies of our closures.
+ for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
+ std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
+
+ Closure* closure = key_value_pair.first;
+
+ // Remove the entry from the map first, so that the map never holds a dangling pointer.
+ map_iterator = map_.Erase(map_iterator); // Erase returns the iterator to the next element.
+
+ // Safe to delete, no dangling pointers.
+ ClosureAllocator::Delete(closure);
+ }
+}
+
mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
Thread* self = Thread::Current();
@@ -58,22 +102,29 @@
// Release the lambda table lock here, so that thread suspension is allowed.
- // Convert the ArtMethod into a java.lang.reflect.Method which will serve
+ // Convert the Closure into a managed byte[] which will serve
// as the temporary 'boxed' version of the lambda. This is good enough
// to check all the basic object identities that a boxed lambda must retain.
+ // It's also good enough to contain all the captured primitive variables.
// TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
// TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- mirror::Method* method_as_object =
- mirror::Method::CreateFromArtMethod(self, closure);
+ BoxedClosurePointerType closure_as_array_object =
+ mirror::ByteArray::Alloc(self, closure->GetSize());
+
// There are no thread suspension points after this, so we don't need to put it into a handle.
- if (UNLIKELY(method_as_object == nullptr)) {
+ if (UNLIKELY(closure_as_array_object == nullptr)) {
// Most likely an OOM has occurred.
CHECK(self->IsExceptionPending());
return nullptr;
}
+ // Write the raw closure data into the byte[].
+ closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
+ 0 /*index*/),
+ closure_as_array_object->GetLength());
+
// The method has been successfully boxed into an object, now insert it into the hash map.
{
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -87,38 +138,56 @@
return value.Read();
}
- // Otherwise we should insert it into the hash map in this thread.
- map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
+ // Otherwise we need to insert it into the hash map in this thread.
+
+ // Make a copy for the box table to keep, in case the stack-resident closure is reclaimed.
+ // TODO: GC may need to sweep for roots in the box table's copy of the closure.
+ Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
+ closure->CopyTo(closure_table_copy, closure->GetSize());
+
+ // The closure_table_copy must be deleted manually when its entry is erased from the map.
+
+ // Actually insert into the table.
+ map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
}
- return method_as_object;
+ return closure_as_array_object;
}
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
DCHECK(object != nullptr);
*out_closure = nullptr;
+ Thread* self = Thread::Current();
+
// Note that we do not need to access lambda_table_lock_ here
// since we don't need to look at the map.
mirror::Object* boxed_closure_object = object;
- // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
- if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
- ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
+ // Raise ClassCastException if object is not instanceof byte[]
+ if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
+ ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
return false;
}
// TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a Method.
+ // specified in [type id]. This is not currently implemented since it's always a byte[].
// If we got this far, the inputs are valid.
- // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
- mirror::AbstractMethod* boxed_closure_as_method =
- down_cast<mirror::AbstractMethod*>(boxed_closure_object);
+ // Convert the byte[] back into a raw closure: allocate fresh memory, copy the data, return it.
+ BoxedClosurePointerType boxed_closure_as_array =
+ down_cast<BoxedClosurePointerType>(boxed_closure_object);
- ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
- DCHECK(unboxed_closure != nullptr);
+ const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
+
+ // Allocate a copy that can "escape" the caller's frame, and copy the closure data into it.
+ Closure* unboxed_closure =
+ LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
+ // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
+ memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
+
+ DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
*out_closure = unboxed_closure;
return true;
@@ -127,7 +196,7 @@
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
auto map_iterator = map_.Find(closure);
if (map_iterator != map_.end()) {
- const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+ const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
const ValueType& value = key_value_pair.second;
DCHECK(!value.IsNull()); // Never store null boxes.
@@ -157,7 +226,7 @@
*/
std::vector<ClosureType> remove_list;
for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+ std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
const ValueType& old_value = key_value_pair.second;
@@ -166,10 +235,15 @@
mirror::Object* new_value = visitor->IsMarked(old_value_raw);
if (new_value == nullptr) {
- const ClosureType& closure = key_value_pair.first;
// The object has been swept away.
+ const ClosureType& closure = key_value_pair.first;
+
// Delete the entry from the map.
- map_iterator = map_.Erase(map_.Find(closure));
+ map_iterator = map_.Erase(map_iterator);
+
+ // Clean up the memory by deleting the closure.
+ ClosureAllocator::Delete(closure);
+
} else {
// The object has been moved.
// Update the map.
@@ -208,16 +282,33 @@
new_weaks_condition_.Broadcast(self);
}
-bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
+void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
+ item.first = nullptr;
+
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ item.second = ValueType(); // Also clear the GC root.
+}
+
+bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
+ return item.first == nullptr;
+}
+
+bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
+ const UnorderedMapKeyType& rhs) const {
// Nothing needs this right now, but leave this assertion for later when
// we need to look at the references inside of the closure.
- if (kIsDebugBuild) {
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- }
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- // TODO: Need rework to use read barriers once closures have references inside of them that can
- // move. Until then, it's safe to just compare the data inside of it directly.
- return lhs == rhs;
+ return lhs->ReferenceEquals(rhs);
+}
+
+size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
+ const lambda::Closure* closure = key;
+ DCHECK_ALIGNED(closure, alignof(lambda::Closure));
+
+ // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ return closure->GetHashCode();
}
} // namespace lambda
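
For reference, a minimal standalone sketch (plain C++, not ART code; CopyingTable and all names below are hypothetical) of the ownership pattern BoxLambda now follows: the table keeps its own heap copy of each key's bytes and frees that copy only when the entry goes away, so the stack-resident original can disappear without invalidating the map.

#include <cstddef>
#include <cstring>
#include <unordered_map>

struct CopyingTable {
  std::unordered_map<char*, int> map_;

  // Store a table-owned copy of the bytes, like closure_table_copy above.
  void Insert(const char* data, std::size_t size, int value) {
    char* copy = new char[size];
    std::memcpy(copy, data, size);
    map_.emplace(copy, value);
  }

  // The owner frees every copy, like ClosureAllocator::Delete in ~BoxTable.
  ~CopyingTable() {
    for (auto& entry : map_) {
      delete[] entry.first;
    }
  }
};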
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 9ffda66..adb7332 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -34,6 +34,7 @@
} // namespace mirror
namespace lambda {
+struct Closure; // forward declaration
/*
* Store a table of boxed lambdas. This is required to maintain object referential equality
@@ -44,7 +45,7 @@
*/
class BoxTable FINAL {
public:
- using ClosureType = art::ArtMethod*;
+ using ClosureType = art::lambda::Closure*;
// Boxes a closure into an object. Returns null and throws an exception on failure.
mirror::Object* BoxLambda(const ClosureType& closure)
@@ -72,10 +73,9 @@
REQUIRES(!Locks::lambda_table_lock_);
BoxTable();
- ~BoxTable() = default;
+ ~BoxTable();
private:
- // FIXME: This needs to be a GcRoot.
// Explanation:
// - After all threads are suspended (exclusive mutator lock),
// the concurrent-copying GC can move objects from the "from" space to the "to" space.
@@ -97,30 +97,30 @@
void BlockUntilWeaksAllowed()
SHARED_REQUIRES(Locks::lambda_table_lock_);
+ // The key is a raw Closure* whose memory the box table owns and deletes manually when erased.
+ using UnorderedMapKeyType = ClosureType;
+
// EmptyFn implementation for art::HashMap
struct EmptyFn {
- void MakeEmpty(std::pair<ClosureType, ValueType>& item) const {
- item.first = nullptr;
- }
- bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const {
- return item.first == nullptr;
- }
+ void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
+ NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
+
+ bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
};
// HashFn implementation for art::HashMap
struct HashFn {
- size_t operator()(const ClosureType& key) const {
- // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod*
- return static_cast<size_t>(reinterpret_cast<uintptr_t>(key));
- }
+ size_t operator()(const UnorderedMapKeyType& key) const
+ NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
};
// EqualsFn implementation for art::HashMap
struct EqualsFn {
- bool operator()(const ClosureType& lhs, const ClosureType& rhs) const;
+ bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
+ NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
};
- using UnorderedMap = art::HashMap<ClosureType,
+ using UnorderedMap = art::HashMap<UnorderedMapKeyType,
ValueType,
EmptyFn,
HashFn,
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
index 95a17c6..179e4ee 100644
--- a/runtime/lambda/closure.cc
+++ b/runtime/lambda/closure.cc
@@ -124,6 +124,55 @@
memcpy(target, this, GetSize());
}
+ArtMethod* Closure::GetTargetMethod() const {
+ return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
+}
+
+uint32_t Closure::GetHashCode() const {
+ // Start with a non-zero constant, a prime number.
+ uint32_t result = 17;
+
+ // Include the hash of the target ArtMethod pointer.
+ {
+ uintptr_t method = reinterpret_cast<uintptr_t>(GetTargetMethod());
+ result = 31 * result + Low32Bits(method);
+ if (sizeof(method) == sizeof(uint64_t)) {
+ result = 31 * result + High32Bits(method);
+ }
+ }
+
+ // Include a hash for each captured variable.
+ for (size_t i = 0; i < GetCapturedVariablesSize(); ++i) {
+ // TODO: not safe for GC-able values since the address can move and the hash code would change.
+ uint8_t captured_variable_raw_value;
+ CopyUnsafeAtOffset<uint8_t>(i, /*out*/&captured_variable_raw_value); // NOLINT: [whitespace/comma] [3]
+
+ result = 31 * result + captured_variable_raw_value;
+ }
+
+ // TODO: Fix above loop to work for objects and lambdas.
+ static_assert(kClosureSupportsGarbageCollection == false,
+ "Need to update above loop to read the hash code from the "
+ "objects and lambdas recursively");
+
+ return result;
+}
+
+bool Closure::ReferenceEquals(const Closure* other) const {
+ DCHECK(other != nullptr);
+
+ // TODO: Need rework to use read barriers once closures have references inside of them that can
+ // move. Until then, it's safe to just compare the data inside of it directly.
+ static_assert(kClosureSupportsReferences == false,
+ "Unsafe to use memcmp in read barrier collector");
+
+ if (GetSize() != other->GetSize()) {
+ return false;
+ }
+
+ // memcmp returns 0 when all GetSize() bytes compare equal.
+ return memcmp(this, other, GetSize()) == 0;
+}
+
size_t Closure::GetNumberOfCapturedVariables() const {
// TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
VariableInfo variable_info =
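
As an aside, Closure::GetHashCode above is the classic 17/31 polynomial hash. A standalone sketch with the ART types replaced by plain integers (HashClosureBytes is a hypothetical name):

#include <cstddef>
#include <cstdint>

uint32_t HashClosureBytes(uintptr_t method, const uint8_t* bytes, std::size_t size) {
  uint32_t result = 17;  // non-zero prime seed
  // Fold in the method pointer, 32 bits at a time.
  result = 31 * result + static_cast<uint32_t>(method);
  if (sizeof(method) == sizeof(uint64_t)) {
    result = 31 * result + static_cast<uint32_t>(static_cast<uint64_t>(method) >> 32);
  }
  // One step per captured-variable byte, as in the loop above.
  for (std::size_t i = 0; i < size; ++i) {
    result = 31 * result + bytes[i];
  }
  return result;
}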
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
index 60d117e..31ff194 100644
--- a/runtime/lambda/closure.h
+++ b/runtime/lambda/closure.h
@@ -49,6 +49,19 @@
// The target_size must be at least as large as GetSize().
void CopyTo(void* target, size_t target_size) const;
+ // Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
+ ArtMethod* GetTargetMethod() const;
+
+ // Calculates the hash code. Value is recomputed each time.
+ uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Is this the same closure as other? e.g. same target method, same captured variables.
+ //
+ // Determines whether two Closures are interchangeable values (usually for the purpose of
+ // boxing/unboxing); does *not* call Object#equals recursively.
+ bool ReferenceEquals(const Closure* other) const SHARED_REQUIRES(Locks::mutator_lock_);
+
// How many variables were captured?
size_t GetNumberOfCapturedVariables() const;
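
The ReferenceEquals contract declared above amounts to byte-for-byte value equality, which is only sound while closures hold no movable references (hence the static_assert in closure.cc). A minimal sketch of that idiom (RawBlob and BlobEquals are hypothetical names):

#include <cstddef>
#include <cstring>

struct RawBlob {
  std::size_t size;
  const void* data;
};

inline bool BlobEquals(const RawBlob& a, const RawBlob& b) {
  // memcmp returns 0 when the byte ranges are identical.
  return a.size == b.size && std::memcmp(a.data, b.data, a.size) == 0;
}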
diff --git a/runtime/lambda/closure_builder-inl.h b/runtime/lambda/closure_builder-inl.h
index 41a803b..3cec21f 100644
--- a/runtime/lambda/closure_builder-inl.h
+++ b/runtime/lambda/closure_builder-inl.h
@@ -35,6 +35,8 @@
values_.push_back(value_storage);
size_ += sizeof(T);
+
+ shorty_types_ += kShortyType;
}
} // namespace lambda
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
index 9c37db8..739e965 100644
--- a/runtime/lambda/closure_builder.cc
+++ b/runtime/lambda/closure_builder.cc
@@ -64,6 +64,8 @@
UNIMPLEMENTED(FATAL) << "can't yet safely capture objects with read barrier";
}
}
+
+ shorty_types_ += ShortyFieldType::kObject;
}
void ClosureBuilder::CaptureVariableLambda(Closure* closure) {
@@ -78,6 +80,8 @@
// A closure may be sized dynamically, so always query it for the true size.
size_ += closure->GetSize();
+
+ shorty_types_ += ShortyFieldType::kLambda;
}
size_t ClosureBuilder::GetSize() const {
@@ -85,9 +89,15 @@
}
size_t ClosureBuilder::GetCaptureCount() const {
+ DCHECK_EQ(values_.size(), shorty_types_.size());
return values_.size();
}
+const std::string& ClosureBuilder::GetCapturedVariableShortyTypes() const {
+ DCHECK_EQ(values_.size(), shorty_types_.size());
+ return shorty_types_;
+}
+
Closure* ClosureBuilder::CreateInPlace(void* memory, ArtLambdaMethod* target_method) const {
DCHECK(memory != nullptr);
DCHECK(target_method != nullptr);
@@ -138,11 +148,14 @@
size_t variables_size) const {
size_t total_size = header_size;
const char* shorty_types = target_method->GetCapturedVariablesShortyTypeDescriptor();
+ DCHECK_STREQ(shorty_types, shorty_types_.c_str());
size_t variables_offset = 0;
size_t remaining_size = variables_size;
const size_t shorty_count = target_method->GetNumberOfCapturedVariables();
+ DCHECK_EQ(shorty_count, GetCaptureCount());
+
for (size_t i = 0; i < shorty_count; ++i) {
ShortyFieldType shorty{shorty_types[i]}; // NOLINT [readability/braces] [4]
diff --git a/runtime/lambda/closure_builder.h b/runtime/lambda/closure_builder.h
index 542e12a..23eb484 100644
--- a/runtime/lambda/closure_builder.h
+++ b/runtime/lambda/closure_builder.h
@@ -40,13 +40,12 @@
//
// The mutator lock must be held for the duration of the lifetime of this object,
// since it needs to temporarily store heap references into an internal list.
-class ClosureBuilder : ValueObject {
+class ClosureBuilder {
public:
using ShortyTypeEnum = decltype(ShortyFieldType::kByte);
-
// Mark this primitive value to be captured as the specified type.
- template <typename T, ShortyTypeEnum kShortyType>
+ template <typename T, ShortyTypeEnum kShortyType = ShortyFieldTypeSelectEnum<T>::value>
void CaptureVariablePrimitive(T value);
// Mark this object reference to be captured.
@@ -63,6 +62,9 @@
// Returns how many variables have been captured so far.
size_t GetCaptureCount() const;
+ // Get the list of captured variables' shorty field types.
+ const std::string& GetCapturedVariableShortyTypes() const;
+
// Creates a closure in-place and writes out the data into 'memory'.
// Memory must be at least 'GetSize' bytes large.
// All previously marked data to be captured is now written out.
@@ -93,6 +95,7 @@
size_t size_ = kInitialSize;
bool is_dynamic_size_ = false;
std::vector<ShortyFieldTypeTraits::MaxType> values_;
+ std::string shorty_types_;
};
} // namespace lambda
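
To illustrate the new bookkeeping, a simplified standalone sketch (MiniBuilder and ShortyChar are hypothetical; the real selector is ShortyFieldTypeSelectEnum): every capture appends exactly one shorty character, so the type string cannot drift out of sync with the captured-value list, which is what the new DCHECKs verify.

#include <cstdint>
#include <string>
#include <vector>

template <typename T> struct ShortyChar;  // maps a C++ type to its shorty char
template <> struct ShortyChar<int32_t> { static constexpr char value = 'I'; };
template <> struct ShortyChar<int64_t> { static constexpr char value = 'J'; };

class MiniBuilder {
 public:
  // The defaulted template argument mirrors ShortyFieldTypeSelectEnum<T>::value.
  template <typename T, char kShorty = ShortyChar<T>::value>
  void CapturePrimitive(T value) {
    values_.push_back(static_cast<uint64_t>(value));
    shorty_types_ += kShorty;  // one character per capture
  }

  const std::string& GetCapturedVariableShortyTypes() const { return shorty_types_; }

 private:
  std::vector<uint64_t> values_;
  std::string shorty_types_;
};

// Usage: MiniBuilder b; b.CapturePrimitive(int32_t{42});  // shorty becomes "I"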
diff --git a/runtime/lambda/leaking_allocator.cc b/runtime/lambda/leaking_allocator.cc
new file mode 100644
index 0000000..4910732
--- /dev/null
+++ b/runtime/lambda/leaking_allocator.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "lambda/leaking_allocator.h"
+#include "linear_alloc.h"
+#include "runtime.h"
+
+namespace art {
+namespace lambda {
+
+void* LeakingAllocator::AllocateMemory(Thread* self, size_t byte_size) {
+ // TODO: use GetAllocatorForClassLoader to allocate lambda ArtMethod data.
+ return Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
+}
+
+} // namespace lambda
+} // namespace art
diff --git a/runtime/lambda/leaking_allocator.h b/runtime/lambda/leaking_allocator.h
new file mode 100644
index 0000000..c3222d0
--- /dev/null
+++ b/runtime/lambda/leaking_allocator.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
+#define ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
+
+#include <utility> // std::forward
+
+namespace art {
+class Thread; // forward declaration
+
+namespace lambda {
+
+// Temporary class to centralize all the leaking allocations.
+// Allocations made through this class are never freed; the class is a placeholder
+// marking call sites whose allocations still need to be rewritten to properly:
+//
+// (a) Have a lifetime scoped to some other entity.
+// (b) Not be allocated over and over again if it was already allocated once (immutable data).
+//
+// TODO: do all of the above a/b for each callsite, and delete this class.
+class LeakingAllocator {
+ public:
+ // Allocate byte_size bytes worth of memory. Never freed.
+ static void* AllocateMemory(Thread* self, size_t byte_size);
+
+ // Make a new instance of T, flexibly sized, in-place at newly allocated memory. Never freed.
+ template <typename T, typename... Args>
+ static T* MakeFlexibleInstance(Thread* self, size_t byte_size, Args&&... args) {
+ return new (AllocateMemory(self, byte_size)) T(std::forward<Args>(args)...);
+ }
+
+ // Make a new instance of T in-place at newly allocated memory. Never freed.
+ template <typename T, typename... Args>
+ static T* MakeInstance(Thread* self, Args&&... args) {
+ return new (AllocateMemory(self, sizeof(T))) T(std::forward<Args>(args)...);
+ }
+};
+
+} // namespace lambda
+} // namespace art
+
+#endif // ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
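
A standalone sketch of the MakeFlexibleInstance pattern above, with std::malloc standing in for ART's LinearAlloc (both allocations are deliberately never freed; Header is a hypothetical example type): over-allocate, then placement-new T at the front of the buffer so the trailing bytes can hold variable-sized data.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>
#include <utility>

template <typename T, typename... Args>
T* MakeFlexibleInstance(std::size_t byte_size, Args&&... args) {
  void* memory = std::malloc(byte_size);  // leaked on purpose, like LeakingAllocator
  return new (memory) T(std::forward<Args>(args)...);
}

struct Header {
  std::size_t payload_size;
  explicit Header(std::size_t n) : payload_size(n) {}
  std::uint8_t* payload() { return reinterpret_cast<std::uint8_t*>(this + 1); }
};

// Usage: one allocation holding a Header followed by 16 payload bytes.
// Header* h = MakeFlexibleInstance<Header>(sizeof(Header) + 16, std::size_t{16});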