author Igor Murashkin <iam@google.com> 2015-11-21 00:55:35 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2015-11-21 00:55:35 +0000
commit b48cb936e6fa52d0ded29bac4f1f654cdf112ac6 (patch)
tree b192c4698c4889ae90db6abe2652199802213fe6 /runtime/lambda/box_table.cc
parent 3944f7175dcf60316ba58a42698ccf23c65ac57c (diff)
parent 457e874459ae638145cab6d572e34d48480e39d2 (diff)
Merge "lambda: Add support for invoke-interface for boxed innate lambdas"
Diffstat (limited to 'runtime/lambda/box_table.cc')
-rw-r--r--  runtime/lambda/box_table.cc  140
1 file changed, 96 insertions(+), 44 deletions(-)
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 9918bb71f3..0032d081c6 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,8 +18,10 @@
#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
+#include "lambda/box_class_table.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
+#include "mirror/lambda_proxy.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"
@@ -28,12 +30,13 @@
namespace art {
namespace lambda {
-// Temporarily represent the lambda Closure as its raw bytes in an array.
-// TODO: Generate a proxy class for the closure when boxing the first time.
-using BoxedClosurePointerType = mirror::ByteArray*;
+// All closures are boxed into a subtype of LambdaProxy which implements the lambda's interface.
+using BoxedClosurePointerType = mirror::LambdaProxy*;
-static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return mirror::ByteArray::GetArrayClass();
+// Returns the base class for all boxed closures.
+// Note that concrete closure boxes are actually a subtype of mirror::LambdaProxy.
+static mirror::Class* GetBoxedClosureBaseClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangLambdaProxy);
}
namespace {
@@ -54,6 +57,14 @@ namespace {
return closure;
}
};
+
+ struct DeleterForClosure {
+ void operator()(Closure* closure) const {
+ ClosureAllocator::Delete(closure);
+ }
+ };
+
+ using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>;
} // namespace
BoxTable::BoxTable()
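The DeleterForClosure / UniqueClosurePtr pair added above lets BoxLambda's error paths free a half-initialized closure copy automatically, while release() hands ownership to the table on success. A minimal standalone sketch of the same pattern, with a hypothetical malloc-backed Closure/ClosureAllocator standing in for the runtime's types:

#include <cstdlib>
#include <memory>

// Hypothetical stand-ins for lambda::Closure and its allocator.
struct Closure {};
struct ClosureAllocator {
  static Closure* Allocate(std::size_t size) {
    return static_cast<Closure*>(std::malloc(size));
  }
  static void Delete(Closure* closure) { std::free(closure); }
};

// Same shape as the diff: a stateless deleter routes unique_ptr cleanup
// through the custom allocator instead of operator delete.
struct DeleterForClosure {
  void operator()(Closure* closure) const { ClosureAllocator::Delete(closure); }
};
using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>;

int main() {
  UniqueClosurePtr copy(ClosureAllocator::Allocate(sizeof(Closure)));
  // On an early return, the deleter frees the copy; on success,
  // release() transfers ownership (here, simulating the box table).
  Closure* table_owned = copy.release();
  ClosureAllocator::Delete(table_owned);
}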
@@ -75,7 +86,9 @@ BoxTable::~BoxTable() {
}
}
-mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
+mirror::Object* BoxTable::BoxLambda(const ClosureType& closure,
+ const char* class_name,
+ mirror::ClassLoader* class_loader) {
Thread* self = Thread::Current();
{
@@ -91,7 +104,7 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
// Functional f = () -> 5; // vF = create-lambda
// Object a = f; // vA = box-lambda vA
// Object b = f; // vB = box-lambda vB
- // assert(a == f)
+ // assert(a == b)
ValueType value = FindBoxedLambda(closure);
if (!value.IsNull()) {
return value.Read();
@@ -100,30 +113,62 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
  // Otherwise we need to box it ourselves and insert it into the hash map
}
- // Release the lambda table lock here, so that thread suspension is allowed.
+ // Convert the Closure into a managed object instance whose supertype is java.lang.LambdaProxy.
- // Convert the Closure into a managed byte[] which will serve
- // as the temporary 'boxed' version of the lambda. This is good enough
- // to check all the basic object identities that a boxed lambda must retain.
- // It's also good enough to contain all the captured primitive variables.
-
- // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
// TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- BoxedClosurePointerType closure_as_array_object =
- mirror::ByteArray::Alloc(self, closure->GetSize());
+ StackHandleScope<2> hs{self}; // NOLINT: [readability/braces] [4]
- // There are no thread suspension points after this, so we don't need to put it into a handle.
+ Handle<mirror::ClassLoader> class_loader_handle = hs.NewHandle(class_loader);
+
+ // Release the lambda table lock here, so that thread suspension is allowed.
+ self->AllowThreadSuspension();
+
+ lambda::BoxClassTable* lambda_box_class_table;
+
+ // Find the lambda box class table, which is in the system class loader if class_loader is null.
+ if (class_loader == nullptr) {
+ ScopedObjectAccessUnchecked soa(self);
+ mirror::ClassLoader* system_class_loader =
+ soa.Decode<mirror::ClassLoader*>(Runtime::Current()->GetSystemClassLoader());
+ lambda_box_class_table = system_class_loader->GetLambdaProxyCache();
+ } else {
+ lambda_box_class_table = class_loader_handle->GetLambdaProxyCache();
+ // OK: can't be deleted while we hold a handle to the class loader.
+ }
+ DCHECK(lambda_box_class_table != nullptr);
- if (UNLIKELY(closure_as_array_object == nullptr)) {
+ Handle<mirror::Class> closure_class(hs.NewHandle(
+ lambda_box_class_table->GetOrCreateBoxClass(class_name, class_loader_handle)));
+ if (UNLIKELY(closure_class.Get() == nullptr)) {
// Most likely an OOM has occurred.
- CHECK(self->IsExceptionPending());
+ self->AssertPendingException();
return nullptr;
}
- // Write the raw closure data into the byte[].
- closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
- 0 /*index*/), // index
- closure_as_array_object->GetLength());
+ BoxedClosurePointerType closure_as_object = nullptr;
+ UniqueClosurePtr closure_table_copy;
+ // Create an instance of the class, and assign the pointer to the closure into it.
+ {
+ closure_as_object = down_cast<BoxedClosurePointerType>(closure_class->AllocObject(self));
+ if (UNLIKELY(closure_as_object == nullptr)) {
+ self->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ // Make a copy of the closure that we will store in the hash map.
+ // The proxy instance will also point to this same hash map.
+ // Note that the closure pointer is cleaned up only after the proxy is GCd.
+ closure_table_copy.reset(ClosureAllocator::Allocate(closure->GetSize()));
+ closure_as_object->SetClosure(closure_table_copy.get());
+ }
+
+ // There are no thread suspension points after this, so we don't need to put it into a handle.
+ ScopedAssertNoThreadSuspension soants{self, // NOLINT: [whitespace/braces] [5]
+ "box lambda table - box lambda - no more suspensions"}; // NOLINT: [whitespace/braces] [5]
+
+ // Write the raw closure data into the proxy instance's copy of the closure.
+ closure->CopyTo(closure_table_copy.get(),
+ closure->GetSize());
// The method has been successfully boxed into an object, now insert it into the hash map.
{
@@ -134,24 +179,21 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
// we were allocating the object before.
ValueType value = FindBoxedLambda(closure);
if (UNLIKELY(!value.IsNull())) {
- // Let the GC clean up method_as_object at a later time.
+ // Let the GC clean up closure_as_object at a later time.
+ // (We will not see this object when sweeping, since it was never inserted.)
+ closure_as_object->SetClosure(nullptr);
return value.Read();
}
// Otherwise we need to insert it into the hash map in this thread.
- // Make a copy for the box table to keep, in case the closure gets collected from the stack.
- // TODO: GC may need to sweep for roots in the box table's copy of the closure.
- Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
- closure->CopyTo(closure_table_copy, closure->GetSize());
-
- // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
+ // The closure_table_copy is deleted by us manually when we erase it from the map.
// Actually insert into the table.
- map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
+ map_.Insert({closure_table_copy.release(), ValueType(closure_as_object)});
}
- return closure_as_array_object;
+ return closure_as_object;
}
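With this change BoxLambda is a double-checked insertion: look the closure up under the lock, allocate the proxy with the lock released (allocation is a thread-suspension point), then re-check before inserting so that a racing thread's box wins and object identity (the assert(a == b) above) is preserved. A compilable sketch of that protocol, using plain std:: types as stand-ins for ART's GC-aware map, lock, and mirror:: objects:

#include <mutex>
#include <unordered_map>

using Key = const void*;  // Stand-in for Closure*.
struct Box {};            // Stand-in for mirror::LambdaProxy.

std::mutex table_lock;
std::unordered_map<Key, Box*> table;

Box* BoxOnce(Key closure) {
  {
    std::lock_guard<std::mutex> guard(table_lock);
    auto it = table.find(closure);
    if (it != table.end()) {
      return it->second;  // Already boxed: the same closure yields the same object.
    }
  }
  // Allocate outside the lock; in ART this may suspend the thread, so
  // another thread can box the same closure concurrently.
  Box* fresh = new Box();
  std::lock_guard<std::mutex> guard(table_lock);
  auto it = table.find(closure);
  if (it != table.end()) {
    delete fresh;  // Lost the race; ART instead leaves the loser to the GC.
    return it->second;
  }
  table.emplace(closure, fresh);
  return fresh;
}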
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
@@ -165,29 +207,35 @@ bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
mirror::Object* boxed_closure_object = object;
- // Raise ClassCastException if object is not instanceof byte[]
- if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
- ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
+ // Raise ClassCastException if object is not instanceof LambdaProxy
+ if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureBaseClass()))) {
+ ThrowClassCastException(GetBoxedClosureBaseClass(), boxed_closure_object->GetClass());
return false;
}
// TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a byte[].
+ // specified in [type id]. This is not currently implemented since the type id is unavailable.
// If we got this far, the inputs are valid.
- // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
- BoxedClosurePointerType boxed_closure_as_array =
+ // Shuffle the java.lang.LambdaProxy back into a raw closure, then allocate it, copy,
+ // and return it.
+ BoxedClosurePointerType boxed_closure =
down_cast<BoxedClosurePointerType>(boxed_closure_object);
- const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
+ DCHECK_ALIGNED(boxed_closure->GetClosure(), alignof(Closure));
+ const Closure* aligned_interior_closure = boxed_closure->GetClosure();
+ DCHECK(aligned_interior_closure != nullptr);
+
+ // TODO: we probably won't need to make a copy here once there's GC support.
// Allocate a copy that can "escape" and copy the closure data into that.
Closure* unboxed_closure =
- LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
+ LeakingAllocator::MakeFlexibleInstance<Closure>(self, aligned_interior_closure->GetSize());
+ DCHECK_ALIGNED(unboxed_closure, alignof(Closure));
// TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
- memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
+ memcpy(unboxed_closure, aligned_interior_closure, aligned_interior_closure->GetSize());
- DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
+ DCHECK_EQ(unboxed_closure->GetSize(), aligned_interior_closure->GetSize());
*out_closure = unboxed_closure;
return true;
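The unbox path now reads the aligned interior Closure pointer out of the proxy and duplicates it byte-for-byte into an allocation that may escape. A sketch of that copy under the same no-object-references caveat as the TODO above, with a hypothetical FlexClosure standing in for lambda::Closure and malloc for LeakingAllocator:

#include <cstdlib>
#include <cstring>

struct FlexClosure {
  std::size_t size;  // Total size in bytes; captured variables follow in memory.
  std::size_t GetSize() const { return size; }
};

FlexClosure* CopyOutClosure(const FlexClosure* interior) {
  auto* escaped = static_cast<FlexClosure*>(std::malloc(interior->GetSize()));
  // A raw memcpy is only safe while closures capture no object references.
  std::memcpy(escaped, interior, interior->GetSize());
  return escaped;
}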
@@ -236,9 +284,10 @@ void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
if (new_value == nullptr) {
// The object has been swept away.
- const ClosureType& closure = key_value_pair.first;
+ Closure* closure = key_value_pair.first;
// Delete the entry from the map.
+ // (Remove from the map first to avoid accessing a dangling pointer.)
map_iterator = map_.Erase(map_iterator);
// Clean up the memory by deleting the closure.
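The sweep now erases a dead entry from the map before freeing the closure it was keyed on, so the table never holds a dangling key, even transiently. A sketch of that ordering, with a plain map and a predicate standing in for the IsMarkedVisitor query:

#include <unordered_map>

struct Closure {};  // Stand-in for the table's heap-allocated closure copy.

template <typename IsSwept>
void SweepTable(std::unordered_map<Closure*, void*>& map, IsSwept is_swept) {
  for (auto it = map.begin(); it != map.end();) {
    if (is_swept(it->second)) {
      Closure* closure = it->first;
      it = map.erase(it);  // Remove from the map first.
      delete closure;      // Then delete the table's copy of the closure.
    } else {
      ++it;
    }
  }
}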
@@ -290,7 +339,10 @@ void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& ite
}
bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- return item.first == nullptr;
+ bool is_empty = item.first == nullptr;
+ DCHECK_EQ(item.second.IsNull(), is_empty);
+
+ return is_empty;
}
bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,