Diffstat (limited to 'runtime/lambda/box_table.cc')
-rw-r--r-- | runtime/lambda/box_table.cc | 145
1 file changed, 27 insertions, 118 deletions
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 8eef10bbad..26575fd995 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,8 +18,6 @@
 #include "base/mutex.h"
 #include "common_throws.h"
 #include "gc_root-inl.h"
-#include "lambda/closure.h"
-#include "lambda/leaking_allocator.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
 #include "thread.h"
@@ -28,53 +26,11 @@
 
 namespace art {
 namespace lambda {
-// Temporarily represent the lambda Closure as its raw bytes in an array.
-// TODO: Generate a proxy class for the closure when boxing the first time.
-using BoxedClosurePointerType = mirror::ByteArray*;
-
-static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
-  return mirror::ByteArray::GetArrayClass();
-}
-
-namespace {
-  // Convenience functions to allocating/deleting box table copies of the closures.
-  struct ClosureAllocator {
-    // Deletes a Closure that was allocated through ::Allocate.
-    static void Delete(Closure* ptr) {
-      delete[] reinterpret_cast<char*>(ptr);
-    }
-
-    // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
-    static Closure* Allocate(size_t size) {
-      DCHECK_GE(size, sizeof(Closure));
-
-      // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
-      Closure* closure = reinterpret_cast<Closure*>(new char[size]);
-      DCHECK_ALIGNED(closure, alignof(Closure));
-      return closure;
-    }
-  };
-}  // namespace
 
 BoxTable::BoxTable()
   : allow_new_weaks_(true),
     new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
 
-BoxTable::~BoxTable() {
-  // Free all the copies of our closures.
-  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ++map_iterator) {
-    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
-    Closure* closure = key_value_pair.first;
-
-    // Remove from the map first, so that it doesn't try to access dangling pointer.
-    map_iterator = map_.Erase(map_iterator);
-
-    // Safe to delete, no dangling pointers.
-    ClosureAllocator::Delete(closure);
-  }
-}
-
 mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
   Thread* self = Thread::Current();
 
@@ -102,29 +58,22 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
 
   // Release the lambda table lock here, so that thread suspension is allowed.
 
-  // Convert the Closure into a managed byte[] which will serve
+  // Convert the ArtMethod into a java.lang.reflect.Method which will serve
   // as the temporary 'boxed' version of the lambda. This is good enough
   // to check all the basic object identities that a boxed lambda must retain.
-  // It's also good enough to contain all the captured primitive variables.
 
   // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
   // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
-  BoxedClosurePointerType closure_as_array_object =
-      mirror::ByteArray::Alloc(self, closure->GetSize());
-
+  mirror::Method* method_as_object =
+      mirror::Method::CreateFromArtMethod(self, closure);
   // There are no thread suspension points after this, so we don't need to put it into a handle.
 
-  if (UNLIKELY(closure_as_array_object == nullptr)) {
+  if (UNLIKELY(method_as_object == nullptr)) {
     // Most likely an OOM has occurred.
     CHECK(self->IsExceptionPending());
     return nullptr;
   }
 
-  // Write the raw closure data into the byte[].
-  closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t),  // component size
-                                                      0 /*index*/),     // index
-                  closure_as_array_object->GetLength());
-
   // The method has been successfully boxed into an object, now insert it into the hash map.
   {
     MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -138,56 +87,38 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
       return value.Read();
     }
 
-    // Otherwise we need to insert it into the hash map in this thread.
-
-    // Make a copy for the box table to keep, in case the closure gets collected from the stack.
-    // TODO: GC may need to sweep for roots in the box table's copy of the closure.
-    Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
-    closure->CopyTo(closure_table_copy, closure->GetSize());
-
-    // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
-
-    // Actually insert into the table.
-    map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
+    // Otherwise we should insert it into the hash map in this thread.
+    map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
   }
 
-  return closure_as_array_object;
+  return method_as_object;
 }
 
 bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
   DCHECK(object != nullptr);
   *out_closure = nullptr;
 
-  Thread* self = Thread::Current();
-
   // Note that we do not need to access lambda_table_lock_ here
   // since we don't need to look at the map.
 
   mirror::Object* boxed_closure_object = object;
 
-  // Raise ClassCastException if object is not instanceof byte[]
-  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
-    ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
+  // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
+  if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
+    ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
     return false;
   }
 
   // TODO(iam): We must check that the closure object extends/implements the type
-  // specified in [type id]. This is not currently implemented since it's always a byte[].
+  // specified in [type id]. This is not currently implemented since it's always a Method.
 
   // If we got this far, the inputs are valid.
-  // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
-  BoxedClosurePointerType boxed_closure_as_array =
-      down_cast<BoxedClosurePointerType>(boxed_closure_object);
-
-  const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
+  // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
+  mirror::AbstractMethod* boxed_closure_as_method =
      down_cast<mirror::AbstractMethod*>(boxed_closure_object);
 
-  // Allocate a copy that can "escape" and copy the closure data into that.
-  Closure* unboxed_closure =
-      LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
-  // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
-  memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
-
-  DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
+  ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
+  DCHECK(unboxed_closure != nullptr);
 
   *out_closure = unboxed_closure;
   return true;
@@ -196,7 +127,7 @@ bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
 BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
   auto map_iterator = map_.Find(closure);
   if (map_iterator != map_.end()) {
-    const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
+    const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
     const ValueType& value = key_value_pair.second;
 
     DCHECK(!value.IsNull());  // Never store null boxes.
@@ -226,7 +157,7 @@ void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
   */
   std::vector<ClosureType> remove_list;
   for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
-    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
+    std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
 
     const ValueType& old_value = key_value_pair.second;
 
@@ -235,15 +166,10 @@ void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
     mirror::Object* new_value = visitor->IsMarked(old_value_raw);
 
     if (new_value == nullptr) {
-      // The object has been swept away.
       const ClosureType& closure = key_value_pair.first;
-
+      // The object has been swept away.
       // Delete the entry from the map.
-      map_iterator = map_.Erase(map_iterator);
-
-      // Clean up the memory by deleting the closure.
-      ClosureAllocator::Delete(closure);
-
+      map_iterator = map_.Erase(map_.Find(closure));
     } else {
       // The object has been moved.
       // Update the map.
@@ -282,33 +208,16 @@ void BoxTable::BroadcastForNewWeakBoxedLambdas() {
   new_weaks_condition_.Broadcast(self);
 }
 
-void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
-  item.first = nullptr;
-
-  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-  item.second = ValueType();  // Also clear the GC root.
-}
-
-bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
-  return item.first == nullptr;
-}
-
-bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
-                                    const UnorderedMapKeyType& rhs) const {
+bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
   // Nothing needs this right now, but leave this assertion for later when
   // we need to look at the references inside of the closure.
-  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-
-  return lhs->ReferenceEquals(rhs);
-}
-
-size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
-  const lambda::Closure* closure = key;
-  DCHECK_ALIGNED(closure, alignof(lambda::Closure));
+  if (kIsDebugBuild) {
+    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+  }
 
-  // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
-  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-  return closure->GetHashCode();
+  // TODO: Need rework to use read barriers once closures have references inside of them that can
+  // move. Until then, it's safe to just compare the data inside of it directly.
+  return lhs == rhs;
 }
 
 }  // namespace lambda
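For reference, a minimal standalone sketch of the pattern this change moves to: the box table keys its map by the raw method pointer itself (identity equality, pointer hashing), the box merely embeds that pointer, and sweeping only erases map entries because no separately owned closure copy exists. The names FakeArtMethod, FakeMethodBox, and SimpleBoxTable below are hypothetical stand-ins, not ART types, and the sketch omits GC roots, weak-reference sweeping against a real collector, and the lambda_table_lock_.

// Simplified, non-ART illustration of an identity-keyed box table.
#include <cassert>
#include <memory>
#include <unordered_map>

struct FakeArtMethod {            // stand-in for ArtMethod
  const char* name;
};

struct FakeMethodBox {            // stand-in for the boxed java.lang.reflect.Method
  FakeArtMethod* method;          // the box just embeds the method pointer
};

class SimpleBoxTable {
 public:
  // Boxing: return the existing box if present, otherwise create one and
  // remember it. Only the identity of the key pointer is compared.
  std::shared_ptr<FakeMethodBox> BoxLambda(FakeArtMethod* closure) {
    auto it = map_.find(closure);
    if (it != map_.end()) {
      return it->second;
    }
    auto box = std::make_shared<FakeMethodBox>(FakeMethodBox{closure});
    map_.emplace(closure, box);
    return box;
  }

  // Unboxing: read the embedded pointer back out; no table lookup or copy.
  static FakeArtMethod* UnboxLambda(const FakeMethodBox& box) {
    assert(box.method != nullptr);
    return box.method;
  }

  // Sweeping a dead box only erases the map entry; there is no separately
  // allocated closure copy to free (unlike the byte[]-based version).
  void SweepDeadBox(FakeArtMethod* closure) {
    map_.erase(closure);
  }

 private:
  // Pointer keys give identity hashing/equality for free via std::hash<T*>.
  std::unordered_map<FakeArtMethod*, std::shared_ptr<FakeMethodBox>> map_;
};

int main() {
  FakeArtMethod method{"lambda$main$0"};
  SimpleBoxTable table;

  auto box1 = table.BoxLambda(&method);
  auto box2 = table.BoxLambda(&method);
  assert(box1 == box2);                                   // same box both times
  assert(SimpleBoxTable::UnboxLambda(*box1) == &method);  // round-trips the pointer

  table.SweepDeadBox(&method);
  return 0;
}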