Diffstat (limited to 'runtime/lambda')
-rw-r--r--  runtime/lambda/art_lambda_method.cc        77
-rw-r--r--  runtime/lambda/art_lambda_method.h        116
-rw-r--r--  runtime/lambda/box_table.cc               315
-rw-r--r--  runtime/lambda/box_table.h                146
-rw-r--r--  runtime/lambda/closure.cc                 414
-rw-r--r--  runtime/lambda/closure.h                  184
-rw-r--r--  runtime/lambda/closure_builder-inl.h       45
-rw-r--r--  runtime/lambda/closure_builder.cc         210
-rw-r--r--  runtime/lambda/closure_builder.h          104
-rw-r--r--  runtime/lambda/closure_test.cc            356
-rw-r--r--  runtime/lambda/leaking_allocator.cc        33
-rw-r--r--  runtime/lambda/leaking_allocator.h         72
-rw-r--r--  runtime/lambda/shorty_field_type.h        475
-rw-r--r--  runtime/lambda/shorty_field_type_test.cc  354
14 files changed, 0 insertions, 2901 deletions
diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc
deleted file mode 100644
index 6f9f8bbb59..0000000000
--- a/runtime/lambda/art_lambda_method.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/art_lambda_method.h"
-
-#include "base/logging.h"
-#include "lambda/shorty_field_type.h"
-
-namespace art {
-namespace lambda {
-
-ArtLambdaMethod::ArtLambdaMethod(ArtMethod* target_method,
- const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty,
- bool innate_lambda)
- : method_(target_method),
- captured_variables_type_descriptor_(captured_variables_type_descriptor),
- captured_variables_shorty_(captured_variables_shorty),
- innate_lambda_(innate_lambda) {
- DCHECK(target_method != nullptr);
- DCHECK(captured_variables_type_descriptor != nullptr);
- DCHECK(captured_variables_shorty != nullptr);
-
- // Calculate the static closure size from the captured variables.
- size_t size = sizeof(ArtLambdaMethod*); // Initial size is just this method.
- bool static_size = true;
- const char* shorty = captured_variables_shorty_;
- while (shorty != nullptr && *shorty != '\0') {
- // Each captured variable also appends to the size.
- ShortyFieldType shorty_field{*shorty}; // NOLINT [readability/braces] [4]
- size += shorty_field.GetStaticSize();
- static_size &= shorty_field.IsStaticSize();
- ++shorty;
- }
- closure_size_ = size;
-
- // We determine whether or not the size is dynamic by checking for nested lambdas.
- //
- // This is conservative, since in theory an optimization could determine the size
- // of the nested lambdas recursively. In practice it's probably better to flatten out
- // nested lambdas and inline all their code if they are known statically.
- dynamic_size_ = !static_size;
-
- if (kIsDebugBuild) {
- // Double check that the number of captured variables match in both strings.
- size_t shorty_count = strlen(captured_variables_shorty);
-
- size_t long_count = 0;
- const char* long_type = captured_variables_type_descriptor;
- ShortyFieldType out;
- while ((long_type = ShortyFieldType::ParseFromFieldTypeDescriptor(long_type, &out))
- != nullptr) {
- ++long_count;
- }
-
- DCHECK_EQ(shorty_count, long_count)
- << "number of captured variables in long type '" << captured_variables_type_descriptor
- << "' (" << long_count << ")" << " did not match short type '"
- << captured_variables_shorty << "' (" << shorty_count << ")";
- }
-}
-
-} // namespace lambda
-} // namespace art
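The constructor above derives the closure size by walking the captured-variables shorty once: it starts from the size of the ArtLambdaMethod pointer, adds each capture's static size, and marks the closure dynamically sized if any capture is a nested lambda. Below is a minimal standalone sketch of that accumulation; the per-type byte sizes are illustrative assumptions (the authoritative values lived in the deleted shorty_field_type.h), and none of these names are ART APIs.

    #include <cstddef>
    #include <cstdio>
    #include <utility>

    // Illustrative per-shorty-type sizes; ShortyFieldType::GetStaticSize() was authoritative.
    static size_t StaticSizeOf(char shorty) {
      switch (shorty) {
        case 'Z': case 'B': return 1;              // boolean, byte
        case 'C': case 'S': return 2;              // char, short
        case 'I': case 'F': return 4;              // int, float
        case 'J': case 'D': return 8;              // long, double
        case 'L': return 4;                        // object reference (assumed compressed)
        case '\\': return sizeof(void*);           // nested lambda: static part only
        default: return 0;
      }
    }

    // Returns {closure_size_in_bytes, size_is_static}.
    static std::pair<size_t, bool> ComputeClosureSize(const char* shorty) {
      size_t size = sizeof(void*);                 // stands in for the ArtLambdaMethod* header
      bool static_size = true;
      for (; *shorty != '\0'; ++shorty) {
        size += StaticSizeOf(*shorty);
        static_size &= (*shorty != '\\');          // a nested lambda makes the size dynamic
      }
      return {size, static_size};
    }

    int main() {
      const auto result = ComputeClosureSize("ZFL\\L");  // boolean, float, object, lambda, object
      std::printf("size=%zu static=%d\n", result.first, static_cast<int>(result.second));
      return 0;
    }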
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
deleted file mode 100644
index ea13eb7af6..0000000000
--- a/runtime/lambda/art_lambda_method.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
-#define ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
-
-#include "base/macros.h"
-#include "art_method.h"
-
-#include <stdint.h>
-
-namespace art {
-namespace lambda {
-
-class ArtLambdaMethod {
- public:
- // Construct an art lambda method.
- // The target method is the one invoked by invoke-lambda.
- // The type descriptor describes the types of variables captured, e.g. "ZFLObject;\FI;[Z"
- // The shorty drops the object name and treats arrays as objects, e.g. "ZFL\L"
- // Innate lambda means that the lambda was originally created via create-lambda.
- // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
- // (Ownership of the strings is retained by the caller; their lifetime must exceed that of this object).
- ArtLambdaMethod(ArtMethod* target_method,
- const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty,
- bool innate_lambda = true);
-
- // Get the target method for this lambda that would be used by the invoke-lambda dex instruction.
- ArtMethod* GetArtMethod() const {
- return method_;
- }
-
- // Get the compile-time size of lambda closures for this method in bytes.
- // This is circular (that is, it includes the size of the ArtLambdaMethod pointer).
- // One should also check if the size is dynamic since nested lambdas have a runtime size.
- size_t GetStaticClosureSize() const {
- return closure_size_;
- }
-
- // Get the type descriptor for the list of captured variables.
- // e.g. "ZFLObject;\FI;[Z" means a captured boolean, float, class Object, lambda FI, array of booleans
- const char* GetCapturedVariablesTypeDescriptor() const {
- return captured_variables_type_descriptor_;
- }
-
- // Get the shorty 'field' type descriptor list of captured variables.
- // This follows the same rules as a string of ShortyFieldType in the dex specification.
- // Every captured variable is represented by exactly one character.
- // - Objects become 'L'.
- // - Arrays become 'L'.
- // - Lambdas become '\'.
- const char* GetCapturedVariablesShortyTypeDescriptor() const {
- return captured_variables_shorty_;
- }
-
- // Will the size of this lambda change at runtime?
- // Only returns true if there is a nested lambda that we can't determine statically the size of.
- bool IsDynamicSize() const {
- return dynamic_size_;
- }
-
- // Will the size of this lambda always be constant at runtime?
- // This generally means there's no nested lambdas, or we were able to successfully determine
- // their size statically at compile time.
- bool IsStaticSize() const {
- return !IsDynamicSize();
- }
- // Is this a lambda that was originally created via create-lambda?
- // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
- bool IsInnateLambda() const {
- return innate_lambda_;
- }
-
- // How many variables were captured?
- // (Each nested lambda counts as 1 captured var regardless of how many captures it itself has).
- size_t GetNumberOfCapturedVariables() const {
- return strlen(captured_variables_shorty_);
- }
-
- private:
- // TODO: ArtMethod, or at least the entry points should be inlined into this struct
- // to avoid an extra indirect load when doing invokes.
- // Target method that invoke-lambda will jump to.
- ArtMethod* method_;
- // How big the closure is (in bytes). Only includes the constant size.
- size_t closure_size_;
- // The type descriptor for the captured variables, e.g. "IS" for [int, short]
- const char* captured_variables_type_descriptor_;
- // The shorty type descriptor for captured vars, (e.g. using 'L' instead of 'LObject;')
- const char* captured_variables_shorty_;
- // Whether or not the size is dynamic. If it is, copiers need to read the Closure size at runtime.
- bool dynamic_size_;
- // True if this lambda was originally made with create-lambda,
- // false if it came from a class instance (through new-instance and then unbox-lambda).
- bool innate_lambda_;
-
- DISALLOW_COPY_AND_ASSIGN(ArtLambdaMethod);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
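The accessors above distinguish the full type descriptor from its shorty form: objects and arrays both collapse to 'L', lambdas to '\', and primitives keep their single character. A rough standalone sketch of that mapping under exactly those rules follows; it assumes lambda captures are written as "\Name;" and is not the deleted ShortyFieldType parser.

    #include <cstdio>
    #include <string>

    // Collapse a captured-variables type descriptor to its shorty form:
    // "LFoo;" and "[X" become 'L', "\Foo;" becomes '\', primitives are kept as-is.
    static std::string DescriptorToShorty(const std::string& desc) {
      std::string shorty;
      for (std::string::size_type i = 0; i < desc.size(); ) {
        const char c = desc[i];
        if (c == 'L' || c == '\\') {          // object or lambda: consume through ';'
          shorty += (c == 'L') ? 'L' : '\\';
          i = desc.find(';', i) + 1;
        } else if (c == '[') {                // array: consume dimensions and element type
          shorty += 'L';
          while (i < desc.size() && desc[i] == '[') ++i;
          i = (desc[i] == 'L') ? desc.find(';', i) + 1 : i + 1;
        } else {                              // primitive: already a shorty character
          shorty += c;
          ++i;
        }
      }
      return shorty;
    }

    int main() {
      std::printf("%s\n", DescriptorToShorty("ZFLObject;\\FI;[Z").c_str());  // prints ZFL\L
      return 0;
    }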
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
deleted file mode 100644
index 9918bb71f3..0000000000
--- a/runtime/lambda/box_table.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/box_table.h"
-
-#include "base/mutex.h"
-#include "common_throws.h"
-#include "gc_root-inl.h"
-#include "lambda/closure.h"
-#include "lambda/leaking_allocator.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "thread.h"
-
-#include <vector>
-
-namespace art {
-namespace lambda {
-// Temporarily represent the lambda Closure as its raw bytes in an array.
-// TODO: Generate a proxy class for the closure when boxing the first time.
-using BoxedClosurePointerType = mirror::ByteArray*;
-
-static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return mirror::ByteArray::GetArrayClass();
-}
-
-namespace {
- // Convenience functions for allocating and deleting box table copies of the closures.
- struct ClosureAllocator {
- // Deletes a Closure that was allocated through ::Allocate.
- static void Delete(Closure* ptr) {
- delete[] reinterpret_cast<char*>(ptr);
- }
-
- // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
- static Closure* Allocate(size_t size) {
- DCHECK_GE(size, sizeof(Closure));
-
- // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
- Closure* closure = reinterpret_cast<Closure*>(new char[size]);
- DCHECK_ALIGNED(closure, alignof(Closure));
- return closure;
- }
- };
-} // namespace
-
-BoxTable::BoxTable()
- : allow_new_weaks_(true),
- new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
-
-BoxTable::~BoxTable() {
- // Free all the copies of our closures.
- for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
- Closure* closure = key_value_pair.first;
-
- // Remove from the map first, so that it doesn't try to access dangling pointer.
- map_iterator = map_.Erase(map_iterator);
-
- // Safe to delete, no dangling pointers.
- ClosureAllocator::Delete(closure);
- }
-}
-
-mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
- Thread* self = Thread::Current();
-
- {
- // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
- /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
- BlockUntilWeaksAllowed();
-
- // Attempt to look up this object, it's possible it was already boxed previously.
- // If this is the case we *must* return the same object as before to maintain
- // referential equality.
- //
- // In managed code:
- // Functional f = () -> 5; // vF = create-lambda
- // Object a = f; // vA = box-lambda vA
- // Object b = f; // vB = box-lambda vB
- // assert(a == f)
- ValueType value = FindBoxedLambda(closure);
- if (!value.IsNull()) {
- return value.Read();
- }
-
- // Otherwise we need to box ourselves and insert it into the hash map
- }
-
- // Release the lambda table lock here, so that thread suspension is allowed.
-
- // Convert the Closure into a managed byte[] which will serve
- // as the temporary 'boxed' version of the lambda. This is good enough
- // to check all the basic object identities that a boxed lambda must retain.
- // It's also good enough to contain all the captured primitive variables.
-
- // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
- // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- BoxedClosurePointerType closure_as_array_object =
- mirror::ByteArray::Alloc(self, closure->GetSize());
-
- // There are no thread suspension points after this, so we don't need to put it into a handle.
-
- if (UNLIKELY(closure_as_array_object == nullptr)) {
- // Most likely an OOM has occurred.
- CHECK(self->IsExceptionPending());
- return nullptr;
- }
-
- // Write the raw closure data into the byte[].
- closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
- 0 /*index*/), // index
- closure_as_array_object->GetLength());
-
- // The method has been successfully boxed into an object, now insert it into the hash map.
- {
- MutexLock mu(self, *Locks::lambda_table_lock_);
- BlockUntilWeaksAllowed();
-
- // Lookup the object again, it's possible another thread already boxed it while
- // we were allocating the object before.
- ValueType value = FindBoxedLambda(closure);
- if (UNLIKELY(!value.IsNull())) {
- // Let the GC clean up the unused closure_as_array_object at a later time.
- return value.Read();
- }
-
- // Otherwise we need to insert it into the hash map in this thread.
-
- // Make a copy for the box table to keep, in case the closure gets collected from the stack.
- // TODO: GC may need to sweep for roots in the box table's copy of the closure.
- Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
- closure->CopyTo(closure_table_copy, closure->GetSize());
-
- // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
-
- // Actually insert into the table.
- map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
- }
-
- return closure_as_array_object;
-}
-
-bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
- DCHECK(object != nullptr);
- *out_closure = nullptr;
-
- Thread* self = Thread::Current();
-
- // Note that we do not need to access lambda_table_lock_ here
- // since we don't need to look at the map.
-
- mirror::Object* boxed_closure_object = object;
-
- // Raise ClassCastException if object is not instanceof byte[]
- if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
- ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
- return false;
- }
-
- // TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a byte[].
-
- // If we got this far, the inputs are valid.
- // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
- BoxedClosurePointerType boxed_closure_as_array =
- down_cast<BoxedClosurePointerType>(boxed_closure_object);
-
- const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
-
- // Allocate a copy that can "escape" and copy the closure data into that.
- Closure* unboxed_closure =
- LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
- // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
- memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
-
- DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
-
- *out_closure = unboxed_closure;
- return true;
-}
-
-BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
- auto map_iterator = map_.Find(closure);
- if (map_iterator != map_.end()) {
- const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
- const ValueType& value = key_value_pair.second;
-
- DCHECK(!value.IsNull()); // Never store null boxes.
- return value;
- }
-
- return ValueType(nullptr);
-}
-
-void BoxTable::BlockUntilWeaksAllowed() {
- Thread* self = Thread::Current();
- while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
- new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock
- }
-}
-
-void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
- DCHECK(visitor != nullptr);
-
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- /*
- * Visit every weak root in our lambda box table.
- * Remove unmarked objects, update marked objects to new address.
- */
- std::vector<ClosureType> remove_list;
- for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
- const ValueType& old_value = key_value_pair.second;
-
- // This does not need a read barrier because this is called by GC.
- mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
- mirror::Object* new_value = visitor->IsMarked(old_value_raw);
-
- if (new_value == nullptr) {
- // The object has been swept away.
- const ClosureType& closure = key_value_pair.first;
-
- // Delete the entry from the map.
- map_iterator = map_.Erase(map_iterator);
-
- // Clean up the memory by deleting the closure.
- ClosureAllocator::Delete(closure);
-
- } else {
- // The object has been moved.
- // Update the map.
- key_value_pair.second = ValueType(new_value);
- ++map_iterator;
- }
- }
-
- // Occasionally shrink the map to avoid growing very large.
- if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
- map_.ShrinkToMaximumLoad();
- }
-}
-
-void BoxTable::DisallowNewWeakBoxedLambdas() {
- CHECK(!kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- allow_new_weaks_ = false;
-}
-
-void BoxTable::AllowNewWeakBoxedLambdas() {
- CHECK(!kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- allow_new_weaks_ = true;
- new_weaks_condition_.Broadcast(self);
-}
-
-void BoxTable::BroadcastForNewWeakBoxedLambdas() {
- CHECK(kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
- new_weaks_condition_.Broadcast(self);
-}
-
-void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
- item.first = nullptr;
-
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- item.second = ValueType(); // Also clear the GC root.
-}
-
-bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- return item.first == nullptr;
-}
-
-bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
- const UnorderedMapKeyType& rhs) const {
- // Nothing needs this right now, but leave this assertion for later when
- // we need to look at the references inside of the closure.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-
- return lhs->ReferenceEquals(rhs);
-}
-
-size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
- const lambda::Closure* closure = key;
- DCHECK_ALIGNED(closure, alignof(lambda::Closure));
-
- // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- return closure->GetHashCode();
-}
-
-} // namespace lambda
-} // namespace art
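BoxLambda above follows a double-checked pattern: look the closure up under the table lock, release the lock to allocate the boxed object (allocation may suspend the thread), then re-lock, re-check, and insert only if no other thread boxed the same closure in the meantime, so repeated boxing returns the same object. Below is a simplified standalone sketch of that shape, using standard containers in place of ART's GcRoot-backed HashMap; every name here is a hypothetical stand-in.

    #include <cstdint>
    #include <memory>
    #include <mutex>
    #include <unordered_map>
    #include <vector>

    struct FakeClosure { std::vector<uint8_t> bytes; };   // stand-in for lambda::Closure
    using Boxed = std::shared_ptr<std::vector<uint8_t>>;  // stand-in for the boxed byte[]

    class SimpleBoxTable {
     public:
      Boxed BoxLambda(const FakeClosure* closure) {
        {
          std::lock_guard<std::mutex> lock(mu_);
          auto it = map_.find(closure);
          if (it != map_.end()) {
            return it->second;                 // already boxed: preserve referential equality
          }
        }
        // Allocate outside the lock; in ART this is where thread suspension could happen.
        Boxed boxed = std::make_shared<std::vector<uint8_t>>(closure->bytes);

        std::lock_guard<std::mutex> lock(mu_);
        auto it = map_.find(closure);
        if (it != map_.end()) {
          return it->second;                   // another thread boxed it first; drop our copy
        }
        map_.emplace(closure, boxed);
        return boxed;
      }

     private:
      std::mutex mu_;
      std::unordered_map<const FakeClosure*, Boxed> map_;
    };

    int main() {
      SimpleBoxTable table;
      FakeClosure c{{1, 2, 3}};
      return table.BoxLambda(&c) == table.BoxLambda(&c) ? 0 : 1;  // same box both times
    }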
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
deleted file mode 100644
index adb733271e..0000000000
--- a/runtime/lambda/box_table.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
-#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_
-
-#include "base/allocator.h"
-#include "base/hash_map.h"
-#include "gc_root.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "object_callbacks.h"
-
-#include <stdint.h>
-
-namespace art {
-
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-struct Closure; // forward declaration
-
-/*
- * Store a table of boxed lambdas. This is required to maintain object referential equality
- * when a lambda is re-boxed.
- *
- * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
- * When too many objects get GCd, we shrink the underlying table to use less space.
- */
-class BoxTable FINAL {
- public:
- using ClosureType = art::lambda::Closure*;
-
- // Boxes a closure into an object. Returns null and throws an exception on failure.
- mirror::Object* BoxLambda(const ClosureType& closure)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
-
- // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
- bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Sweep weak references to lambda boxes. Update the addresses if the objects have been
- // moved, and delete them from the table if the objects have been cleaned up.
- void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Temporarily block anyone from touching the map.
- void DisallowNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Unblock any readers who have been queued waiting to touch the map.
- void AllowNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Unblock any readers who have been queued waiting to touch the map.
- void BroadcastForNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- BoxTable();
- ~BoxTable();
-
- private:
- // Explanation:
- // - After all threads are suspended (exclusive mutator lock),
- // the concurrent-copying GC can move objects from the "from" space to the "to" space.
- // If an object is moved at that time and *before* SweepSystemWeaks are called then
- // we don't know if the move has happened yet.
- // Successive reads will then (incorrectly) look at the objects in the "from" space,
- // which is a problem since the objects have been already forwarded and mutations
- // would not be visible in the right space.
- // Instead, use a GcRoot here which will be automatically updated by the GC.
- //
- // Also, any reads should be protected by a read barrier to always give us the "to" space address.
- using ValueType = GcRoot<mirror::Object>;
-
- // Attempt to look up the lambda in the map, or return null if it's not there yet.
- ValueType FindBoxedLambda(const ClosureType& closure) const
- SHARED_REQUIRES(Locks::lambda_table_lock_);
-
- // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
- void BlockUntilWeaksAllowed()
- SHARED_REQUIRES(Locks::lambda_table_lock_);
-
- // The key is a raw Closure pointer; the box table owns each copy and deletes it manually when the entry is erased.
- using UnorderedMapKeyType = ClosureType;
-
- // EmptyFn implementation for art::HashMap
- struct EmptyFn {
- void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
-
- bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
- };
-
- // HashFn implementation for art::HashMap
- struct HashFn {
- size_t operator()(const UnorderedMapKeyType& key) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
- };
-
- // EqualsFn implementation for art::HashMap
- struct EqualsFn {
- bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
- };
-
- using UnorderedMap = art::HashMap<UnorderedMapKeyType,
- ValueType,
- EmptyFn,
- HashFn,
- EqualsFn,
- TrackingAllocator<std::pair<ClosureType, ValueType>,
- kAllocatorTagLambdaBoxTable>>;
-
- UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
- bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
- ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);
-
- // Shrink the map when we get below this load factor.
- // (This is an arbitrary value that should be large enough to prevent aggressive map erases
- // from shrinking the table too often.)
- static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
-
- DISALLOW_COPY_AND_ASSIGN(BoxTable);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_BOX_TABLE_H_
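As the comments above describe, the table's values are weak GC roots: a sweep pass removes entries whose boxed object was collected and rewrites entries whose object moved. ART's GcRoot and moving collector have no standard-library equivalent, but a loose std::weak_ptr analogy shows the sweep-and-erase idea; all names below are hypothetical.

    #include <map>
    #include <memory>
    #include <string>

    // Values are held weakly: the table alone does not keep the boxed object alive.
    using WeakBox = std::weak_ptr<std::string>;
    using Table = std::map<int, WeakBox>;   // the int key stands in for the closure

    // Analogue of SweepWeakBoxedLambdas: erase entries whose boxed object is gone.
    static void Sweep(Table& table) {
      for (auto it = table.begin(); it != table.end(); ) {
        if (it->second.expired()) {
          it = table.erase(it);   // the boxed object was collected
        } else {
          ++it;                   // still alive; a moving GC would also update the address here
        }
      }
    }

    int main() {
      Table table;
      auto boxed = std::make_shared<std::string>("boxed lambda");
      table.emplace(1, WeakBox(boxed));
      boxed.reset();              // simulate the GC collecting the last strong reference
      Sweep(table);
      return table.empty() ? 0 : 1;
    }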
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
deleted file mode 100644
index 179e4ee7f2..0000000000
--- a/runtime/lambda/closure.cc
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/closure.h"
-
-#include "base/logging.h"
-#include "lambda/art_lambda_method.h"
-#include "runtime/mirror/object_reference.h"
-
-static constexpr const bool kClosureSupportsReferences = false;
-static constexpr const bool kClosureSupportsGarbageCollection = false;
-
-namespace art {
-namespace lambda {
-
-template <typename T>
-// TODO: can I return T __attribute__((__aligned__(1)))* here instead?
-const uint8_t* Closure::GetUnsafeAtOffset(size_t offset) const {
- // Do not DCHECK here with existing helpers since most of them will call into this function.
- return reinterpret_cast<const uint8_t*>(captured_) + offset;
-}
-
-size_t Closure::GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const {
- switch (variable_type) {
- case ShortyFieldType::kLambda:
- {
- return GetClosureSize(GetUnsafeAtOffset<Closure>(offset));
- }
- default:
- DCHECK(variable_type.IsStaticSize());
- return variable_type.GetStaticSize();
- }
-}
-
-// Templatize the flags to give the compiler a fighting chance to eliminate
-// any unnecessary code through different uses of this function.
-template <Closure::VariableInfo::Flags flags>
-inline Closure::VariableInfo Closure::ParseTypeDescriptor(const char* type_descriptor,
- size_t upto_index) const {
- DCHECK(type_descriptor != nullptr);
-
- VariableInfo result;
-
- ShortyFieldType last_type;
- size_t offset = (flags & VariableInfo::kOffset) ? GetStartingOffset() : 0;
- size_t prev_offset = 0;
- size_t count = 0;
-
- while ((type_descriptor =
- ShortyFieldType::ParseFromFieldTypeDescriptor(type_descriptor, &last_type)) != nullptr) {
- count++;
-
- if (flags & VariableInfo::kOffset) {
- // Accumulate the sizes of all preceding captured variables as the current offset only.
- offset += prev_offset;
- prev_offset = GetCapturedVariableSize(last_type, offset);
- }
-
- if ((count > upto_index)) {
- break;
- }
- }
-
- if (flags & VariableInfo::kVariableType) {
- result.variable_type_ = last_type;
- }
-
- if (flags & VariableInfo::kIndex) {
- result.index_ = count;
- }
-
- if (flags & VariableInfo::kCount) {
- result.count_ = count;
- }
-
- if (flags & VariableInfo::kOffset) {
- result.offset_ = offset;
- }
-
- // TODO: We should probably store the result of this in the ArtLambdaMethod,
- // to avoid re-computing the data every single time for static closures.
- return result;
-}
-
-size_t Closure::GetCapturedVariablesSize() const {
- const size_t captured_variable_offset = offsetof(Closure, captured_);
- DCHECK_GE(GetSize(), captured_variable_offset); // Prevent underflows.
- return GetSize() - captured_variable_offset;
-}
-
-size_t Closure::GetSize() const {
- const size_t static_closure_size = lambda_info_->GetStaticClosureSize();
- if (LIKELY(lambda_info_->IsStaticSize())) {
- return static_closure_size;
- }
-
- DCHECK_GE(static_closure_size, sizeof(captured_[0].dynamic_.size_));
- const size_t dynamic_closure_size = captured_[0].dynamic_.size_;
- // The dynamic size better be at least as big as the static size.
- DCHECK_GE(dynamic_closure_size, static_closure_size);
-
- return dynamic_closure_size;
-}
-
-void Closure::CopyTo(void* target, size_t target_size) const {
- DCHECK_GE(target_size, GetSize());
-
- // TODO: using memcpy is unsafe with read barriers, fix this once we add reference support
- static_assert(kClosureSupportsReferences == false,
- "Do not use memcpy with readbarrier references");
- memcpy(target, this, GetSize());
-}
-
-ArtMethod* Closure::GetTargetMethod() const {
- return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
-}
-
-uint32_t Closure::GetHashCode() const {
- // Start with a non-zero constant, a prime number.
- uint32_t result = 17;
-
- // Include the hash with the ArtMethod.
- {
- uintptr_t method = reinterpret_cast<uintptr_t>(GetTargetMethod());
- result = 31 * result + Low32Bits(method);
- if (sizeof(method) == sizeof(uint64_t)) {
- result = 31 * result + High32Bits(method);
- }
- }
-
- // Include a hash for each captured variable.
- for (size_t i = 0; i < GetCapturedVariablesSize(); ++i) {
- // TODO: not safe for GC-able values since the address can move and the hash code would change.
- uint8_t captured_variable_raw_value;
- CopyUnsafeAtOffset<uint8_t>(i, /*out*/&captured_variable_raw_value); // NOLINT: [whitespace/comma] [3]
-
- result = 31 * result + captured_variable_raw_value;
- }
-
- // TODO: Fix above loop to work for objects and lambdas.
- static_assert(kClosureSupportsGarbageCollection == false,
- "Need to update above loop to read the hash code from the "
- "objects and lambdas recursively");
-
- return result;
-}
-
-bool Closure::ReferenceEquals(const Closure* other) const {
- DCHECK(other != nullptr);
-
- // TODO: Need rework to use read barriers once closures have references inside of them that can
- // move. Until then, it's safe to just compare the data inside of it directly.
- static_assert(kClosureSupportsReferences == false,
- "Unsafe to use memcmp in read barrier collector");
-
- if (GetSize() != other->GetSize()) {
- return false;
- }
-
- return memcmp(this, other, GetSize()) == 0;
-}
-
-size_t Closure::GetNumberOfCapturedVariables() const {
- // TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kCount>(GetCapturedVariablesTypeDescriptor(),
- VariableInfo::kUpToIndexMax);
- size_t count = variable_info.count_;
- // Assuming each variable takes at least 1 byte, the size should always be greater than or equal to the count.
- DCHECK_LE(count, GetCapturedVariablesSize());
- return count;
-}
-
-const char* Closure::GetCapturedVariablesTypeDescriptor() const {
- return lambda_info_->GetCapturedVariablesTypeDescriptor();
-}
-
-ShortyFieldType Closure::GetCapturedShortyType(size_t index) const {
- DCHECK_LT(index, GetNumberOfCapturedVariables());
-
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kVariableType>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- return variable_info.variable_type_;
-}
-
-uint32_t Closure::GetCapturedPrimitiveNarrow(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsPrimitiveNarrow());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- uint32_t result = 0;
- static_assert(ShortyFieldTypeTraits::IsPrimitiveNarrowType<decltype(result)>(),
- "result must be a primitive narrow type");
- switch (variable_type) {
- case ShortyFieldType::kBoolean:
- CopyUnsafeAtOffset<bool>(offset, &result);
- break;
- case ShortyFieldType::kByte:
- CopyUnsafeAtOffset<uint8_t>(offset, &result);
- break;
- case ShortyFieldType::kChar:
- CopyUnsafeAtOffset<uint16_t>(offset, &result);
- break;
- case ShortyFieldType::kShort:
- CopyUnsafeAtOffset<int16_t>(offset, &result);
- break;
- case ShortyFieldType::kInt:
- CopyUnsafeAtOffset<int32_t>(offset, &result);
- break;
- case ShortyFieldType::kFloat:
- // XX: Maybe there should just be a GetCapturedPrimitive<T> to avoid this shuffle?
- // The interpreter's invoke seems to only special case references and wides,
- // everything else is treated as a generic 32-bit pattern.
- CopyUnsafeAtOffset<float>(offset, &result);
- break;
- default:
- LOG(FATAL)
- << "expected a valid narrow primitive shorty type but got "
- << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-uint64_t Closure::GetCapturedPrimitiveWide(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsPrimitiveWide());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- uint64_t result = 0;
- static_assert(ShortyFieldTypeTraits::IsPrimitiveWideType<decltype(result)>(),
- "result must be a primitive wide type");
- switch (variable_type) {
- case ShortyFieldType::kLong:
- CopyUnsafeAtOffset<int64_t>(offset, &result);
- break;
- case ShortyFieldType::kDouble:
- CopyUnsafeAtOffset<double>(offset, &result);
- break;
- default:
- LOG(FATAL)
- << "expected a valid primitive wide shorty type but got "
- << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-mirror::Object* Closure::GetCapturedObject(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsObject());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- mirror::Object* result = nullptr;
- static_assert(ShortyFieldTypeTraits::IsObjectType<decltype(result)>(),
- "result must be an object type");
- switch (variable_type) {
- case ShortyFieldType::kObject:
- // TODO: This seems unsafe. This may need to use gcroots.
- static_assert(kClosureSupportsGarbageCollection == false,
- "May need GcRoots and definitely need mutator locks");
- {
- mirror::CompressedReference<mirror::Object> compressed_result;
- CopyUnsafeAtOffset<uint32_t>(offset, &compressed_result);
- result = compressed_result.AsMirrorPtr();
- }
- break;
- default:
- CHECK(false)
- << "expected a valid shorty type but got " << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-size_t Closure::GetCapturedClosureSize(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsLambda());
- size_t offset = GetCapturedVariableOffset(index);
-
- auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
- size_t closure_size = GetClosureSize(captured_ptr + offset);
-
- return closure_size;
-}
-
-void Closure::CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const {
- DCHECK(GetCapturedShortyType(index).IsLambda());
- size_t offset = GetCapturedVariableOffset(index);
-
- auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
- size_t closure_size = GetClosureSize(captured_ptr + offset);
-
- static_assert(ShortyFieldTypeTraits::IsLambdaType<Closure*>(),
- "result must be a lambda type");
-
- CopyUnsafeAtOffset<Closure>(offset, destination, closure_size, destination_room);
-}
-
-size_t Closure::GetCapturedVariableOffset(size_t index) const {
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kOffset>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- size_t offset = variable_info.offset_;
-
- return offset;
-}
-
-void Closure::GetCapturedVariableTypeAndOffset(size_t index,
- ShortyFieldType* out_type,
- size_t* out_offset) const {
- DCHECK(out_type != nullptr);
- DCHECK(out_offset != nullptr);
-
- static constexpr const VariableInfo::Flags kVariableTypeAndOffset =
- static_cast<VariableInfo::Flags>(VariableInfo::kVariableType | VariableInfo::kOffset);
- VariableInfo variable_info =
- ParseTypeDescriptor<kVariableTypeAndOffset>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- ShortyFieldType variable_type = variable_info.variable_type_;
- size_t offset = variable_info.offset_;
-
- *out_type = variable_type;
- *out_offset = offset;
-}
-
-template <typename T>
-void Closure::CopyUnsafeAtOffset(size_t offset,
- void* destination,
- size_t src_size,
- size_t destination_room) const {
- DCHECK_GE(destination_room, src_size);
- const uint8_t* data_ptr = GetUnsafeAtOffset<T>(offset);
- memcpy(destination, data_ptr, sizeof(T));
-}
-
-// TODO: This is kind of ugly. I would prefer an unaligned_ptr<Closure> here.
-// Unfortunately C++ doesn't let you lower the alignment (i.e. alignas(1) Closure* is not legal).
-size_t Closure::GetClosureSize(const uint8_t* closure) {
- DCHECK(closure != nullptr);
-
- static_assert(!std::is_base_of<mirror::Object, Closure>::value,
- "It might be unsafe to call memcpy on a managed object");
-
- // Safe as long as it's not a mirror Object.
- // TODO: Should probably wrap this in like MemCpyNative or some such which statically asserts
- // we aren't trying to copy mirror::Object data around.
- ArtLambdaMethod* closure_info;
- memcpy(&closure_info, closure + offsetof(Closure, lambda_info_), sizeof(closure_info));
-
- if (LIKELY(closure_info->IsStaticSize())) {
- return closure_info->GetStaticClosureSize();
- }
-
- // The size is dynamic, so we need to read it from captured_variables_ portion.
- size_t dynamic_size;
- memcpy(&dynamic_size,
- closure + offsetof(Closure, captured_[0].dynamic_.size_),
- sizeof(dynamic_size));
- static_assert(sizeof(dynamic_size) == sizeof(captured_[0].dynamic_.size_),
- "Dynamic size type must match the structural type of the size");
-
- DCHECK_GE(dynamic_size, closure_info->GetStaticClosureSize());
- return dynamic_size;
-}
-
-size_t Closure::GetStartingOffset() const {
- static constexpr const size_t captured_offset = offsetof(Closure, captured_);
- if (LIKELY(lambda_info_->IsStaticSize())) {
- return offsetof(Closure, captured_[0].static_variables_) - captured_offset;
- } else {
- return offsetof(Closure, captured_[0].dynamic_.variables_) - captured_offset;
- }
-}
-
-} // namespace lambda
-} // namespace art
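GetClosureSize above never dereferences the Closure directly: the struct is packed and the pointer may be an unaligned interior pointer, so individual fields are memcpy'd out before use. Below is a minimal standalone illustration of that idiom; the struct is a hypothetical stand-in, not the real Closure layout.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // A packed header followed by payload, loosely mirroring how Closure stores its size.
    #pragma pack(push, 1)
    struct PackedHeader {
      void* lambda_info;   // stand-in for the ArtLambdaMethod*
      uint64_t size;       // stand-in for captured_[0].dynamic_.size_
    };
    #pragma pack(pop)

    // Read a field out of a possibly unaligned buffer without forming a misaligned reference.
    static uint64_t ReadSize(const uint8_t* unaligned_closure) {
      uint64_t size;
      std::memcpy(&size, unaligned_closure + offsetof(PackedHeader, size), sizeof(size));
      return size;
    }

    int main() {
      alignas(8) uint8_t buffer[64] = {};
      PackedHeader header = {nullptr, 42};
      std::memcpy(buffer + 1, &header, sizeof(header));   // deliberately misalign by one byte
      std::printf("size=%llu\n", static_cast<unsigned long long>(ReadSize(buffer + 1)));
      return 0;
    }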
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
deleted file mode 100644
index 31ff1944d2..0000000000
--- a/runtime/lambda/closure.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_H_
-
-#include "base/macros.h"
-#include "base/mutex.h" // For Locks::mutator_lock_.
-#include "lambda/shorty_field_type.h"
-
-#include <stdint.h>
-
-namespace art {
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-class ArtLambdaMethod; // forward declaration
-class ClosureBuilder; // forward declaration
-
-// Inline representation of a lambda closure.
-// Contains the target method and the set of packed captured variables as a copy.
-//
-// The closure itself is logically immutable, although in practice any object references
-// it (recursively) contains can be moved and updated by the GC.
-struct PACKED(sizeof(ArtLambdaMethod*)) Closure {
- // Get the size of the Closure in bytes.
- // This is necessary in order to allocate a large enough area to copy the Closure into.
- // Do *not* copy the closure with memcpy, since references also need to get moved.
- size_t GetSize() const;
-
- // Copy this closure into the target, whose memory size is specified by target_size.
- // Any object references are fixed up during the copy (if there was a read barrier).
- // The target_size must be at least as large as GetSize().
- void CopyTo(void* target, size_t target_size) const;
-
- // Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
- ArtMethod* GetTargetMethod() const;
-
- // Calculates the hash code. Value is recomputed each time.
- uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Is this the same closure as other? e.g. same target method, same variables captured.
- //
- // Determines whether the two Closures are interchangeable instances.
- // Does *not* call Object#equals recursively. If two Closures compare ReferenceEquals true that
- // means that they are interchangeable values (usually for the purpose of boxing/unboxing).
- bool ReferenceEquals(const Closure* other) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // How many variables were captured?
- size_t GetNumberOfCapturedVariables() const;
-
- // Returns a type descriptor string that represents each captured variable.
- // e.g. "Ljava/lang/Object;ZB" would mean a capture tuple of (Object, boolean, byte)
- const char* GetCapturedVariablesTypeDescriptor() const;
-
- // Returns the short type for the captured variable at index.
- // Index must be less than the number of captured variables.
- ShortyFieldType GetCapturedShortyType(size_t index) const;
-
- // Returns the 32-bit representation of a non-wide primitive at the captured variable index.
- // Smaller types are zero extended.
- // Index must be less than the number of captured variables.
- uint32_t GetCapturedPrimitiveNarrow(size_t index) const;
- // Returns the 64-bit representation of a wide primitive at the captured variable index.
- // Smaller types are zero extended.
- // Index must be less than the number of captured variables.
- uint64_t GetCapturedPrimitiveWide(size_t index) const;
- // Returns the object reference at the captured variable index.
- // The type at the index *must* be an object reference or a CHECK failure will occur.
- // Index must be less than the number of captured variables.
- mirror::Object* GetCapturedObject(size_t index) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Gets the size of a nested capture closure in bytes, at the captured variable index.
- // The type at the index *must* be a lambda closure or a CHECK failure will occur.
- size_t GetCapturedClosureSize(size_t index) const;
-
- // Copies a nested lambda closure at the captured variable index.
- // The destination must have enough room for the closure (see GetCapturedClosureSize).
- void CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const;
-
- private:
- // Read out any non-lambda value as a copy.
- template <typename T>
- T GetCapturedVariable(size_t index) const;
-
- // Reconstruct the closure's captured variable info at runtime.
- struct VariableInfo {
- size_t index_;
- ShortyFieldType variable_type_;
- size_t offset_;
- size_t count_;
-
- enum Flags {
- kIndex = 0x1,
- kVariableType = 0x2,
- kOffset = 0x4,
- kCount = 0x8,
- };
-
- // Traverse to the end of the type descriptor list instead of stopping at some particular index.
- static constexpr size_t kUpToIndexMax = static_cast<size_t>(-1);
- };
-
- // Parse a type descriptor, stopping at index "upto_index".
- // Returns only the information requested in flags. All other fields are indeterminate.
- template <VariableInfo::Flags flags>
- inline VariableInfo ALWAYS_INLINE ParseTypeDescriptor(const char* type_descriptor,
- size_t upto_index) const;
-
- // Convenience function to call ParseTypeDescriptor with just the type and offset.
- void GetCapturedVariableTypeAndOffset(size_t index,
- ShortyFieldType* out_type,
- size_t* out_offset) const;
-
- // How many bytes do the captured variables take up? Runtime sizeof(captured_variables).
- size_t GetCapturedVariablesSize() const;
- // Get the size in bytes of the variable_type which is potentially stored at offset.
- size_t GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const;
- // Get the starting offset (in bytes) for the 0th captured variable.
- // All offsets are relative to 'captured_'.
- size_t GetStartingOffset() const;
- // Get the offset for this index.
- // All offsets are relative to 'captured_'.
- size_t GetCapturedVariableOffset(size_t index) const;
-
- // Cast the data at '(char*)captured_[offset]' into T, returning its address.
- // This value should not be dereferenced directly since it is unaligned.
- template <typename T>
- inline const uint8_t* GetUnsafeAtOffset(size_t offset) const;
-
- // Copy the data at the offset into the destination. DCHECKs that
- // the destination_room is large enough (in bytes) to fit the data.
- template <typename T>
- inline void CopyUnsafeAtOffset(size_t offset,
- void* destination,
- size_t src_size = sizeof(T),
- size_t destination_room = sizeof(T)) const;
-
- // Get the closure size from an unaligned (i.e. interior) closure pointer.
- static size_t GetClosureSize(const uint8_t* closure);
-
- ///////////////////////////////////////////////////////////////////////////////////
-
- // Compile-time known lambda information such as the type descriptor and size.
- ArtLambdaMethod* lambda_info_;
-
- // A contiguous list of captured variables, and possibly the closure size.
- // The runtime size can always be determined through GetSize().
- union {
- // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
- uint8_t static_variables_[0];
- struct {
- // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
- size_t size_; // The lambda_info_ and the size_ itself are also included as part of the size.
- uint8_t variables_[0];
- } dynamic_;
- } captured_[0];
- // captured_ will always consist of one array element at runtime.
- // Set to [0] so that 'size_' is not counted in sizeof(Closure).
-
- friend class ClosureBuilder;
- friend class ClosureTest;
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_H_
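The captured_ union above encodes two layouts: when the closure size is static the captured data starts immediately after lambda_info_, and when it is dynamic a size_ field is stored first and the data follows it. The small illustration below shows the resulting offsets using plain structs instead of the real packed flexible-array layout; it is illustrative only.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Static layout: the closure size is known at compile time.
    struct StaticLayout {
      void* lambda_info;
      uint8_t variables[8];   // captured data begins immediately
    };

    // Dynamic layout: nested lambdas force the total size to be stored inline first.
    struct DynamicLayout {
      void* lambda_info;
      size_t size;            // total closure size, read back at runtime
      uint8_t variables[8];   // captured data follows the size
    };

    int main() {
      std::printf("static capture offset=%zu, dynamic capture offset=%zu\n",
                  offsetof(StaticLayout, variables), offsetof(DynamicLayout, variables));
      return 0;
    }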
diff --git a/runtime/lambda/closure_builder-inl.h b/runtime/lambda/closure_builder-inl.h
deleted file mode 100644
index 3cec21f3ba..0000000000
--- a/runtime/lambda/closure_builder-inl.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
-
-#include "lambda/closure_builder.h"
-#include <string.h>
-
-namespace art {
-namespace lambda {
-
-template <typename T, ClosureBuilder::ShortyTypeEnum kShortyType>
-void ClosureBuilder::CaptureVariablePrimitive(T value) {
- static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a primitive type");
- const size_t type_size = ShortyFieldType(kShortyType).GetStaticSize();
- DCHECK_EQ(type_size, sizeof(T));
-
- // Copy the data while retaining the bit pattern. Strict-aliasing safe.
- ShortyFieldTypeTraits::MaxType value_storage = 0;
- memcpy(&value_storage, &value, sizeof(T));
-
- values_.push_back(value_storage);
- size_ += sizeof(T);
-
- shorty_types_ += kShortyType;
-}
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
deleted file mode 100644
index 739e965238..0000000000
--- a/runtime/lambda/closure_builder.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/closure_builder.h"
-
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/closure.h"
-#include "lambda/shorty_field_type.h"
-#include "runtime/mirror/object_reference.h"
-
-#include <stdint.h>
-#include <vector>
-
-namespace art {
-namespace lambda {
-
-/*
- * GC support TODOs:
- * (Although there's some code for storing objects, it is UNIMPLEMENTED(FATAL) because it is
- * incomplete).
- *
- * 1) GC needs to be able to traverse the Closure and visit any references.
- * It might be possible to get away with global roots in the short term.
- *
- * 2) Add brooks read barrier support. We can store the black/gray/white bits
- * in the lower 2 bits of the lambda art method pointer. Whenever a closure is copied
- * [to the stack] we'd need to add a cold path to turn it black.
- * (since there's only 3 colors, I can use the 4th value to indicate no-refs).
- * e.g. 0x0 = gray, 0x1 = white, 0x2 = black, 0x3 = no-nested-references
- * - Alternatively the GC can mark reference-less closures as always-black,
- * although it would need extra work to check for references.
- */
-
-void ClosureBuilder::CaptureVariableObject(mirror::Object* object) {
- auto compressed_reference = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(object);
- ShortyFieldTypeTraits::MaxType storage = 0;
-
- static_assert(sizeof(storage) >= sizeof(compressed_reference),
- "not enough room to store a compressed reference");
- memcpy(&storage, &compressed_reference, sizeof(compressed_reference));
-
- values_.push_back(storage);
- size_ += kObjectReferenceSize;
-
- static_assert(kObjectReferenceSize == sizeof(compressed_reference), "reference size mismatch");
-
- // TODO: needs more work to support concurrent GC
- if (kIsDebugBuild) {
- if (kUseReadBarrier) {
- UNIMPLEMENTED(FATAL) << "can't yet safely capture objects with read barrier";
- }
- }
-
- shorty_types_ += ShortyFieldType::kObject;
-}
-
-void ClosureBuilder::CaptureVariableLambda(Closure* closure) {
- DCHECK(closure != nullptr); // null closures not allowed, target method must be null instead.
- values_.push_back(reinterpret_cast<ShortyFieldTypeTraits::MaxType>(closure));
-
- if (LIKELY(is_dynamic_size_ == false)) {
- // Write in the extra bytes to store the dynamic size the first time.
- is_dynamic_size_ = true;
- size_ += sizeof(Closure::captured_[0].dynamic_.size_);
- }
-
- // A closure may be sized dynamically, so always query it for the true size.
- size_ += closure->GetSize();
-
- shorty_types_ += ShortyFieldType::kLambda;
-}
-
-size_t ClosureBuilder::GetSize() const {
- return size_;
-}
-
-size_t ClosureBuilder::GetCaptureCount() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
- return values_.size();
-}
-
-const std::string& ClosureBuilder::GetCapturedVariableShortyTypes() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
- return shorty_types_;
-}
-
-Closure* ClosureBuilder::CreateInPlace(void* memory, ArtLambdaMethod* target_method) const {
- DCHECK(memory != nullptr);
- DCHECK(target_method != nullptr);
- DCHECK_EQ(is_dynamic_size_, target_method->IsDynamicSize());
-
- CHECK_EQ(target_method->GetNumberOfCapturedVariables(), values_.size())
- << "number of variables captured at runtime does not match "
- << "number of variables captured at compile time";
-
- Closure* closure = new (memory) Closure;
- closure->lambda_info_ = target_method;
-
- static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size");
-
- size_t written_size;
- if (UNLIKELY(is_dynamic_size_)) {
- // The closure size must be set dynamically (i.e. nested lambdas).
- closure->captured_[0].dynamic_.size_ = GetSize();
- size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_);
- DCHECK_LE(header_size, GetSize());
- size_t variables_size = GetSize() - header_size;
- written_size =
- WriteValues(target_method,
- closure->captured_[0].dynamic_.variables_,
- header_size,
- variables_size);
- } else {
- // The closure size is known statically (i.e. no nested lambdas).
- DCHECK(GetSize() == target_method->GetStaticClosureSize());
- size_t header_size = offsetof(Closure, captured_[0].static_variables_);
- DCHECK_LE(header_size, GetSize());
- size_t variables_size = GetSize() - header_size;
- written_size =
- WriteValues(target_method,
- closure->captured_[0].static_variables_,
- header_size,
- variables_size);
- }
-
- DCHECK_EQ(written_size, closure->GetSize());
-
- return closure;
-}
-
-size_t ClosureBuilder::WriteValues(ArtLambdaMethod* target_method,
- uint8_t variables[],
- size_t header_size,
- size_t variables_size) const {
- size_t total_size = header_size;
- const char* shorty_types = target_method->GetCapturedVariablesShortyTypeDescriptor();
- DCHECK_STREQ(shorty_types, shorty_types_.c_str());
-
- size_t variables_offset = 0;
- size_t remaining_size = variables_size;
-
- const size_t shorty_count = target_method->GetNumberOfCapturedVariables();
- DCHECK_EQ(shorty_count, GetCaptureCount());
-
- for (size_t i = 0; i < shorty_count; ++i) {
- ShortyFieldType shorty{shorty_types[i]}; // NOLINT [readability/braces] [4]
-
- size_t var_size;
- if (LIKELY(shorty.IsStaticSize())) {
- // TODO: needs more work to support concurrent GC, e.g. read barriers
- if (kUseReadBarrier == false) {
- if (UNLIKELY(shorty.IsObject())) {
- UNIMPLEMENTED(FATAL) << "can't yet safely write objects with read barrier";
- }
- } else {
- if (UNLIKELY(shorty.IsObject())) {
- UNIMPLEMENTED(FATAL) << "writing objects not yet supported, no GC support";
- }
- }
-
- var_size = shorty.GetStaticSize();
- DCHECK_LE(var_size, sizeof(values_[i]));
-
- // Safe even for objects (non-read barrier case) if we never suspend
- // while the ClosureBuilder is live.
- // FIXME: Need to add GC support for references in a closure.
- memcpy(&variables[variables_offset], &values_[i], var_size);
- } else {
- DCHECK(shorty.IsLambda())
- << " don't support writing dynamically sized types other than lambda";
-
- ShortyFieldTypeTraits::MaxType closure_raw = values_[i];
- Closure* nested_closure = reinterpret_cast<Closure*>(closure_raw);
-
- DCHECK(nested_closure != nullptr);
- nested_closure->CopyTo(&variables[variables_offset], remaining_size);
-
- var_size = nested_closure->GetSize();
- }
-
- total_size += var_size;
- DCHECK_GE(remaining_size, var_size);
- remaining_size -= var_size;
-
- variables_offset += var_size;
- }
-
- DCHECK_EQ('\0', shorty_types[shorty_count]);
- DCHECK_EQ(variables_offset, variables_size);
-
- return total_size;
-}
-
-
-} // namespace lambda
-} // namespace art
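The builder's size bookkeeping above reduces to: a closure is an ArtLambdaMethod* header followed by each captured variable at its shorty-defined static size, with nested lambdas contributing their full (possibly dynamic) size. A minimal, self-contained sketch of that arithmetic for the static-size case (the StaticSizeOf/StaticClosureSize helpers are hypothetical names, standard C++ only, not part of the deleted sources):

#include <cstddef>
#include <cstdint>
#include <string>

// Hypothetical stand-in for ShortyFieldType::GetStaticSize() keyed on a shorty character.
inline std::size_t StaticSizeOf(char shorty) {
  switch (shorty) {
    case 'Z': return sizeof(bool);
    case 'B': return sizeof(int8_t);
    case 'C': return sizeof(uint16_t);   // char (unsigned 16-bit)
    case 'S': return sizeof(int16_t);
    case 'I': return sizeof(int32_t);
    case 'F': return sizeof(float);
    case 'J': return sizeof(int64_t);
    case 'D': return sizeof(double);
    case 'L': return sizeof(uint32_t);   // stand-in for kObjectReferenceSize
    case '\\': return sizeof(void*);     // lambda: lower bound only; real size may be dynamic
    default:  return 0;                  // not a valid shorty field type
  }
}

// Mirrors what ClosureBuilder::GetSize() computes when no nested lambdas are captured:
// one pointer-sized header (the ArtLambdaMethod*) plus one slot per captured variable.
inline std::size_t StaticClosureSize(const std::string& captured_shorty) {
  std::size_t size = sizeof(void*);      // stand-in for sizeof(ArtLambdaMethod*)
  for (char c : captured_shorty) {
    size += StaticSizeOf(c);
  }
  return size;
}
// Example: StaticClosureSize("IJ") == sizeof(void*) + 4 + 8 on typical targets.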
diff --git a/runtime/lambda/closure_builder.h b/runtime/lambda/closure_builder.h
deleted file mode 100644
index 23eb484529..0000000000
--- a/runtime/lambda/closure_builder.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
-
-#include "base/macros.h"
-#include "base/mutex.h" // For Locks::mutator_lock_.
-#include "base/value_object.h"
-#include "lambda/shorty_field_type.h"
-
-#include <stdint.h>
-#include <vector>
-
-namespace art {
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-class ArtLambdaMethod; // forward declaration
-
-// Build a closure by capturing variables one at a time.
- // When all variables have been marked captured, the closure can be created in-place
- // at a target memory address.
-//
-// The mutator lock must be held for the duration of the lifetime of this object,
-// since it needs to temporarily store heap references into an internal list.
-class ClosureBuilder {
- public:
- using ShortyTypeEnum = decltype(ShortyFieldType::kByte);
-
- // Mark this primitive value to be captured as the specified type.
- template <typename T, ShortyTypeEnum kShortyType = ShortyFieldTypeSelectEnum<T>::value>
- void CaptureVariablePrimitive(T value);
-
- // Mark this object reference to be captured.
- void CaptureVariableObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Mark this lambda closure to be captured.
- void CaptureVariableLambda(Closure* closure);
-
- // Get the size (in bytes) of the closure.
- // This size is used to allocate memory large enough to write the closure into.
- // Call 'CreateInPlace' to actually write the closure out.
- size_t GetSize() const;
-
- // Returns how many variables have been captured so far.
- size_t GetCaptureCount() const;
-
- // Get the list of captured variables' shorty field types.
- const std::string& GetCapturedVariableShortyTypes() const;
-
- // Creates a closure in-place and writes out the data into 'memory'.
- // Memory must be at least 'GetSize' bytes large.
- // All previously marked data to be captured is now written out.
- Closure* CreateInPlace(void* memory, ArtLambdaMethod* target_method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Locks need to be held for entire lifetime of ClosureBuilder.
- ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
- {}
-
- // Locks need to be held for entire lifetime of ClosureBuilder.
- ~ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
- {}
-
- private:
- // Initial size of a closure before any variables are written.
- // Header size only.
- static constexpr size_t kInitialSize = sizeof(ArtLambdaMethod*);
-
- // Write a Closure's variables field from the captured variables.
- // variables_size is specified in bytes and only counts the room available for the variables.
- // Returns the calculated actual size of the closure.
- size_t WriteValues(ArtLambdaMethod* target_method,
- uint8_t variables[],
- size_t header_size,
- size_t variables_size) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- size_t size_ = kInitialSize;
- bool is_dynamic_size_ = false;
- std::vector<ShortyFieldTypeTraits::MaxType> values_;
- std::string shorty_types_;
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
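As a quick orientation for the API that was just removed, a hedged usage sketch of the intended call sequence (capture, size query, then in-place creation) follows. It assumes an ArtLambdaMethod* whose captured-variable shorty is "IJ" is already available; the snippet uses only names declared in the header above and is not buildable outside the ART tree.

// Usage sketch only; the Closure storage is owned by the caller, as in closure_test.cc below.
void BuildClosureSketch(art::lambda::ArtLambdaMethod* lambda_method)
    SHARED_REQUIRES(art::Locks::mutator_lock_) {
  using art::lambda::Closure;
  using art::lambda::ClosureBuilder;

  ClosureBuilder builder;                           // Mutator lock held for its whole lifetime.
  builder.CaptureVariablePrimitive<int32_t>(42);    // Matches the 'I' in the shorty.
  builder.CaptureVariablePrimitive<int64_t>(1234);  // Matches the 'J' in the shorty.

  const size_t size = builder.GetSize();            // Header plus captured variables, in bytes.
  char* memory = new char[size];                    // Must be at least GetSize() bytes.
  Closure* closure = builder.CreateInPlace(memory, lambda_method);
  // ... pass 'closure' to the runtime; deleting 'memory' releases it.
}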
diff --git a/runtime/lambda/closure_test.cc b/runtime/lambda/closure_test.cc
deleted file mode 100644
index 7c1bd0d591..0000000000
--- a/runtime/lambda/closure_test.cc
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_method.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/closure.h"
-#include "lambda/closure_builder.h"
-#include "lambda/closure_builder-inl.h"
-#include "utils.h"
-
-#include <numeric>
-#include <stdint.h>
-#include <type_traits>
-#include "gtest/gtest.h"
-
-// Turn this on for some extra printfs to help with debugging, since some code is optimized out.
-static constexpr const bool kDebuggingClosureTest = true;
-
-namespace std {
- using Closure = art::lambda::Closure;
-
- // Specialize std::default_delete so it knows how to properly delete closures
- // through the way we allocate them in this test.
- //
- // This is test-only because we don't want the rest of Art to do this.
- template <>
- struct default_delete<Closure> {
- void operator()(Closure* closure) const {
- delete[] reinterpret_cast<char*>(closure);
- }
- };
-} // namespace std
-
-namespace art {
-
-// Fake lock acquisition to please clang lock checker.
-// This doesn't actually acquire any locks because we don't need multiple threads in this gtest.
-struct SCOPED_CAPABILITY ScopedFakeLock {
- explicit ScopedFakeLock(MutatorMutex& mu) ACQUIRE(mu)
- : mu_(mu) {
- }
-
- ~ScopedFakeLock() RELEASE()
- {}
-
- MutatorMutex& mu_;
-};
-
-namespace lambda {
-
-class ClosureTest : public ::testing::Test {
- public:
- ClosureTest() = default;
- ~ClosureTest() = default;
-
- protected:
- static void SetUpTestCase() {
- }
-
- virtual void SetUp() {
- // Create a completely dummy method here.
- // It's "OK" because the Closure never needs to look inside of the ArtMethod
- // (it just needs to be non-null).
- uintptr_t ignore = 0xbadbad;
- fake_method_ = reinterpret_cast<ArtMethod*>(ignore);
- }
-
- static ::testing::AssertionResult IsResultSuccessful(bool result) {
- if (result) {
- return ::testing::AssertionSuccess();
- } else {
- return ::testing::AssertionFailure();
- }
- }
-
- // Create a closure that captures the static variables from 'args' by-value.
- // The lambda method's captured variable types must match the ones in 'args'.
- // -- This creates the closure directly in-memory by using memcpy.
- template <typename ... Args>
- static std::unique_ptr<Closure> CreateClosureStaticVariables(ArtLambdaMethod* lambda_method,
- Args&& ... args) {
- constexpr size_t header_size = sizeof(ArtLambdaMethod*);
- const size_t static_size = GetArgsSize(args ...) + header_size;
- EXPECT_GE(static_size, sizeof(Closure));
-
- // Can't just 'new' the Closure since we don't know the size up front.
- char* closure_as_char_array = new char[static_size];
- Closure* closure_ptr = new (closure_as_char_array) Closure;
-
- // Set up the data
- closure_ptr->lambda_info_ = lambda_method;
- CopyArgs(closure_ptr->captured_[0].static_variables_, args ...);
-
- // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
- return std::unique_ptr<Closure>(closure_ptr); // NOLINT [whitespace/braces] [5]
- }
-
- // Copy variadic arguments into the destination array with memcpy.
- template <typename T, typename ... Args>
- static void CopyArgs(uint8_t destination[], T&& arg, Args&& ... args) {
- memcpy(destination, &arg, sizeof(arg));
- CopyArgs(destination + sizeof(arg), args ...);
- }
-
- // Base case: Done.
- static void CopyArgs(uint8_t destination[]) {
- UNUSED(destination);
- }
-
- // Create a closure that captures the static variables from 'args' by-value.
- // The lambda method's captured variable types must match the ones in 'args'.
- // -- This uses ClosureBuilder interface to set up the closure indirectly.
- template <typename ... Args>
- static std::unique_ptr<Closure> CreateClosureStaticVariablesFromBuilder(
- ArtLambdaMethod* lambda_method,
- Args&& ... args) {
- // Acquire a fake lock since closure_builder needs it.
- ScopedFakeLock fake_lock(*Locks::mutator_lock_);
-
- ClosureBuilder closure_builder;
- CaptureVariableFromArgsList(/*out*/closure_builder, args ...);
-
- EXPECT_EQ(sizeof...(args), closure_builder.GetCaptureCount());
-
- constexpr size_t header_size = sizeof(ArtLambdaMethod*);
- const size_t static_size = GetArgsSize(args ...) + header_size;
- EXPECT_GE(static_size, sizeof(Closure));
-
- // For static variables, no nested closure, so size must match exactly.
- EXPECT_EQ(static_size, closure_builder.GetSize());
-
- // Can't just 'new' the Closure since we don't know the size up front.
- char* closure_as_char_array = new char[static_size];
- Closure* closure_ptr = new (closure_as_char_array) Closure;
-
- // The closure builder packs the captured variables into a Closure.
- closure_builder.CreateInPlace(closure_ptr, lambda_method);
-
- // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
- return std::unique_ptr<Closure>(closure_ptr); // NOLINT [whitespace/braces] [5]
- }
-
- // Call the correct ClosureBuilder::CaptureVariableXYZ function based on the type of args.
- // Invokes for each arg in args.
- template <typename ... Args>
- static void CaptureVariableFromArgsList(/*out*/ClosureBuilder& closure_builder, Args ... args) {
- int ignore[] = {
- (CaptureVariableFromArgs(/*out*/closure_builder, args),0)... // NOLINT [whitespace/comma] [3]
- };
- UNUSED(ignore);
- }
-
- // ClosureBuilder::CaptureVariablePrimitive for types that are primitive only.
- template <typename T>
- typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveType<T>()>::type
- static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, T value) {
- static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a shorty primitive");
- closure_builder.CaptureVariablePrimitive<T, ShortyFieldTypeSelectEnum<T>::value>(value);
- }
-
- // ClosureBuilder::CaptureVariableObject for types that are objects only.
- template <typename T>
- typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
- static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, const T* object) {
- ScopedFakeLock fake_lock(*Locks::mutator_lock_);
- closure_builder.CaptureVariableObject(object);
- }
-
- // Sum of sizeof(Args...).
- template <typename T, typename ... Args>
- static constexpr size_t GetArgsSize(T&& arg, Args&& ... args) {
- return sizeof(arg) + GetArgsSize(args ...);
- }
-
- // Base case: Done.
- static constexpr size_t GetArgsSize() {
- return 0;
- }
-
- // Take "U" and memcpy it into a "T". T starts out as (T)0.
- template <typename T, typename U>
- static T ExpandingBitCast(const U& val) {
- static_assert(sizeof(T) >= sizeof(U), "U too large");
- T new_val = static_cast<T>(0);
- memcpy(&new_val, &val, sizeof(U));
- return new_val;
- }
-
- // Templatized extraction from closures by checking their type with enable_if.
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
- EXPECT_EQ(ExpandingBitCast<uint32_t>(value), closure->GetCapturedPrimitiveNarrow(index))
- << " with index " << index;
- }
-
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveWideType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
- EXPECT_EQ(ExpandingBitCast<uint64_t>(value), closure->GetCapturedPrimitiveWide(index))
- << " with index " << index;
- }
-
- // Templatized SFINAE for Objects so we can get better error messages.
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, const T* object) {
- EXPECT_EQ(object, closure->GetCapturedObject(index))
- << " with index " << index;
- }
-
- template <typename ... Args>
- void TestPrimitive(const char *descriptor, Args ... args) {
- const char* shorty = descriptor;
-
- SCOPED_TRACE(descriptor);
-
- ASSERT_EQ(strlen(shorty), sizeof...(args))
- << "test error: descriptor must have same # of types as the # of captured variables";
-
- // Important: This fake lambda method needs to out-live any Closures we create with it.
- ArtLambdaMethod lambda_method{fake_method_, // NOLINT [whitespace/braces] [5]
- descriptor, // NOLINT [whitespace/blank_line] [2]
- shorty,
- };
-
- std::unique_ptr<Closure> closure_a;
- std::unique_ptr<Closure> closure_b;
-
- // Test the closure twice when it's constructed in different ways.
- {
- // Create the closure in a "raw" manner, that is directly with memcpy
- // since we know the underlying data format.
- // This simulates how the compiler would lay out the data directly.
- SCOPED_TRACE("raw closure");
- std::unique_ptr<Closure> closure_raw = CreateClosureStaticVariables(&lambda_method, args ...);
-
- if (kDebuggingClosureTest) {
- std::cerr << "closure raw address: " << closure_raw.get() << std::endl;
- }
- TestPrimitiveWithClosure(closure_raw.get(), descriptor, shorty, args ...);
- closure_a = std::move(closure_raw);
- }
-
- {
- // Create the closure with the ClosureBuilder, which is done indirectly.
- // This simulates how the interpreter would create the closure dynamically at runtime.
- SCOPED_TRACE("closure from builder");
- std::unique_ptr<Closure> closure_built =
- CreateClosureStaticVariablesFromBuilder(&lambda_method, args ...);
- if (kDebuggingClosureTest) {
- std::cerr << "closure built address: " << closure_built.get() << std::endl;
- }
- TestPrimitiveWithClosure(closure_built.get(), descriptor, shorty, args ...);
- closure_b = std::move(closure_built);
- }
-
- // The closures should be identical memory-wise as well.
- EXPECT_EQ(closure_a->GetSize(), closure_b->GetSize());
- EXPECT_TRUE(memcmp(closure_a.get(),
- closure_b.get(),
- std::min(closure_a->GetSize(), closure_b->GetSize())) == 0);
- }
-
- template <typename ... Args>
- static void TestPrimitiveWithClosure(Closure* closure,
- const char* descriptor,
- const char* shorty,
- Args ... args) {
- EXPECT_EQ(sizeof(ArtLambdaMethod*) + GetArgsSize(args...), closure->GetSize());
- EXPECT_EQ(sizeof...(args), closure->GetNumberOfCapturedVariables());
- EXPECT_STREQ(descriptor, closure->GetCapturedVariablesTypeDescriptor());
- TestPrimitiveExpects(closure, shorty, /*index*/0, args ...);
- }
-
- // Call EXPECT_EQ for each argument in the closure's #GetCapturedX.
- template <typename T, typename ... Args>
- static void TestPrimitiveExpects(
- const Closure* closure, const char* shorty, size_t index, T arg, Args ... args) {
- ASSERT_EQ(ShortyFieldType(shorty[index]).GetStaticSize(), sizeof(T))
- << "Test error: Type mismatch at index " << index;
- ExpectCapturedVariable(closure, index, arg);
- EXPECT_EQ(ShortyFieldType(shorty[index]), closure->GetCapturedShortyType(index));
- TestPrimitiveExpects(closure, shorty, index + 1, args ...);
- }
-
- // Base case for EXPECT_EQ.
- static void TestPrimitiveExpects(const Closure* closure, const char* shorty, size_t index) {
- UNUSED(closure, shorty, index);
- }
-
- ArtMethod* fake_method_;
-};
-
-TEST_F(ClosureTest, TestTrivial) {
- ArtLambdaMethod lambda_method{fake_method_, // NOLINT [whitespace/braces] [5]
- "", // No captured variables // NOLINT [whitespace/blank_line] [2]
- "", // No captured variables
- };
-
- std::unique_ptr<Closure> closure = CreateClosureStaticVariables(&lambda_method);
-
- EXPECT_EQ(sizeof(ArtLambdaMethod*), closure->GetSize());
- EXPECT_EQ(0u, closure->GetNumberOfCapturedVariables());
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveSingle) {
- TestPrimitive("Z", true);
- TestPrimitive("B", int8_t(0xde));
- TestPrimitive("C", uint16_t(0xbeef));
- TestPrimitive("S", int16_t(0xdead));
- TestPrimitive("I", int32_t(0xdeadbeef));
- TestPrimitive("F", 0.123f);
- TestPrimitive("J", int64_t(0xdeadbeef00c0ffee));
- TestPrimitive("D", 123.456);
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveMany) {
- TestPrimitive("ZZ", true, false);
- TestPrimitive("ZZZ", true, false, true);
- TestPrimitive("BBBB", int8_t(0xde), int8_t(0xa0), int8_t(0xff), int8_t(0xcc));
- TestPrimitive("CC", uint16_t(0xbeef), uint16_t(0xdead));
- TestPrimitive("SSSS", int16_t(0xdead), int16_t(0xc0ff), int16_t(0xf000), int16_t(0xbaba));
- TestPrimitive("III", int32_t(0xdeadbeef), int32_t(0xc0ffee), int32_t(0xbeefdead));
- TestPrimitive("FF", 0.123f, 555.666f);
- TestPrimitive("JJJ", int64_t(0xdeadbeef00c0ffee), int64_t(0x123), int64_t(0xc0ffee));
- TestPrimitive("DD", 123.456, 777.888);
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveMixed) {
- TestPrimitive("ZZBBCCSSIIFFJJDD",
- true, false,
- int8_t(0xde), int8_t(0xa0),
- uint16_t(0xbeef), uint16_t(0xdead),
- int16_t(0xdead), int16_t(0xc0ff),
- int32_t(0xdeadbeef), int32_t(0xc0ffee),
- 0.123f, 555.666f,
- int64_t(0xdeadbeef00c0ffee), int64_t(0x123),
- 123.456, 777.888);
-} // TEST_F
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/leaking_allocator.cc b/runtime/lambda/leaking_allocator.cc
deleted file mode 100644
index 22bb294d03..0000000000
--- a/runtime/lambda/leaking_allocator.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/bit_utils.h"
-#include "lambda/leaking_allocator.h"
-#include "linear_alloc.h"
-#include "runtime.h"
-
-namespace art {
-namespace lambda {
-
-void* LeakingAllocator::AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size) {
- // TODO: use GetAllocatorForClassLoader to allocate lambda ArtMethod data.
- void* mem = Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
- DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(mem), align_size);
- return mem;
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/leaking_allocator.h b/runtime/lambda/leaking_allocator.h
deleted file mode 100644
index cb5a1bf4c3..0000000000
--- a/runtime/lambda/leaking_allocator.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-#define ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-
-#include <utility> // std::forward
-#include <type_traits> // std::aligned_storage
-
-namespace art {
-class Thread; // forward declaration
-
-namespace lambda {
-
-// Temporary class to centralize all the leaking allocations.
-// Allocations made through this class are never freed; it is a placeholder until the
-// calling code is rewritten so that each allocation properly:
-//
-// (a) has a lifetime scoped to some other entity, and
-// (b) is not made over and over again if it was already made once (immutable data).
-//
-// TODO: do all of the above a/b for each callsite, and delete this class.
-class LeakingAllocator {
- public:
- // An opaque type which is guaranteed to:
- // * a) be large enough to hold T (e.g. for in-place new),
- // * b) be well-aligned for T (so that reads/writes are well-defined), and
- // * c) be strict-aliasing compatible with T*.
- //
- // Nominally used to allocate memory for yet unconstructed instances of T.
- template <typename T>
- using AlignedMemoryStorage = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
-
- // Allocate byte_size bytes worth of memory. Never freed.
- template <typename T>
- static AlignedMemoryStorage<T>* AllocateMemory(Thread* self, size_t byte_size = sizeof(T)) {
- return reinterpret_cast<AlignedMemoryStorage<T>*>(
- AllocateMemoryImpl(self, byte_size, alignof(T)));
- }
-
- // Make a new instance of T, flexibly sized, in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeFlexibleInstance(Thread* self, size_t byte_size, Args&&... args) {
- return new (AllocateMemory<T>(self, byte_size)) T(std::forward<Args>(args)...);
- }
-
- // Make a new instance of T in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeInstance(Thread* self, Args&&... args) {
- return new (AllocateMemory<T>(self, sizeof(T))) T(std::forward<Args>(args)...);
- }
-
- private:
- static void* AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
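To make the allocator's contract above concrete, here is a hedged usage sketch. 'Widget' is a hypothetical type invented for illustration, and the snippet is not buildable outside the ART tree; it only shows the two allocation flavours (fixed-size and flexibly-sized) that the header describes.

// Hypothetical example type; anything constructible that may carry trailing data works.
struct Widget {
  Widget(int a, int b) : a_(a), b_(b) {}
  int a_;
  int b_;
  // A flexibly-sized Widget may be followed by extra payload bytes in the same allocation.
};

void LeakingAllocatorSketch(art::Thread* self) {
  using art::lambda::LeakingAllocator;

  // Fixed-size: allocate exactly sizeof(Widget) and placement-new it. Never freed.
  Widget* fixed = LeakingAllocator::MakeInstance<Widget>(self, /*a=*/1, /*b=*/2);

  // Flexibly-sized: the caller computes the full byte size up front (header + payload),
  // much like ClosureBuilder::GetSize() does for closures. Also never freed.
  const size_t total_bytes = sizeof(Widget) + 16;  // 16 hypothetical payload bytes.
  Widget* flexible = LeakingAllocator::MakeFlexibleInstance<Widget>(self, total_bytes, 1, 2);

  (void)fixed;
  (void)flexible;
}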
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
deleted file mode 100644
index c314fd2ac3..0000000000
--- a/runtime/lambda/shorty_field_type.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
-#define ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "globals.h"
-#include "runtime/primitive.h"
-
-#include <ostream>
-
-namespace art {
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-
-struct Closure; // forward declaration
-
-// TODO: Refactor together with primitive.h
-
-// The short form of a field type descriptor. Corresponds to ShortyFieldType in dex specification.
-// Only types usable by a field (and locals) are allowed (i.e. no void type).
-// Note that arrays and objects are treated both as 'L'.
-//
-// This is effectively a 'char' enum-like zero-cost type-safe wrapper with extra helper functions.
-struct ShortyFieldType : ValueObject {
- // Use as if this was an enum class, e.g. 'ShortyFieldType::kBoolean'.
- enum : char {
- // Primitives (Narrow):
- kBoolean = 'Z',
- kByte = 'B',
- kChar = 'C',
- kShort = 'S',
- kInt = 'I',
- kFloat = 'F',
- // Primitives (Wide):
- kLong = 'J',
- kDouble = 'D',
- // Managed types:
- kObject = 'L', // This can also be an array (which is otherwise '[' in a non-shorty).
- kLambda = '\\',
- }; // NOTE: This is an anonymous enum so we can get exhaustive switch checking from the compiler.
-
- // Implicitly construct from the enum above. Value must be one of the enum list members above.
- // Always safe to use, does not do any DCHECKs.
- inline constexpr ShortyFieldType(decltype(kByte) c) : value_(c) {
- }
-
- // Default constructor. The initial value is undefined. Initialize before calling methods.
- // This is very unsafe, but exists as a convenience for code that initializes the value later.
- explicit ShortyFieldType() : value_(StaticCastValue(0)) {
- }
-
- // Explicitly construct from a char. Value must be one of the enum list members above.
- // Conversion is potentially unsafe, so DCHECKing is performed.
- explicit inline ShortyFieldType(char c) : value_(StaticCastValue(c)) {
- if (kIsDebugBuild) {
- // Verify at debug-time that our conversion is safe.
- ShortyFieldType ignored;
- DCHECK(MaybeCreate(c, &ignored)) << "unknown shorty field type '" << c << "'";
- }
- }
-
- // Attempts to parse the character in 'shorty_field_type' into its strongly typed version.
- // Returns false if the character was out of range of the grammar.
- static bool MaybeCreate(char shorty_field_type, ShortyFieldType* out) {
- DCHECK(out != nullptr);
- switch (shorty_field_type) {
- case kBoolean:
- case kByte:
- case kChar:
- case kShort:
- case kInt:
- case kFloat:
- case kLong:
- case kDouble:
- case kObject:
- case kLambda:
- *out = ShortyFieldType(static_cast<decltype(kByte)>(shorty_field_type));
- return true;
- default:
- break;
- }
-
- return false;
- }
-
- // Convert the first type in a field type descriptor string into a shorty.
- // Arrays are converted into objects.
- // Does not work for 'void' types (as they are illegal in a field type descriptor).
- static ShortyFieldType CreateFromFieldTypeDescriptor(const char* field_type_descriptor) {
- DCHECK(field_type_descriptor != nullptr);
- char c = *field_type_descriptor;
- if (UNLIKELY(c == kArray)) { // Arrays are treated as object references.
- c = kObject;
- }
- return ShortyFieldType{c}; // NOLINT [readability/braces] [4]
- }
-
- // Parse the first type in the field type descriptor string into a shorty.
- // See CreateFromFieldTypeDescriptor for more details.
- //
- // Returns a pointer into the middle of the field_type_descriptor that
- // either points to the next shorty type, or null if there are
- // no more types.
- //
- // DCHECKs that each of the nested types is a valid shorty field type. This
- // means the type descriptor must be already valid.
- static const char* ParseFromFieldTypeDescriptor(const char* field_type_descriptor,
- ShortyFieldType* out_type) {
- DCHECK(field_type_descriptor != nullptr);
-
- if (UNLIKELY(field_type_descriptor[0] == '\0')) {
- // Handle empty strings by immediately returning null.
- return nullptr;
- }
-
- // All non-empty strings must be a valid list of field type descriptors, otherwise
- // the DCHECKs will kick in and the program will crash.
- const char shorter_type = *field_type_descriptor;
-
- ShortyFieldType safe_type;
- bool type_set = MaybeCreate(shorter_type, &safe_type);
-
- // Lambda that keeps skipping characters until it sees ';'.
- // Stops one character -after- the ';'.
- auto skip_until_semicolon = [&field_type_descriptor]() {
- while (*field_type_descriptor != ';' && *field_type_descriptor != '\0') {
- ++field_type_descriptor;
- }
- DCHECK_NE(*field_type_descriptor, '\0')
- << " type descriptor terminated too early: " << field_type_descriptor;
- ++field_type_descriptor; // Skip the ';'
- };
-
- ++field_type_descriptor;
- switch (shorter_type) {
- case kObject:
- skip_until_semicolon();
-
- DCHECK(type_set);
- DCHECK(safe_type == kObject);
- break;
- case kArray:
- // Strip out all of the leading [[[[[s, we don't care if it's a multi-dimensional array.
- while (*field_type_descriptor == '[' && *field_type_descriptor != '\0') {
- ++field_type_descriptor;
- }
- DCHECK_NE(*field_type_descriptor, '\0')
- << " type descriptor terminated too early: " << field_type_descriptor;
- // Either a primitive, object, or closure left. No more arrays.
- {
- // Now skip all the characters that form the array's interior-most element type
- // (which itself is guaranteed not to be an array).
- ShortyFieldType array_interior_type;
- type_set = MaybeCreate(*field_type_descriptor, &array_interior_type);
- DCHECK(type_set) << " invalid remaining type descriptor " << field_type_descriptor;
-
- // Handle array-of-objects case like [[[[[LObject; and array-of-closures like [[[[[\Foo;
- if (*field_type_descriptor == kObject || *field_type_descriptor == kLambda) {
- skip_until_semicolon();
- } else {
- // Handle primitives which are exactly one character we can skip.
- DCHECK(array_interior_type.IsPrimitive());
- ++field_type_descriptor;
- }
- }
-
- safe_type = kObject;
- type_set = true;
- break;
- case kLambda:
- skip_until_semicolon();
-
- DCHECK(safe_type == kLambda);
- DCHECK(type_set);
- break;
- default:
- DCHECK_NE(kVoid, shorter_type) << "cannot make a ShortyFieldType from a void type";
- break;
- }
-
- DCHECK(type_set) << "invalid shorty type descriptor " << shorter_type;
-
- *out_type = safe_type;
- return type_set ? field_type_descriptor : nullptr;
- }
-
- // Explicitly convert to a char.
- inline explicit operator char() const {
- return value_;
- }
-
- // Is this a primitive?
- inline bool IsPrimitive() const {
- return IsPrimitiveNarrow() || IsPrimitiveWide();
- }
-
- // Is this a narrow primitive (i.e. can fit into 1 virtual register)?
- inline bool IsPrimitiveNarrow() const {
- switch (value_) {
- case kBoolean:
- case kByte:
- case kChar:
- case kShort:
- case kInt:
- case kFloat:
- return true;
- default:
- return false;
- }
- }
-
- // Is this a wide primitive (i.e. needs exactly 2 virtual registers)?
- inline bool IsPrimitiveWide() const {
- switch (value_) {
- case kLong:
- case kDouble:
- return true;
- default:
- return false;
- }
- }
-
- // Is this an object reference (which can also be an array)?
- inline bool IsObject() const {
- return value_ == kObject;
- }
-
- // Is this a lambda?
- inline bool IsLambda() const {
- return value_ == kLambda;
- }
-
- // Is the size of this (to store inline as a field) always known at compile-time?
- inline bool IsStaticSize() const {
- return !IsLambda();
- }
-
- // Get the compile-time size (to be able to store it inline as a field or on stack).
- // Dynamically-sized values such as lambdas return the guaranteed lower bound.
- inline size_t GetStaticSize() const {
- switch (value_) {
- case kBoolean:
- return sizeof(bool);
- case kByte:
- return sizeof(uint8_t);
- case kChar:
- return sizeof(int16_t);
- case kShort:
- return sizeof(uint16_t);
- case kInt:
- return sizeof(int32_t);
- case kLong:
- return sizeof(int64_t);
- case kFloat:
- return sizeof(float);
- case kDouble:
- return sizeof(double);
- case kObject:
- return kObjectReferenceSize;
- case kLambda:
- return sizeof(void*); // Large enough to store an ArtLambdaMethod* (the closure header).
- default:
- DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'";
- UNREACHABLE();
- }
- }
-
- // Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection.
- inline operator decltype(kByte)() const {
- return value_;
- }
-
- // Returns a read-only static string representing the enum name, useful for printing/debug only.
- inline const char* ToString() const {
- switch (value_) {
- case kBoolean:
- return "kBoolean";
- case kByte:
- return "kByte";
- case kChar:
- return "kChar";
- case kShort:
- return "kShort";
- case kInt:
- return "kInt";
- case kLong:
- return "kLong";
- case kFloat:
- return "kFloat";
- case kDouble:
- return "kDouble";
- case kObject:
- return "kObject";
- case kLambda:
- return "kLambda";
- default:
- // Undefined behavior if we get this far. Pray the compiler gods are merciful.
- return "<undefined>";
- }
- }
-
- private:
- static constexpr const char kArray = '[';
- static constexpr const char kVoid = 'V';
-
- // Helper to statically cast anything into our nested anonymous enum type.
- template <typename T>
- inline static decltype(kByte) StaticCastValue(const T& anything) {
- return static_cast<decltype(value_)>(anything);
- }
-
- // The only field in this struct.
- decltype(kByte) value_;
-};
-
-
-// Print to an output stream.
-inline std::ostream& operator<<(std::ostream& ostream, ShortyFieldType shorty) {
- return ostream << shorty.ToString();
-}
-
-static_assert(sizeof(ShortyFieldType) == sizeof(char),
- "ShortyFieldType must be lightweight just like a char");
-
-// Compile-time trait information regarding the ShortyFieldType.
-// Used by static_asserts to verify that the templates are correctly used at compile-time.
-//
-// For example,
- // ShortyFieldTypeTraits::IsPrimitiveWideType<int64_t>() == true
-// ShortyFieldTypeTraits::IsObjectType<mirror::Object*>() == true
-struct ShortyFieldTypeTraits {
- // A type guaranteed to be large enough to hold any of the shorty field types.
- using MaxType = uint64_t;
-
- // Type traits: Returns true if 'T' is a valid type that can be represented by a shorty field type.
- template <typename T>
- static inline constexpr bool IsType() {
- return IsPrimitiveType<T>() || IsObjectType<T>() || IsLambdaType<T>();
- }
-
- // Returns true if 'T' is a primitive type (i.e. a built-in without nested references).
- template <typename T>
- static inline constexpr bool IsPrimitiveType() {
- return IsPrimitiveNarrowType<T>() || IsPrimitiveWideType<T>();
- }
-
- // Returns true if 'T' is a primitive type that is narrow (i.e. can be stored into 1 vreg).
- template <typename T>
- static inline constexpr bool IsPrimitiveNarrowType() {
- return IsPrimitiveNarrowTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is a primitive type that is wide (i.e. needs 2 vregs for storage).
- template <typename T>
- static inline constexpr bool IsPrimitiveWideType() {
- return IsPrimitiveWideTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is an object (i.e. it is a managed GC reference).
- // Note: In practice this is only true for T == mirror::Object* (see the specializations below).
- template <typename T>
- static inline constexpr bool IsObjectType() {
- return IsObjectTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is a lambda (i.e. it is a closure with unknown static data).
- template <typename T>
- static inline constexpr bool IsLambdaType() {
- return IsLambdaTypeImpl(static_cast<T* const>(nullptr));
- }
-
- private:
-#define IS_VALID_TYPE_SPECIALIZATION(type, name) \
- static inline constexpr bool Is ## name ## TypeImpl(type* const = 0) { /*NOLINT*/ \
- return true; \
- } \
- \
- static_assert(sizeof(MaxType) >= sizeof(type), "MaxType too small")
-
- IS_VALID_TYPE_SPECIALIZATION(bool, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(int8_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint8_t, PrimitiveNarrow); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(int16_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint16_t, PrimitiveNarrow); // Chars are unsigned.
- IS_VALID_TYPE_SPECIALIZATION(int32_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint32_t, PrimitiveNarrow); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(float, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(int64_t, PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION(uint64_t, PrimitiveWide); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(double, PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION(mirror::Object*, Object);
- IS_VALID_TYPE_SPECIALIZATION(Closure*, Lambda);
-#undef IS_VALID_TYPE_SPECIALIZATION
-
-#define IS_VALID_TYPE_SPECIALIZATION_IMPL(name) \
- template <typename T> \
- static inline constexpr bool Is ## name ## TypeImpl(T* const = 0) { \
- return false; \
- }
-
- IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(Object);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(Lambda);
-
-#undef IS_VALID_TYPE_SPECIALIZATION_IMPL
-};
-
- // Maps the ShortyFieldType enum into its C++ type equivalent, via the "type" typedef.
-// For example:
-// ShortyFieldTypeSelectType<ShortyFieldType::kBoolean>::type => bool
-// ShortyFieldTypeSelectType<ShortyFieldType::kLong>::type => int64_t
-//
-// Invalid enums will not have the type defined.
-template <decltype(ShortyFieldType::kByte) Shorty>
-struct ShortyFieldTypeSelectType {
-};
-
- // Maps the C++ type into its ShortyFieldType enum equivalent, via the "value" constexpr.
-// For example:
-// ShortyFieldTypeSelectEnum<bool>::value => ShortyFieldType::kBoolean
-// ShortyFieldTypeSelectEnum<int64_t>::value => ShortyFieldType::kLong
-//
-// Signed-ness must match for a valid select, e.g. uint64_t will not map to kLong, but int64_t will.
-// Invalid types will not have the value defined (see e.g. ShortyFieldTypeTraits::IsType<T>())
-template <typename T>
-struct ShortyFieldTypeSelectEnum {
-};
-
-#define SHORTY_FIELD_TYPE_SELECT_IMPL(cpp_type, enum_element) \
-template <> \
-struct ShortyFieldTypeSelectType<ShortyFieldType::enum_element> { \
- using type = cpp_type; \
-}; \
-\
-template <> \
-struct ShortyFieldTypeSelectEnum<cpp_type> { \
- static constexpr const auto value = ShortyFieldType::enum_element; \
-}; \
-
-SHORTY_FIELD_TYPE_SELECT_IMPL(bool, kBoolean);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int8_t, kByte);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int16_t, kShort);
-SHORTY_FIELD_TYPE_SELECT_IMPL(uint16_t, kChar);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int32_t, kInt);
-SHORTY_FIELD_TYPE_SELECT_IMPL(float, kFloat);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int64_t, kLong);
-SHORTY_FIELD_TYPE_SELECT_IMPL(double, kDouble);
-SHORTY_FIELD_TYPE_SELECT_IMPL(mirror::Object*, kObject);
-SHORTY_FIELD_TYPE_SELECT_IMPL(Closure*, kLambda);
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
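A compact, hedged usage sketch of the shorty utilities above (the test file that follows exercises them far more thoroughly). It relies only on names declared in the deleted header plus <type_traits>/<vector>, and is not buildable outside the ART tree; ParseAllShorties is a hypothetical helper name.

#include <type_traits>
#include <vector>

// Walk a field type descriptor list and collect one ShortyFieldType per entry.
// For example, "I[LFoo;\\Bar;" parses to { kInt, kObject, kLambda }.
std::vector<art::lambda::ShortyFieldType> ParseAllShorties(const char* field_type_descriptor) {
  using art::lambda::ShortyFieldType;
  std::vector<ShortyFieldType> result;
  ShortyFieldType shorty;
  const char* cursor = field_type_descriptor;
  while ((cursor = ShortyFieldType::ParseFromFieldTypeDescriptor(cursor, &shorty)) != nullptr) {
    result.push_back(shorty);
  }
  return result;
}

// The compile-time traits and selectors are meant to be consumed by static_asserts:
static_assert(art::lambda::ShortyFieldTypeTraits::IsPrimitiveWideType<int64_t>(),
              "int64_t is a wide primitive ('J')");
static_assert(art::lambda::ShortyFieldTypeSelectEnum<int64_t>::value ==
                  art::lambda::ShortyFieldType::kLong,
              "int64_t maps to kLong");
static_assert(std::is_same<
                  art::lambda::ShortyFieldTypeSelectType<art::lambda::ShortyFieldType::kLong>::type,
                  int64_t>::value,
              "kLong maps back to int64_t");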
diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc
deleted file mode 100644
index 32bade9b56..0000000000
--- a/runtime/lambda/shorty_field_type_test.cc
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/shorty_field_type.h"
-#include "mirror/object_reference.h"
-
-#include "utils.h"
-#include <numeric>
-#include <stdint.h>
-#include "gtest/gtest.h"
-
-#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(nullptr));
-
-namespace art {
-namespace lambda {
-
-class ShortyFieldTypeTest : public ::testing::Test {
- public:
- ShortyFieldTypeTest() = default;
- ~ShortyFieldTypeTest() = default;
-
- protected:
- static void SetUpTestCase() {
- }
-
- virtual void SetUp() {
- }
-
- static ::testing::AssertionResult IsResultSuccessful(bool result) {
- if (result) {
- return ::testing::AssertionSuccess();
- } else {
- return ::testing::AssertionFailure();
- }
- }
-
- template <typename T>
- static std::string ListToString(const T& list) {
- std::stringstream stream;
-
- stream << "[";
- for (auto&& val : list) {
- stream << val << ", ";
- }
- stream << "]";
-
- return stream.str();
- }
-
- // Compare two vector-like types for equality.
- template <typename T>
- static ::testing::AssertionResult AreListsEqual(const T& expected, const T& actual) {
- bool success = true;
- std::stringstream stream;
-
- if (expected.size() != actual.size()) {
- success = false;
- stream << "Expected list size: " << expected.size()
- << ", but got list size: " << actual.size();
- stream << std::endl;
- }
-
- for (size_t j = 0; j < std::min(expected.size(), actual.size()); ++j) {
- if (expected[j] != actual[j]) {
- success = false;
- stream << "Expected element '" << j << "' to be '" << expected[j] << "', but got actual: '"
- << actual[j] << "'.";
- stream << std::endl;
- }
- }
-
- if (success) {
- return ::testing::AssertionSuccess();
- }
-
- stream << "Expected list was: " << ListToString(expected)
- << ", actual list was: " << ListToString(actual);
-
- return ::testing::AssertionFailure() << stream.str();
- }
-
- static std::vector<ShortyFieldType> ParseLongTypeDescriptorsToList(const char* type_descriptor) {
- std::vector<ShortyFieldType> lst;
-
- ShortyFieldType shorty;
-
- const char* parsed = type_descriptor;
- while ((parsed = ShortyFieldType::ParseFromFieldTypeDescriptor(parsed, &shorty)) != nullptr) {
- lst.push_back(shorty);
- }
-
- return lst;
- }
-
- protected:
- // Shorthands for the ShortyFieldType constants.
- // The letters are the same as JNI letters, with kS_ being a lambda since \ is not available.
- static constexpr ShortyFieldType kSZ = ShortyFieldType::kBoolean;
- static constexpr ShortyFieldType kSB = ShortyFieldType::kByte;
- static constexpr ShortyFieldType kSC = ShortyFieldType::kChar;
- static constexpr ShortyFieldType kSS = ShortyFieldType::kShort;
- static constexpr ShortyFieldType kSI = ShortyFieldType::kInt;
- static constexpr ShortyFieldType kSF = ShortyFieldType::kFloat;
- static constexpr ShortyFieldType kSJ = ShortyFieldType::kLong;
- static constexpr ShortyFieldType kSD = ShortyFieldType::kDouble;
- static constexpr ShortyFieldType kSL = ShortyFieldType::kObject;
- static constexpr ShortyFieldType kS_ = ShortyFieldType::kLambda;
-};
-
-TEST_F(ShortyFieldTypeTest, TestMaybeCreate) {
- ShortyFieldType shorty;
-
- std::vector<char> shorties = {'Z', 'B', 'C', 'S', 'I', 'F', 'J', 'D', 'L', '\\'};
-
- // All valid 'shorty' characters are created successfully.
- for (const char c : shorties) {
- EXPECT_TRUE(ShortyFieldType::MaybeCreate(c, &shorty)) << c;
- EXPECT_EQ(c, static_cast<char>(shorty));
- }
-
- // All other characters can never be created.
- for (unsigned char c = 0; c < std::numeric_limits<unsigned char>::max(); ++c) {
- // Skip the valid characters.
- if (std::find(shorties.begin(), shorties.end(), c) != shorties.end()) { continue; }
- // All invalid characters should fail.
- EXPECT_FALSE(ShortyFieldType::MaybeCreate(static_cast<char>(c), &shorty)) << c;
- }
-} // TEST_F
-
-TEST_F(ShortyFieldTypeTest, TestCreateFromFieldTypeDescriptor) {
- // Sample input.
- std::vector<const char*> lengthies = {
- "Z", "B", "C", "S", "I", "F", "J", "D", "LObject;", "\\Closure;",
- "[Z", "[[B", "[[LObject;"
- };
-
- // Expected output.
- std::vector<ShortyFieldType> expected = {
- ShortyFieldType::kBoolean,
- ShortyFieldType::kByte,
- ShortyFieldType::kChar,
- ShortyFieldType::kShort,
- ShortyFieldType::kInt,
- ShortyFieldType::kFloat,
- ShortyFieldType::kLong,
- ShortyFieldType::kDouble,
- ShortyFieldType::kObject,
- ShortyFieldType::kLambda,
- // Arrays are always treated as objects.
- ShortyFieldType::kObject,
- ShortyFieldType::kObject,
- ShortyFieldType::kObject,
- };
-
- // All valid lengthy types are correctly turned into the expected shorty type.
- for (size_t i = 0; i < lengthies.size(); ++i) {
- EXPECT_EQ(expected[i], ShortyFieldType::CreateFromFieldTypeDescriptor(lengthies[i]));
- }
-} // TEST_F
-
-TEST_F(ShortyFieldTypeTest, TestParseFromFieldTypeDescriptor) {
- // Sample input.
- std::vector<const char*> lengthies = {
- // Empty list
- "",
- // Primitives
- "Z", "B", "C", "S", "I", "F", "J", "D",
- // Non-primitives
- "LObject;", "\\Closure;",
- // Arrays. The biggest PITA.
- "[Z", "[[B", "[[LObject;", "[[[[\\Closure;",
- // Multiple things at once:
- "ZBCSIFJD",
- "LObject;LObject;SSI",
- "[[ZDDZ",
- "[[LObject;[[Z[F\\Closure;LObject;",
- };
-
- // Expected output.
- std::vector<std::vector<ShortyFieldType>> expected = {
- // Empty list
- {},
- // Primitives
- {kSZ}, {kSB}, {kSC}, {kSS}, {kSI}, {kSF}, {kSJ}, {kSD},
- // Non-primitives.
- { ShortyFieldType::kObject }, { ShortyFieldType::kLambda },
- // Arrays are always treated as objects.
- { kSL }, { kSL }, { kSL }, { kSL },
- // Multiple things at once:
- { kSZ, kSB, kSC, kSS, kSI, kSF, kSJ, kSD },
- { kSL, kSL, kSS, kSS, kSI },
- { kSL, kSD, kSD, kSZ },
- { kSL, kSL, kSL, kS_, kSL },
- };
-
- // Sanity check that the expected/actual lists are the same size when adding new entries.
- ASSERT_EQ(expected.size(), lengthies.size());
-
- // All valid lengthy types are correctly turned into the expected shorty type.
- for (size_t i = 0; i < expected.size(); ++i) {
- const std::vector<ShortyFieldType>& expected_list = expected[i];
- std::vector<ShortyFieldType> actual_list = ParseLongTypeDescriptorsToList(lengthies[i]);
- EXPECT_TRUE(AreListsEqual(expected_list, actual_list));
- }
-} // TEST_F
-
-// Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests.
-template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum>
-struct ShortyTypeCharacteristics {
- bool is_primitive_ = false;
- bool is_primitive_narrow_ = false;
- bool is_primitive_wide_ = false;
- bool is_object_ = false;
- bool is_lambda_ = false;
- size_t size_ = sizeof(T);
- bool is_dynamic_sized_ = false;
-
- void CheckExpects() {
- ShortyFieldType shorty = kShortyEnum;
-
- // Test the main non-parsing-related ShortyFieldType characteristics.
- EXPECT_EQ(is_primitive_, shorty.IsPrimitive());
- EXPECT_EQ(is_primitive_narrow_, shorty.IsPrimitiveNarrow());
- EXPECT_EQ(is_primitive_wide_, shorty.IsPrimitiveWide());
- EXPECT_EQ(is_object_, shorty.IsObject());
- EXPECT_EQ(is_lambda_, shorty.IsLambda());
- EXPECT_EQ(size_, shorty.GetStaticSize());
- EXPECT_EQ(is_dynamic_sized_, !shorty.IsStaticSize());
-
- // Test compile-time ShortyFieldTypeTraits.
- EXPECT_TRUE(ShortyFieldTypeTraits::IsType<T>());
- EXPECT_EQ(is_primitive_, ShortyFieldTypeTraits::IsPrimitiveType<T>());
- EXPECT_EQ(is_primitive_narrow_, ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>());
- EXPECT_EQ(is_primitive_wide_, ShortyFieldTypeTraits::IsPrimitiveWideType<T>());
- EXPECT_EQ(is_object_, ShortyFieldTypeTraits::IsObjectType<T>());
- EXPECT_EQ(is_lambda_, ShortyFieldTypeTraits::IsLambdaType<T>());
-
- // Test compile-time ShortyFieldType selectors
- static_assert(std::is_same<T, typename ShortyFieldTypeSelectType<kShortyEnum>::type>::value,
- "ShortyFieldType Enum->Type incorrect mapping");
- auto kActualEnum = ShortyFieldTypeSelectEnum<T>::value; // Do not ODR-use, avoid linker error.
- EXPECT_EQ(kShortyEnum, kActualEnum);
- }
-};
-
-TEST_F(ShortyFieldTypeTest, TestCharacteristicsAndTraits) {
- // Boolean test
- {
- SCOPED_TRACE("boolean");
- ShortyTypeCharacteristics<bool, ShortyFieldType::kBoolean> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Byte test
- {
- SCOPED_TRACE("byte");
- ShortyTypeCharacteristics<int8_t, ShortyFieldType::kByte> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Char test
- {
- SCOPED_TRACE("char");
- ShortyTypeCharacteristics<uint16_t, ShortyFieldType::kChar> chars; // Char is unsigned.
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Short test
- {
- SCOPED_TRACE("short");
- ShortyTypeCharacteristics<int16_t, ShortyFieldType::kShort> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Int test
- {
- SCOPED_TRACE("int");
- ShortyTypeCharacteristics<int32_t, ShortyFieldType::kInt> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Long test
- {
- SCOPED_TRACE("long");
- ShortyTypeCharacteristics<int64_t, ShortyFieldType::kLong> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_wide_ = true;
- chars.CheckExpects();
- }
-
- // Float test
- {
- SCOPED_TRACE("float");
- ShortyTypeCharacteristics<float, ShortyFieldType::kFloat> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Double test
- {
- SCOPED_TRACE("double");
- ShortyTypeCharacteristics<double, ShortyFieldType::kDouble> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_wide_ = true;
- chars.CheckExpects();
- }
-
- // Object test
- {
- SCOPED_TRACE("object");
- ShortyTypeCharacteristics<mirror::Object*, ShortyFieldType::kObject> chars;
- chars.is_object_ = true;
- chars.size_ = kObjectReferenceSize;
- chars.CheckExpects();
- EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::CompressedReference<mirror::Object>));
- }
-
- // Lambda test
- {
- SCOPED_TRACE("lambda");
- ShortyTypeCharacteristics<Closure*, ShortyFieldType::kLambda> chars;
- chars.is_lambda_ = true;
- chars.is_dynamic_sized_ = true;
- chars.CheckExpects();
- }
-}
-
-} // namespace lambda
-} // namespace art