lambda: Minor capture-variable/liberate-variable clean-up after post-merge reviews.
Change-Id: I64f867d4ed5a5efcac138097f38efe4bb7f1281d
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 5250b0d..438b6b8 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -333,7 +333,7 @@
if (i != 0) {
os << ", ";
}
- os << "v" << arg[i+2]; // Don't print the pair of vC registers. Pair is implicit.
+ os << "v" << arg[i + 2]; // Don't print the pair of vC registers. Pair is implicit.
}
os << "}";
break;
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index f57bddb..5db8cf7 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -140,11 +140,11 @@
// Write out the 'Closure*' into vreg and vreg+1, as if it was a jlong.
static inline void WriteLambdaClosureIntoVRegs(ShadowFrame& shadow_frame,
- const lambda::Closure* lambda_closure,
+ const lambda::Closure& lambda_closure,
uint32_t vreg) {
// Split the method into a lo and hi 32 bits so we can encode them into 2 virtual registers.
- uint32_t closure_lo = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(lambda_closure));
- uint32_t closure_hi = static_cast<uint32_t>(reinterpret_cast<uint64_t>(lambda_closure)
+ uint32_t closure_lo = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&lambda_closure));
+ uint32_t closure_hi = static_cast<uint32_t>(reinterpret_cast<uint64_t>(&lambda_closure)
>> BitSizeOf<uint32_t>());
// Use uint64_t instead of uintptr_t to allow shifting past the max on 32-bit.
static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
@@ -176,6 +176,9 @@
DCHECK(uninitialized_closure != nullptr);
DCHECK_ALIGNED(uninitialized_closure, alignof(lambda::Closure));
+ using lambda::ArtLambdaMethod;
+ using lambda::LeakingAllocator;
+
/*
* create-lambda is opcode 0x21c
* - vA is the target register where the closure will be stored into
@@ -197,12 +200,13 @@
return false;
}
- lambda::ArtLambdaMethod* initialized_lambda_method;
+ ArtLambdaMethod* initialized_lambda_method;
// Initialize the ArtLambdaMethod with the right data.
{
- lambda::ArtLambdaMethod* uninitialized_lambda_method =
- reinterpret_cast<lambda::ArtLambdaMethod*>(
- lambda::LeakingAllocator::AllocateMemory(self, sizeof(lambda::ArtLambdaMethod)));
+ // Allocate enough memory to store a well-aligned ArtLambdaMethod.
+ // This is not the final type yet since the data starts out uninitialized.
+ LeakingAllocator::AlignedMemoryStorage<ArtLambdaMethod>* uninitialized_lambda_method =
+ LeakingAllocator::AllocateMemory<ArtLambdaMethod>(self);
std::string captured_variables_shorty = closure_builder->GetCapturedVariableShortyTypes();
std::string captured_variables_long_type_desc;
@@ -227,30 +231,28 @@
// Copy strings to dynamically allocated storage. This leaks, but that's ok. Fix it later.
// TODO: Strings need to come from the DexFile, so they won't need their own allocations.
- char* captured_variables_type_desc = lambda::LeakingAllocator::MakeFlexibleInstance<char>(
+ char* captured_variables_type_desc = LeakingAllocator::MakeFlexibleInstance<char>(
self,
captured_variables_long_type_desc.size() + 1);
strcpy(captured_variables_type_desc, captured_variables_long_type_desc.c_str());
- char* captured_variables_shorty_copy = lambda::LeakingAllocator::MakeFlexibleInstance<char>(
+ char* captured_variables_shorty_copy = LeakingAllocator::MakeFlexibleInstance<char>(
self,
captured_variables_shorty.size() + 1);
strcpy(captured_variables_shorty_copy, captured_variables_shorty.c_str());
- new (uninitialized_lambda_method) lambda::ArtLambdaMethod(called_method,
- captured_variables_type_desc,
- captured_variables_shorty_copy,
- true); // innate lambda
- initialized_lambda_method = uninitialized_lambda_method;
+ // After initialization, the object at the storage is well-typed. Use strong type going forward.
+ initialized_lambda_method =
+ new (uninitialized_lambda_method) ArtLambdaMethod(called_method,
+ captured_variables_type_desc,
+ captured_variables_shorty_copy,
+ true); // innate lambda
}
// Write all the closure captured variables and the closure header into the closure.
- lambda::Closure* initialized_closure;
- {
- initialized_closure =
- closure_builder->CreateInPlace(uninitialized_closure, initialized_lambda_method);
- }
+ lambda::Closure* initialized_closure =
+ closure_builder->CreateInPlace(uninitialized_closure, initialized_lambda_method);
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, initialized_closure, vreg_dest_closure);
+ WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *initialized_closure, vreg_dest_closure);
return true;
}
@@ -911,7 +913,7 @@
}
DCHECK(unboxed_closure != nullptr);
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, unboxed_closure, vreg_target_closure);
+ WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *unboxed_closure, vreg_target_closure);
return true;
}
diff --git a/runtime/lambda/leaking_allocator.cc b/runtime/lambda/leaking_allocator.cc
index 4910732..22bb294 100644
--- a/runtime/lambda/leaking_allocator.cc
+++ b/runtime/lambda/leaking_allocator.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/bit_utils.h"
#include "lambda/leaking_allocator.h"
#include "linear_alloc.h"
#include "runtime.h"
@@ -21,9 +22,11 @@
namespace art {
namespace lambda {
-void* LeakingAllocator::AllocateMemory(Thread* self, size_t byte_size) {
+void* LeakingAllocator::AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size) {
// TODO: use GetAllocatorForClassLoader to allocate lambda ArtMethod data.
- return Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
+ void* mem = Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
+ DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(mem), align_size);
+ return mem;
}
} // namespace lambda
diff --git a/runtime/lambda/leaking_allocator.h b/runtime/lambda/leaking_allocator.h
index c3222d0..cb5a1bf 100644
--- a/runtime/lambda/leaking_allocator.h
+++ b/runtime/lambda/leaking_allocator.h
@@ -17,6 +17,7 @@
#define ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
#include <utility> // std::forward
+#include <type_traits> // std::aligned_storage
namespace art {
class Thread; // forward declaration
@@ -33,20 +34,36 @@
// TODO: do all of the above a/b for each callsite, and delete this class.
class LeakingAllocator {
public:
+ // An opaque type which is guaranteed to:
+ // * a) be large enough to hold a T (e.g. for in-place new)
+ // * b) be well-aligned to T (so that reads/writes are well-defined)
+ // * c) be strict-aliasing compatible with T*
+ //
+ // Nominally used to allocate memory for not-yet-constructed instances of T.
+ template <typename T>
+ using AlignedMemoryStorage = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
+
// Allocate byte_size bytes worth of memory. Never freed.
- static void* AllocateMemory(Thread* self, size_t byte_size);
+ template <typename T>
+ static AlignedMemoryStorage<T>* AllocateMemory(Thread* self, size_t byte_size = sizeof(T)) {
+ return reinterpret_cast<AlignedMemoryStorage<T>*>(
+ AllocateMemoryImpl(self, byte_size, alignof(T)));
+ }
// Make a new instance of T, flexibly sized, in-place at newly allocated memory. Never freed.
template <typename T, typename... Args>
static T* MakeFlexibleInstance(Thread* self, size_t byte_size, Args&&... args) {
- return new (AllocateMemory(self, byte_size)) T(std::forward<Args>(args)...);
+ return new (AllocateMemory<T>(self, byte_size)) T(std::forward<Args>(args)...);
}
// Make a new instance of T in-place at newly allocated memory. Never freed.
template <typename T, typename... Args>
static T* MakeInstance(Thread* self, Args&&... args) {
- return new (AllocateMemory(self, sizeof(T))) T(std::forward<Args>(args)...);
+ return new (AllocateMemory<T>(self, sizeof(T))) T(std::forward<Args>(args)...);
}
+
+ private:
+ static void* AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size);
};
} // namespace lambda