lambda: Infrastructure to support capture/liberate-variable dex opcodes

* ArtLambdaMethod - wrap an ArtMethod with extra runtime lambda info
* Closure - data representation for a runtime lambda closure (read-only)
* ClosureBuilder - writer for creating a Closure at runtime
* ShortyFieldType - char/enum wrapper for shorty_field_type in dex

Tests:
* Closure, ClosureBuilder, ShortyFieldType have full unit test coverage.
* ArtLambdaMethod does not, but it is tested indirectly and otherwise
  consists only of trivial getters.

Future CLs will include interpreter integration with minimal changes to
this Closure infrastructure.
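
A rough usage sketch of how these pieces fit together (illustrative only;
'lambda_method' stands in for an ArtLambdaMethod* whose captured-variables
shorty is "IJ" and is not part of this CL):

  lambda::ClosureBuilder builder;  // Mutator lock must be held.
  builder.CaptureVariablePrimitive<int32_t, lambda::ShortyFieldType::kInt>(123);
  builder.CaptureVariablePrimitive<int64_t, lambda::ShortyFieldType::kLong>(456);

  std::vector<uint8_t> memory(builder.GetSize());
  lambda::Closure* closure = builder.CreateInPlace(memory.data(), lambda_method);

  CHECK_EQ(2u, closure->GetNumberOfCapturedVariables());
  CHECK_EQ(123u, closure->GetCapturedPrimitiveNarrow(0));
  CHECK_EQ(456u, closure->GetCapturedPrimitiveWide(1));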

Change-Id: I38a7aea8df1da7b154fd6623258c6c228c8e51df
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c88d677..326a92b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -204,6 +204,8 @@
   runtime/interpreter/unstarted_runtime_test.cc \
   runtime/java_vm_ext_test.cc \
   runtime/jit/jit_code_cache_test.cc \
+  runtime/lambda/closure_test.cc \
+  runtime/lambda/shorty_field_type_test.cc \
   runtime/leb128_test.cc \
   runtime/mem_map_test.cc \
   runtime/memory_region_test.cc \
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 8f70d30..963eecb 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -99,7 +99,10 @@
   jit/jit.cc \
   jit/jit_code_cache.cc \
   jit/jit_instrumentation.cc \
+  lambda/art_lambda_method.cc \
   lambda/box_table.cc \
+  lambda/closure.cc \
+  lambda/closure_builder.cc \
   jni_internal.cc \
   jobject_comparator.cc \
   linear_alloc.cc \
diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc
new file mode 100644
index 0000000..6f9f8bb
--- /dev/null
+++ b/runtime/lambda/art_lambda_method.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "lambda/art_lambda_method.h"
+
+#include "base/logging.h"
+#include "lambda/shorty_field_type.h"
+
+namespace art {
+namespace lambda {
+
+ArtLambdaMethod::ArtLambdaMethod(ArtMethod* target_method,
+                                 const char* captured_variables_type_descriptor,
+                                 const char* captured_variables_shorty,
+                                 bool innate_lambda)
+    : method_(target_method),
+      captured_variables_type_descriptor_(captured_variables_type_descriptor),
+      captured_variables_shorty_(captured_variables_shorty),
+      innate_lambda_(innate_lambda) {
+  DCHECK(target_method != nullptr);
+  DCHECK(captured_variables_type_descriptor != nullptr);
+  DCHECK(captured_variables_shorty != nullptr);
+
+  // Calculate the static closure size from the captured variables.
+  size_t size = sizeof(ArtLambdaMethod*);  // Initial size is just the ArtLambdaMethod pointer.
+  bool static_size = true;
+  const char* shorty = captured_variables_shorty_;
+  while (shorty != nullptr && *shorty != '\0') {
+    // Each captured variable also appends to the size.
+    ShortyFieldType shorty_field{*shorty};  // NOLINT [readability/braces] [4]
+    size += shorty_field.GetStaticSize();
+    static_size &= shorty_field.IsStaticSize();
+    ++shorty;
+  }
+  closure_size_ = size;
+
+  // We determine whether or not the size is dynamic by checking for nested lambdas.
+  //
+  // This is conservative, since in theory an optimization could determine the size
+  // of the nested lambdas recursively. In practice it's probably better to flatten out
+  // nested lambdas and inline all their code if they are known statically.
+  dynamic_size_ = !static_size;
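+  // For example, a captured-variables shorty of "IJ" (an int and a long) is expected to
+  // yield closure_size_ = sizeof(ArtLambdaMethod*) + 4 + 8, with dynamic_size_ == false.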
+
+  if (kIsDebugBuild) {
+    // Double check that the number of captured variables matches in both strings.
+    size_t shorty_count = strlen(captured_variables_shorty);
+
+    size_t long_count = 0;
+    const char* long_type = captured_variables_type_descriptor;
+    ShortyFieldType out;
+    while ((long_type = ShortyFieldType::ParseFromFieldTypeDescriptor(long_type, &out))
+           != nullptr) {
+      ++long_count;
+    }
+
+    DCHECK_EQ(shorty_count, long_count)
+        << "number of captured variables in long type '" << captured_variables_type_descriptor
+        << "' (" << long_count << ")" << " did not match short type '"
+        << captured_variables_shorty << "' (" << shorty_count << ")";
+  }
+}
+
+}  // namespace lambda
+}  // namespace art
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
new file mode 100644
index 0000000..892d8c6
--- /dev/null
+++ b/runtime/lambda/art_lambda_method.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
+#define ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
+
+#include "base/macros.h"
+#include "art_method.h"
+
+#include <stdint.h>
+
+namespace art {
+namespace lambda {
+
+class ArtLambdaMethod {
+ public:
+  // Construct an art lambda method.
+  // The target method is the one invoked by invoke-lambda.
+  // The type descriptor describes the types of variables captured, e.g. "ZFLObject;\FI;[Z"
+  // The shorty drops the object name and treats arrays as objects, e.g. "ZFL\L"
+  // Innate lambda means that the lambda was originally created via invoke-lambda.
+  // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
+  // (The caller retains ownership of the strings; their lifetime must exceed this class's.)
+  ArtLambdaMethod(ArtMethod* target_method,
+                  const char* captured_variables_type_descriptor,
+                  const char* captured_variables_shorty,
+                  bool innate_lambda = true);
+
+  // Get the target method for this lambda that would be used by the invoke-lambda dex instruction.
+  ArtMethod* GetArtMethod() const {
+    return method_;
+  }
+
+  // Get the compile-time size of lambda closures for this method in bytes.
+  // Note that this includes the size of the ArtLambdaMethod* header itself.
+  // One should also check if the size is dynamic, since nested lambdas have a runtime size.
+  size_t GetStaticClosureSize() const {
+    return closure_size_;
+  }
+
+  // Get the type descriptor for the list of captured variables.
+  // e.g. "ZFLObject;\FI;[Z" means a captured int, float, class Object, lambda FI, array of ints
+  const char* GetCapturedVariablesTypeDescriptor() const {
+    return captured_variables_type_descriptor_;
+  }
+
+  // Get the shorty 'field' type descriptor list of captured variables.
+  // This follows the same rules as a string of ShortyFieldType in the dex specification.
+  // Every captured variable is represented by exactly one character.
+  // - Objects become 'L'.
+  // - Arrays become 'L'.
+  // - Lambdas become '\'.
+  const char* GetCapturedVariablesShortyTypeDescriptor() const {
+    return captured_variables_shorty_;
+  }
+
+  // Will the size of this lambda change at runtime?
+  // Only returns true if there is a nested lambda whose size we cannot determine statically.
+  bool IsDynamicSize() const {
+    return dynamic_size_;
+  }
+
+  // Will the size of this lambda always be constant at runtime?
+  // This generally means there are no nested lambdas, or we were able to successfully
+  // determine their size statically at compile time.
+  bool IsStaticSize() const {
+    return !IsDynamicSize();
+  }
+
+  // Is this a lambda that was originally created via invoke-lambda?
+  // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
+  bool IsInnateLambda() const {
+    return innate_lambda_;
+  }
+
+  // How many variables were captured?
+  // (Each nested lambda counts as 1 captured var regardless of how many captures it itself has).
+  size_t GetNumberOfCapturedVariables() const {
+    return strlen(captured_variables_shorty_);
+  }
+
+ private:
+  // TODO: ArtMethod, or at least its entry points, should be inlined into this struct
+  // to avoid an extra indirect load when doing invokes.
+  // Target method that invoke-lambda will jump to.
+  ArtMethod* method_;
+  // How big the closure is (in bytes). Only includes the constant size.
+  size_t closure_size_;
+  // The type descriptor for the captured variables, e.g. "IS" for [int, short]
+  const char* captured_variables_type_descriptor_;
+  // The shorty type descriptor for captured vars, (e.g. using 'L' instead of 'LObject;')
+  const char* captured_variables_shorty_;
+  // Whether or not the size is dynamic. If it is, copiers need to read the Closure size at runtime.
+  bool dynamic_size_;
+  // True if this lambda was originally made with create-lambda,
+  // false if it came from a class instance (through new-instance and then unbox-lambda).
+  bool innate_lambda_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArtLambdaMethod);
+};
+
+}  // namespace lambda
+}  // namespace art
+
+#endif  // ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
new file mode 100644
index 0000000..95a17c6
--- /dev/null
+++ b/runtime/lambda/closure.cc
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "lambda/closure.h"
+
+#include "base/logging.h"
+#include "lambda/art_lambda_method.h"
+#include "runtime/mirror/object_reference.h"
+
+static constexpr const bool kClosureSupportsReferences = false;
+static constexpr const bool kClosureSupportsGarbageCollection = false;
+
+namespace art {
+namespace lambda {
+
+template <typename T>
+// TODO: can I return T __attribute__((__aligned__(1)))* here instead?
+const uint8_t* Closure::GetUnsafeAtOffset(size_t offset) const {
+  // Do not DCHECK here with existing helpers since most of them will call into this function.
+  return reinterpret_cast<const uint8_t*>(captured_) + offset;
+}
+
+size_t Closure::GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const {
+  switch (variable_type) {
+    case ShortyFieldType::kLambda:
+    {
+      return GetClosureSize(GetUnsafeAtOffset<Closure>(offset));
+    }
+    default:
+      DCHECK(variable_type.IsStaticSize());
+      return variable_type.GetStaticSize();
+  }
+}
+
+// Templatize the flags to give the compiler a fighting chance to eliminate
+// any unnecessary code through different uses of this function.
+template <Closure::VariableInfo::Flags flags>
+inline Closure::VariableInfo Closure::ParseTypeDescriptor(const char* type_descriptor,
+                                                          size_t upto_index) const {
+  DCHECK(type_descriptor != nullptr);
+
+  VariableInfo result;
+
+  ShortyFieldType last_type;
+  size_t offset = (flags & VariableInfo::kOffset) ? GetStartingOffset() : 0;
+  size_t prev_offset = 0;
+  size_t count = 0;
+
+  while ((type_descriptor =
+      ShortyFieldType::ParseFromFieldTypeDescriptor(type_descriptor, &last_type)) != nullptr) {
+    count++;
+
+    if (flags & VariableInfo::kOffset) {
+      // The current offset is the accumulated size of all preceding captured variables.
+      offset += prev_offset;
+      prev_offset = GetCapturedVariableSize(last_type, offset);
+    }
+
+    if (count > upto_index) {
+      break;
+    }
+  }
+
+  if (flags & VariableInfo::kVariableType) {
+    result.variable_type_ = last_type;
+  }
+
+  if (flags & VariableInfo::kIndex) {
+    result.index_ = count;
+  }
+
+  if (flags & VariableInfo::kCount) {
+    result.count_ = count;
+  }
+
+  if (flags & VariableInfo::kOffset) {
+    result.offset_ = offset;
+  }
+
+  // TODO: We should probably store the result of this in the ArtLambdaMethod,
+  // to avoid re-computing the data every single time for static closures.
+  return result;
+}
+
+size_t Closure::GetCapturedVariablesSize() const {
+  const size_t captured_variable_offset = offsetof(Closure, captured_);
+  DCHECK_GE(GetSize(), captured_variable_offset);  // Prevent underflows.
+  return GetSize() - captured_variable_offset;
+}
+
+size_t Closure::GetSize() const {
+  const size_t static_closure_size = lambda_info_->GetStaticClosureSize();
+  if (LIKELY(lambda_info_->IsStaticSize())) {
+    return static_closure_size;
+  }
+
+  DCHECK_GE(static_closure_size, sizeof(captured_[0].dynamic_.size_));
+  const size_t dynamic_closure_size = captured_[0].dynamic_.size_;
+  // The dynamic size better be at least as big as the static size.
+  DCHECK_GE(dynamic_closure_size, static_closure_size);
+
+  return dynamic_closure_size;
+}
+
+void Closure::CopyTo(void* target, size_t target_size) const {
+  DCHECK_GE(target_size, GetSize());
+
+  // TODO: using memcpy is unsafe with read barriers, fix this once we add reference support
+  static_assert(kClosureSupportsReferences == false,
+                "Do not use memcpy with read barrier references");
+  memcpy(target, this, GetSize());
+}
+
+size_t Closure::GetNumberOfCapturedVariables() const {
+  // TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
+  VariableInfo variable_info =
+      ParseTypeDescriptor<VariableInfo::kCount>(GetCapturedVariablesTypeDescriptor(),
+                                                VariableInfo::kUpToIndexMax);
+  size_t count = variable_info.count_;
+  // Each variable is at least 1 byte, so the size must be greater than or equal to the count.
+  DCHECK_LE(count, GetCapturedVariablesSize());
+  return count;
+}
+
+const char* Closure::GetCapturedVariablesTypeDescriptor() const {
+  return lambda_info_->GetCapturedVariablesTypeDescriptor();
+}
+
+ShortyFieldType Closure::GetCapturedShortyType(size_t index) const {
+  DCHECK_LT(index, GetNumberOfCapturedVariables());
+
+  VariableInfo variable_info =
+      ParseTypeDescriptor<VariableInfo::kVariableType>(GetCapturedVariablesTypeDescriptor(),
+                                                       index);
+
+  return variable_info.variable_type_;
+}
+
+uint32_t Closure::GetCapturedPrimitiveNarrow(size_t index) const {
+  DCHECK(GetCapturedShortyType(index).IsPrimitiveNarrow());
+
+  ShortyFieldType variable_type;
+  size_t offset;
+  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
+
+  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
+  // so that we can avoid this nonsense regarding memcpy always overflowing.
+  // Plus, this additional switching seems redundant since the interpreter
+  // would've done it already, and knows the exact type.
+  uint32_t result = 0;
+  static_assert(ShortyFieldTypeTraits::IsPrimitiveNarrowType<decltype(result)>(),
+                "result must be a primitive narrow type");
+  switch (variable_type) {
+    case ShortyFieldType::kBoolean:
+      CopyUnsafeAtOffset<bool>(offset, &result);
+      break;
+    case ShortyFieldType::kByte:
+      CopyUnsafeAtOffset<uint8_t>(offset, &result);
+      break;
+    case ShortyFieldType::kChar:
+      CopyUnsafeAtOffset<uint16_t>(offset, &result);
+      break;
+    case ShortyFieldType::kShort:
+      CopyUnsafeAtOffset<int16_t>(offset, &result);
+      break;
+    case ShortyFieldType::kInt:
+      CopyUnsafeAtOffset<int32_t>(offset, &result);
+      break;
+    case ShortyFieldType::kFloat:
+      // XX: Maybe there should just be a GetCapturedPrimitive<T> to avoid this shuffle?
+      // The interpreter's invoke seems to only special case references and wides,
+      // everything else is treated as a generic 32-bit pattern.
+      CopyUnsafeAtOffset<float>(offset, &result);
+      break;
+    default:
+      LOG(FATAL)
+          << "expected a valid narrow primitive shorty type but got "
+          << static_cast<char>(variable_type);
+      UNREACHABLE();
+  }
+
+  return result;
+}
+
+uint64_t Closure::GetCapturedPrimitiveWide(size_t index) const {
+  DCHECK(GetCapturedShortyType(index).IsPrimitiveWide());
+
+  ShortyFieldType variable_type;
+  size_t offset;
+  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
+
+  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
+  // so that we can avoid this nonsense regarding memcpy always overflowing.
+  // Plus, this additional switching seems redundant since the interpreter
+  // would've done it already, and knows the exact type.
+  uint64_t result = 0;
+  static_assert(ShortyFieldTypeTraits::IsPrimitiveWideType<decltype(result)>(),
+                "result must be a primitive wide type");
+  switch (variable_type) {
+    case ShortyFieldType::kLong:
+      CopyUnsafeAtOffset<int64_t>(offset, &result);
+      break;
+    case ShortyFieldType::kDouble:
+      CopyUnsafeAtOffset<double>(offset, &result);
+      break;
+    default:
+      LOG(FATAL)
+          << "expected a valid primitive wide shorty type but got "
+          << static_cast<char>(variable_type);
+      UNREACHABLE();
+  }
+
+  return result;
+}
+
+mirror::Object* Closure::GetCapturedObject(size_t index) const {
+  DCHECK(GetCapturedShortyType(index).IsObject());
+
+  ShortyFieldType variable_type;
+  size_t offset;
+  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
+
+  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
+  // so that we can avoid this nonsense regarding memcpy always overflowing.
+  // Plus, this additional switching seems redundant since the interpreter
+  // would've done it already, and knows the exact type.
+  mirror::Object* result = nullptr;
+  static_assert(ShortyFieldTypeTraits::IsObjectType<decltype(result)>(),
+                "result must be an object type");
+  switch (variable_type) {
+    case ShortyFieldType::kObject:
+      // TODO: This seems unsafe. This may need to use gcroots.
+      static_assert(kClosureSupportsGarbageCollection == false,
+                    "May need GcRoots and definitely need mutator locks");
+      {
+        mirror::CompressedReference<mirror::Object> compressed_result;
+        CopyUnsafeAtOffset<uint32_t>(offset, &compressed_result);
+        result = compressed_result.AsMirrorPtr();
+      }
+      break;
+    default:
+      CHECK(false)
+          << "expected a valid shorty type but got " << static_cast<char>(variable_type);
+      UNREACHABLE();
+  }
+
+  return result;
+}
+
+size_t Closure::GetCapturedClosureSize(size_t index) const {
+  DCHECK(GetCapturedShortyType(index).IsLambda());
+  size_t offset = GetCapturedVariableOffset(index);
+
+  auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
+  size_t closure_size = GetClosureSize(captured_ptr + offset);
+
+  return closure_size;
+}
+
+void Closure::CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const {
+  DCHECK(GetCapturedShortyType(index).IsLambda());
+  size_t offset = GetCapturedVariableOffset(index);
+
+  auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
+  size_t closure_size = GetClosureSize(captured_ptr + offset);
+
+  static_assert(ShortyFieldTypeTraits::IsLambdaType<Closure*>(),
+                "result must be a lambda type");
+
+  CopyUnsafeAtOffset<Closure>(offset, destination, closure_size, destination_room);
+}
+
+size_t Closure::GetCapturedVariableOffset(size_t index) const {
+  VariableInfo variable_info =
+      ParseTypeDescriptor<VariableInfo::kOffset>(GetCapturedVariablesTypeDescriptor(),
+                                                 index);
+
+  size_t offset = variable_info.offset_;
+
+  return offset;
+}
+
+void Closure::GetCapturedVariableTypeAndOffset(size_t index,
+                                               ShortyFieldType* out_type,
+                                               size_t* out_offset) const {
+  DCHECK(out_type != nullptr);
+  DCHECK(out_offset != nullptr);
+
+  static constexpr const VariableInfo::Flags kVariableTypeAndOffset =
+      static_cast<VariableInfo::Flags>(VariableInfo::kVariableType | VariableInfo::kOffset);
+  VariableInfo variable_info =
+      ParseTypeDescriptor<kVariableTypeAndOffset>(GetCapturedVariablesTypeDescriptor(),
+                                                  index);
+
+  ShortyFieldType variable_type = variable_info.variable_type_;
+  size_t offset = variable_info.offset_;
+
+  *out_type = variable_type;
+  *out_offset = offset;
+}
+
+template <typename T>
+void Closure::CopyUnsafeAtOffset(size_t offset,
+                                 void* destination,
+                                 size_t src_size,
+                                 size_t destination_room) const {
+  DCHECK_GE(destination_room, src_size);
+  const uint8_t* data_ptr = GetUnsafeAtOffset<T>(offset);
+  // Copy src_size bytes rather than sizeof(T), since T may be dynamically sized (e.g. Closure).
+  memcpy(destination, data_ptr, src_size);
+}
+
+// TODO: This is kind of ugly. I would prefer an unaligned_ptr<Closure> here.
+// Unfortunately C++ doesn't let you lower the alignment, i.e. 'alignas(1) Closure*' is not legal.
+size_t Closure::GetClosureSize(const uint8_t* closure) {
+  DCHECK(closure != nullptr);
+
+  static_assert(!std::is_base_of<mirror::Object, Closure>::value,
+                "It might be unsafe to call memcpy on a managed object");
+
+  // Safe as long as it's not a mirror Object.
+  // TODO: Should probably wrap this in like MemCpyNative or some such which statically asserts
+  // we aren't trying to copy mirror::Object data around.
+  ArtLambdaMethod* closure_info;
+  memcpy(&closure_info, closure + offsetof(Closure, lambda_info_), sizeof(closure_info));
+
+  if (LIKELY(closure_info->IsStaticSize())) {
+    return closure_info->GetStaticClosureSize();
+  }
+
+  // The size is dynamic, so we need to read it from captured_variables_ portion.
+  size_t dynamic_size;
+  memcpy(&dynamic_size,
+         closure + offsetof(Closure, captured_[0].dynamic_.size_),
+         sizeof(dynamic_size));
+  static_assert(sizeof(dynamic_size) == sizeof(captured_[0].dynamic_.size_),
+                "Dynamic size type must match the structural type of the size");
+
+  DCHECK_GE(dynamic_size, closure_info->GetStaticClosureSize());
+  return dynamic_size;
+}
+
+size_t Closure::GetStartingOffset() const {
+  static constexpr const size_t captured_offset = offsetof(Closure, captured_);
+  if (LIKELY(lambda_info_->IsStaticSize())) {
+    return offsetof(Closure, captured_[0].static_variables_) - captured_offset;
+  } else {
+    return offsetof(Closure, captured_[0].dynamic_.variables_) - captured_offset;
+  }
+}
+
+}  // namespace lambda
+}  // namespace art
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
new file mode 100644
index 0000000..60d117e
--- /dev/null
+++ b/runtime/lambda/closure.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_CLOSURE_H_
+#define ART_RUNTIME_LAMBDA_CLOSURE_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"  // For Locks::mutator_lock_.
+#include "lambda/shorty_field_type.h"
+
+#include <stdint.h>
+
+namespace art {
+class ArtMethod;  // forward declaration
+
+namespace mirror {
+class Object;  // forward declaration
+}  // namespace mirror
+
+namespace lambda {
+class ArtLambdaMethod;  // forward declaration
+class ClosureBuilder;   // forward declaration
+
+// Inline representation of a lambda closure.
+// Contains the target method and the set of packed captured variables as a copy.
+//
+// The closure itself is logically immutable, although in practice any object references
+// it (recursively) contains can be moved and updated by the GC.
+struct PACKED(sizeof(ArtLambdaMethod*)) Closure {
+  // Get the size of the Closure in bytes.
+  // This is necessary in order to allocate a large enough area to copy the Closure into.
+  // Do *not* copy the closure with memcpy, since references also need to get moved.
+  size_t GetSize() const;
+
+  // Copy this closure into the target, whose memory size is specified by target_size.
+  // Any object references are fixed up during the copy (if there was a read barrier).
+  // The target_size must be at least as large as GetSize().
+  void CopyTo(void* target, size_t target_size) const;
+
+  // How many variables were captured?
+  size_t GetNumberOfCapturedVariables() const;
+
+  // Returns a type descriptor string that represents each captured variable.
+  // e.g. "Ljava/lang/Object;ZB" would mean a capture tuple of (Object, boolean, byte)
+  const char* GetCapturedVariablesTypeDescriptor() const;
+
+  // Returns the short type for the captured variable at index.
+  // Index must be less than the number of captured variables.
+  ShortyFieldType GetCapturedShortyType(size_t index) const;
+
+  // Returns the 32-bit representation of a non-wide primitive at the captured variable index.
+  // Smaller types are zero extended.
+  // Index must be less than the number of captured variables.
+  uint32_t GetCapturedPrimitiveNarrow(size_t index) const;
+  // Returns the 64-bit representation of a wide primitive at the captured variable index.
+  // Smaller types are zero extended.
+  // Index must be less than the number of captured variables.
+  uint64_t GetCapturedPrimitiveWide(size_t index) const;
+  // Returns the object reference at the captured variable index.
+  // The type at the index *must* be an object reference or a CHECK failure will occur.
+  // Index must be less than the number of captured variables.
+  mirror::Object* GetCapturedObject(size_t index) const SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Gets the size of a nested capture closure in bytes, at the captured variable index.
+  // The type at the index *must* be a lambda closure or a CHECK failure will occur.
+  size_t GetCapturedClosureSize(size_t index) const;
+
+  // Copies a nested lambda closure at the captured variable index.
+  // The destination must have enough room for the closure (see GetCapturedClosureSize).
+  void CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const;
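+
+  // Illustrative usage sketch (not part of this API), assuming index 0 captured an int:
+  //   if (closure->GetCapturedShortyType(0) == ShortyFieldType::kInt) {
+  //     int32_t value = static_cast<int32_t>(closure->GetCapturedPrimitiveNarrow(0));
+  //   }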
+
+ private:
+  // Read out any non-lambda value as a copy.
+  template <typename T>
+  T GetCapturedVariable(size_t index) const;
+
+  // Reconstruct the closure's captured variable info at runtime.
+  struct VariableInfo {
+    size_t index_;
+    ShortyFieldType variable_type_;
+    size_t offset_;
+    size_t count_;
+
+    enum Flags {
+      kIndex = 0x1,
+      kVariableType = 0x2,
+      kOffset = 0x4,
+      kCount = 0x8,
+    };
+
+    // Traverse to the end of the type descriptor list instead of stopping at some particular index.
+    static constexpr size_t kUpToIndexMax = static_cast<size_t>(-1);
+  };
+
+  // Parse a type descriptor, stopping at index "upto_index".
+  // Returns only the information requested in flags. All other fields are indeterminate.
+  template <VariableInfo::Flags flags>
+  inline VariableInfo ALWAYS_INLINE ParseTypeDescriptor(const char* type_descriptor,
+                                                        size_t upto_index) const;
+
+  // Convenience function to call ParseTypeDescriptor with just the type and offset.
+  void GetCapturedVariableTypeAndOffset(size_t index,
+                                        ShortyFieldType* out_type,
+                                        size_t* out_offset) const;
+
+  // How many bytes do the captured variables take up? Runtime sizeof(captured_variables).
+  size_t GetCapturedVariablesSize() const;
+  // Get the size in bytes of the variable_type which is potentially stored at offset.
+  size_t GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const;
+  // Get the starting offset (in bytes) for the 0th captured variable.
+  // All offsets are relative to 'captured_'.
+  size_t GetStartingOffset() const;
+  // Get the offset for this index.
+  // All offsets are relative to 'captured_'.
+  size_t GetCapturedVariableOffset(size_t index) const;
+
+  // Cast the data at '(char*)captured_ + offset' into T, returning its address.
+  // This value should not be dereferenced directly since it is unaligned.
+  template <typename T>
+  inline const uint8_t* GetUnsafeAtOffset(size_t offset) const;
+
+  // Copy the data at the offset into the destination. DCHECKs that
+  // the destination_room is large enough (in bytes) to fit the data.
+  template <typename T>
+  inline void CopyUnsafeAtOffset(size_t offset,
+                                 void* destination,
+                                 size_t src_size = sizeof(T),
+                                 size_t destination_room = sizeof(T)) const;
+
+  // Get the closure size from an unaligned (i.e. interior) closure pointer.
+  static size_t GetClosureSize(const uint8_t* closure);
+
+  ///////////////////////////////////////////////////////////////////////////////////
+
+  // Compile-time known lambda information such as the type descriptor and size.
+  ArtLambdaMethod* lambda_info_;
+
+  // A contiguous list of captured variables, and possibly the closure size.
+  // The runtime size can always be determined through GetSize().
+  union {
+    // Read from here if the closure size is static (ArtLambdaMethod::IsStaticSize).
+    uint8_t static_variables_[0];
+    struct {
+      // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamicSize).
+      size_t size_;  // The lambda_info_ and size_ itself are included as part of the size.
+      uint8_t variables_[0];
+    } dynamic_;
+  } captured_[0];
+  // captured_ will always consist of one array element at runtime.
+  // Set to [0] so that 'size_' is not counted in sizeof(Closure).
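+  //
+  // Rough memory layout (sketch):
+  //   statically sized closure:   [lambda_info_][var_0][var_1]...[var_n]
+  //   dynamically sized closure:  [lambda_info_][size_][var_0][var_1]...[var_n]
+  // Each var_i occupies ShortyFieldType::GetStaticSize() bytes, except nested lambdas,
+  // which occupy their own (possibly dynamic) closure size.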
+
+  friend class ClosureBuilder;
+  friend class ClosureTest;
+};
+
+}  // namespace lambda
+}  // namespace art
+
+#endif  // ART_RUNTIME_LAMBDA_CLOSURE_H_
diff --git a/runtime/lambda/closure_builder-inl.h b/runtime/lambda/closure_builder-inl.h
new file mode 100644
index 0000000..41a803b
--- /dev/null
+++ b/runtime/lambda/closure_builder-inl.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
+#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
+
+#include "lambda/closure_builder.h"
+#include <string.h>
+
+namespace art {
+namespace lambda {
+
+template <typename T, ClosureBuilder::ShortyTypeEnum kShortyType>
+void ClosureBuilder::CaptureVariablePrimitive(T value) {
+  static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a primitive type");
+  const size_t type_size = ShortyFieldType(kShortyType).GetStaticSize();
+  DCHECK_EQ(type_size, sizeof(T));
+
+  // Copy the data while retaining the bit pattern. Strict-aliasing safe.
+  ShortyFieldTypeTraits::MaxType value_storage = 0;
+  memcpy(&value_storage, &value, sizeof(T));
+
+  values_.push_back(value_storage);
+  size_ += sizeof(T);
+}
+
+}  // namespace lambda
+}  // namespace art
+
+#endif  // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
new file mode 100644
index 0000000..56bb9fb
--- /dev/null
+++ b/runtime/lambda/closure_builder.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "lambda/closure_builder.h"
+
+#include "base/macros.h"
+#include "base/value_object.h"
+#include "lambda/art_lambda_method.h"
+#include "lambda/closure.h"
+#include "lambda/shorty_field_type.h"
+#include "runtime/mirror/object_reference.h"
+
+#include <stdint.h>
+#include <vector>
+
+namespace art {
+namespace lambda {
+
+/*
+ * GC support TODOs:
+ * (Although there's some code for storing objects, it is UNIMPLEMENTED(FATAL) because it is
+ * incomplete).
+ *
+ * 1) GC needs to be able to traverse the Closure and visit any references.
+ *    It might be possible to get away with global roots in the short term.
+ *
+ * 2) Add Brooks read barrier support. We can store the black/gray/white bits
+ *    in the lower 2 bits of the lambda art method pointer. Whenever a closure is copied
+ *    [to the stack] we'd need to add a cold path to turn it black.
+ *    (Since there are only 3 colors, the 4th value can indicate no-refs.)
+ *    e.g. 0x0 = gray, 0x1 = white, 0x2 = black, 0x3 = no-nested-references
+ *    - Alternatively the GC can mark reference-less closures as always-black,
+ *      although it would need extra work to check for references.
+ */
+
+void ClosureBuilder::CaptureVariableObject(mirror::Object* object) {
+  auto compressed_reference = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(object);
+  ShortyFieldTypeTraits::MaxType storage = 0;
+
+  static_assert(sizeof(storage) >= sizeof(compressed_reference),
+                "not enough room to store a compressed reference");
+  memcpy(&storage, &compressed_reference, sizeof(compressed_reference));
+
+  values_.push_back(storage);
+  size_ += kObjectReferenceSize;
+
+  static_assert(kObjectReferenceSize == sizeof(compressed_reference), "reference size mismatch");
+
+  // TODO: needs more work to support concurrent GC
+  if (kIsDebugBuild) {
+    if (kUseReadBarrier) {
+      UNIMPLEMENTED(FATAL) << "can't yet safely capture objects with read barrier";
+      UNREACHABLE();
+    }
+  }
+}
+
+void ClosureBuilder::CaptureVariableLambda(Closure* closure) {
+  DCHECK(closure != nullptr);  // null closures not allowed, target method must be null instead.
+  values_.push_back(reinterpret_cast<ShortyFieldTypeTraits::MaxType>(closure));
+
+  if (LIKELY(is_dynamic_size_ == false)) {
+    // Write in the extra bytes to store the dynamic size the first time.
+    is_dynamic_size_ = true;
+    size_ += sizeof(Closure::captured_[0].dynamic_.size_);
+  }
+
+  // A closure may be sized dynamically, so always query it for the true size.
+  size_ += closure->GetSize();
+}
+
+size_t ClosureBuilder::GetSize() const {
+  return size_;
+}
+
+size_t ClosureBuilder::GetCaptureCount() const {
+  return values_.size();
+}
+
+Closure* ClosureBuilder::CreateInPlace(void* memory, ArtLambdaMethod* target_method) const {
+  DCHECK(memory != nullptr);
+  DCHECK(target_method != nullptr);
+  DCHECK_EQ(is_dynamic_size_, target_method->IsDynamicSize());
+
+  CHECK_EQ(target_method->GetNumberOfCapturedVariables(), values_.size())
+    << "number of variables captured at runtime does not match "
+    << "number of variables captured at compile time";
+
+  Closure* closure = new (memory) Closure;
+  closure->lambda_info_ = target_method;
+
+  static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size");
+
+  size_t written_size;
+  if (UNLIKELY(is_dynamic_size_)) {
+    // The closure size must be set dynamically (i.e. nested lambdas).
+    closure->captured_[0].dynamic_.size_ = GetSize();
+    size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_);
+    DCHECK_LE(header_size, GetSize());
+    size_t variables_size = GetSize() - header_size;
+    written_size =
+        WriteValues(target_method,
+                    closure->captured_[0].dynamic_.variables_,
+                    header_size,
+                    variables_size);
+  } else {
+    // The closure size is known statically (i.e. no nested lambdas).
+    DCHECK_EQ(GetSize(), target_method->GetStaticClosureSize());
+    size_t header_size = offsetof(Closure, captured_[0].static_variables_);
+    DCHECK_LE(header_size, GetSize());
+    size_t variables_size = GetSize() - header_size;
+    written_size =
+        WriteValues(target_method,
+                    closure->captured_[0].static_variables_,
+                    header_size,
+                    variables_size);
+  }
+
+  DCHECK_EQ(written_size, closure->GetSize());
+
+  return closure;
+}
+
+size_t ClosureBuilder::WriteValues(ArtLambdaMethod* target_method,
+                                   uint8_t variables[],
+                                   size_t header_size,
+                                   size_t variables_size) const {
+  size_t total_size = header_size;
+  const char* shorty_types = target_method->GetCapturedVariablesShortyTypeDescriptor();
+
+  size_t variables_offset = 0;
+  size_t remaining_size = variables_size;
+
+  const size_t shorty_count = target_method->GetNumberOfCapturedVariables();
+  for (size_t i = 0; i < shorty_count; ++i) {
+    ShortyFieldType shorty{shorty_types[i]};  // NOLINT [readability/braces] [4]
+
+    size_t var_size;
+    if (LIKELY(shorty.IsStaticSize())) {
+      // TODO: needs more work to support concurrent GC, e.g. read barriers
+      if (kUseReadBarrier == false) {
+        if (UNLIKELY(shorty.IsObject())) {
+          UNIMPLEMENTED(FATAL) << "writing objects not yet supported, no GC support";
+        }
+      } else {
+        if (UNLIKELY(shorty.IsObject())) {
+          UNIMPLEMENTED(FATAL) << "can't yet safely write objects with read barrier";
+        }
+      }
+
+      var_size = shorty.GetStaticSize();
+      DCHECK_LE(var_size, sizeof(values_[i]));
+
+      // Safe even for objects (non-read barrier case) if we never suspend
+      // while the ClosureBuilder is live.
+      // FIXME: Need to add GC support for references in a closure.
+      memcpy(&variables[variables_offset], &values_[i], var_size);
+    } else {
+      DCHECK(shorty.IsLambda())
+          << "writing dynamically sized types other than lambdas is not supported";
+
+      ShortyFieldTypeTraits::MaxType closure_raw = values_[i];
+      Closure* nested_closure = reinterpret_cast<Closure*>(closure_raw);
+
+      DCHECK(nested_closure != nullptr);
+      nested_closure->CopyTo(&variables[variables_offset], remaining_size);
+
+      var_size = nested_closure->GetSize();
+    }
+
+    total_size += var_size;
+    DCHECK_GE(remaining_size, var_size);
+    remaining_size -= var_size;
+
+    variables_offset += var_size;
+  }
+
+  DCHECK_EQ('\0', shorty_types[shorty_count]);
+  DCHECK_EQ(variables_offset, variables_size);
+
+  return total_size;
+}
+
+}  // namespace lambda
+}  // namespace art
diff --git a/runtime/lambda/closure_builder.h b/runtime/lambda/closure_builder.h
new file mode 100644
index 0000000..542e12a
--- /dev/null
+++ b/runtime/lambda/closure_builder.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
+#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"  // For Locks::mutator_lock_.
+#include "base/value_object.h"
+#include "lambda/shorty_field_type.h"
+
+#include <stdint.h>
+#include <vector>
+
+namespace art {
+class ArtMethod;  // forward declaration
+
+namespace mirror {
+class Object;  // forward declaration
+}  // namespace mirror
+
+namespace lambda {
+class ArtLambdaMethod;  // forward declaration
+class Closure;  // forward declaration
+
+// Build a closure by capturing variables one at a time.
+// When all variables have been marked captured, the closure can be created in-place into
+// a target memory address.
+//
+// The mutator lock must be held for the duration of the lifetime of this object,
+// since it needs to temporarily store heap references into an internal list.
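+//
+// Illustrative sketch (assumes 'lambda_method' is an ArtLambdaMethod* whose shorty is "I" and
+// 'memory' points to at least GetSize() bytes):
+//   ClosureBuilder builder;  // Mutator lock must already be held.
+//   builder.CaptureVariablePrimitive<int32_t, ShortyFieldType::kInt>(123);
+//   Closure* closure = builder.CreateInPlace(memory, lambda_method);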
+class ClosureBuilder : ValueObject {
+ public:
+  using ShortyTypeEnum = decltype(ShortyFieldType::kByte);
+
+  // Mark this primitive value to be captured as the specified type.
+  template <typename T, ShortyTypeEnum kShortyType>
+  void CaptureVariablePrimitive(T value);
+
+  // Mark this object reference to be captured.
+  void CaptureVariableObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Mark this lambda closure to be captured.
+  void CaptureVariableLambda(Closure* closure);
+
+  // Get the size (in bytes) of the closure.
+  // This size is used to be able to allocate memory large enough to write the closure into.
+  // Call 'CreateInPlace' to actually write the closure out.
+  size_t GetSize() const;
+
+  // Returns how many variables have been captured so far.
+  size_t GetCaptureCount() const;
+
+  // Creates a closure in-place and writes out the data into 'memory'.
+  // Memory must be at least 'GetSize' bytes large.
+  // All previously marked data to be captured is now written out.
+  Closure* CreateInPlace(void* memory, ArtLambdaMethod* target_method) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Locks need to be held for entire lifetime of ClosureBuilder.
+  ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
+  {}
+
+  // Locks need to be held for entire lifetime of ClosureBuilder.
+  ~ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
+  {}
+
+ private:
+  // Initial size a closure starts out before any variables are written.
+  // Header size only.
+  static constexpr size_t kInitialSize = sizeof(ArtLambdaMethod*);
+
+  // Write a Closure's variables field from the captured variables.
+  // variables_size specified in bytes, and only includes enough room to write variables into.
+  // Returns the calculated actual size of the closure.
+  size_t WriteValues(ArtLambdaMethod* target_method,
+                     uint8_t variables[],
+                     size_t header_size,
+                     size_t variables_size) const SHARED_REQUIRES(Locks::mutator_lock_);
+
+  size_t size_ = kInitialSize;
+  bool is_dynamic_size_ = false;
+  std::vector<ShortyFieldTypeTraits::MaxType> values_;
+};
+
+}  // namespace lambda
+}  // namespace art
+
+#endif  // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
diff --git a/runtime/lambda/closure_test.cc b/runtime/lambda/closure_test.cc
new file mode 100644
index 0000000..7c1bd0d
--- /dev/null
+++ b/runtime/lambda/closure_test.cc
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_method.h"
+#include "lambda/art_lambda_method.h"
+#include "lambda/closure.h"
+#include "lambda/closure_builder.h"
+#include "lambda/closure_builder-inl.h"
+#include "utils.h"
+
+#include <numeric>
+#include <stdint.h>
+#include <type_traits>
+#include "gtest/gtest.h"
+
+// Turn this on for some extra printfs to help with debugging, since some code is optimized out.
+static constexpr const bool kDebuggingClosureTest = true;
+
+namespace std {
+  using Closure = art::lambda::Closure;
+
+  // Specialize std::default_delete so it knows how to properly delete closures
+  // through the way we allocate them in this test.
+  //
+  // This is test-only because we don't want the rest of Art to do this.
+  template <>
+  struct default_delete<Closure> {
+    void operator()(Closure* closure) const {
+      delete[] reinterpret_cast<char*>(closure);
+    }
+  };
+}  // namespace std
+
+namespace art {
+
+// Fake lock acquisition to please clang lock checker.
+// This doesn't actually acquire any locks because we don't need multiple threads in this gtest.
+struct SCOPED_CAPABILITY ScopedFakeLock {
+  explicit ScopedFakeLock(MutatorMutex& mu) ACQUIRE(mu)
+      : mu_(mu) {
+  }
+
+  ~ScopedFakeLock() RELEASE()
+  {}
+
+  MutatorMutex& mu_;
+};
+
+namespace lambda {
+
+class ClosureTest : public ::testing::Test {
+ public:
+  ClosureTest() = default;
+  ~ClosureTest() = default;
+
+ protected:
+  static void SetUpTestCase() {
+  }
+
+  virtual void SetUp() {
+    // Create a completely dummy method here.
+    // It's "OK" because the Closure never needs to look inside of the ArtMethod
+    // (it just needs to be non-null).
+    uintptr_t ignore = 0xbadbad;
+    fake_method_ = reinterpret_cast<ArtMethod*>(ignore);
+  }
+
+  static ::testing::AssertionResult IsResultSuccessful(bool result) {
+    if (result) {
+      return ::testing::AssertionSuccess();
+    } else {
+      return ::testing::AssertionFailure();
+    }
+  }
+
+  // Create a closure that captures the static variables from 'args' by-value.
+  // The lambda method's captured variables types must match the ones in 'args'.
+  // -- This creates the closure directly in-memory by using memcpy.
+  template <typename ... Args>
+  static std::unique_ptr<Closure> CreateClosureStaticVariables(ArtLambdaMethod* lambda_method,
+                                                               Args&& ... args) {
+    constexpr size_t header_size = sizeof(ArtLambdaMethod*);
+    const size_t static_size = GetArgsSize(args ...) + header_size;
+    EXPECT_GE(static_size, sizeof(Closure));
+
+    // Can't just 'new' the Closure since we don't know the size up front.
+    char* closure_as_char_array = new char[static_size];
+    Closure* closure_ptr = new (closure_as_char_array) Closure;
+
+    // Set up the data
+    closure_ptr->lambda_info_ = lambda_method;
+    CopyArgs(closure_ptr->captured_[0].static_variables_, args ...);
+
+    // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
+    return std::unique_ptr<Closure>(closure_ptr);  // NOLINT [whitespace/braces] [5]
+  }
+
+  // Copy variadic arguments into the destination array with memcpy.
+  template <typename T, typename ... Args>
+  static void CopyArgs(uint8_t destination[], T&& arg, Args&& ... args) {
+    memcpy(destination, &arg, sizeof(arg));
+    CopyArgs(destination + sizeof(arg), args ...);
+  }
+
+  // Base case: Done.
+  static void CopyArgs(uint8_t destination[]) {
+    UNUSED(destination);
+  }
+
+  // Create a closure that captures the static variables from 'args' by-value.
+  // The lambda method's captured variables types must match the ones in 'args'.
+  // -- This uses ClosureBuilder interface to set up the closure indirectly.
+  template <typename ... Args>
+  static std::unique_ptr<Closure> CreateClosureStaticVariablesFromBuilder(
+      ArtLambdaMethod* lambda_method,
+      Args&& ... args) {
+    // Acquire a fake lock since closure_builder needs it.
+    ScopedFakeLock fake_lock(*Locks::mutator_lock_);
+
+    ClosureBuilder closure_builder;
+    CaptureVariableFromArgsList(/*out*/closure_builder, args ...);
+
+    EXPECT_EQ(sizeof...(args), closure_builder.GetCaptureCount());
+
+    constexpr size_t header_size = sizeof(ArtLambdaMethod*);
+    const size_t static_size = GetArgsSize(args ...) + header_size;
+    EXPECT_GE(static_size, sizeof(Closure));
+
+    // For static variables, no nested closure, so size must match exactly.
+    EXPECT_EQ(static_size, closure_builder.GetSize());
+
+    // Can't just 'new' the Closure since we don't know the size up front.
+    char* closure_as_char_array = new char[static_size];
+    Closure* closure_ptr = new (closure_as_char_array) Closure;
+
+    // The closure builder packs the captured variables into a Closure.
+    closure_builder.CreateInPlace(closure_ptr, lambda_method);
+
+    // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
+    return std::unique_ptr<Closure>(closure_ptr);  // NOLINT [whitespace/braces] [5]
+  }
+
+  // Call the correct ClosureBuilder::CaptureVariableXYZ function based on the type of args.
+  // Invokes for each arg in args.
+  template <typename ... Args>
+  static void CaptureVariableFromArgsList(/*out*/ClosureBuilder& closure_builder, Args ... args) {
+    int ignore[] = {
+        (CaptureVariableFromArgs(/*out*/closure_builder, args),0)...  // NOLINT [whitespace/comma] [3]
+    };
+    UNUSED(ignore);
+  }
+
+  // ClosureBuilder::CaptureVariablePrimitive for types that are primitive only.
+  template <typename T>
+  typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveType<T>()>::type
+  static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, T value) {
+    static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a shorty primitive");
+    closure_builder.CaptureVariablePrimitive<T, ShortyFieldTypeSelectEnum<T>::value>(value);
+  }
+
+  // ClosureBuilder::CaptureVariableObject for types that are objects only.
+  template <typename T>
+  typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
+  static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, const T* object) {
+    ScopedFakeLock fake_lock(*Locks::mutator_lock_);
+    closure_builder.CaptureVariableObject(object);
+  }
+
+  // Sum of sizeof(Args...).
+  template <typename T, typename ... Args>
+  static constexpr size_t GetArgsSize(T&& arg, Args&& ... args) {
+    return sizeof(arg) + GetArgsSize(args ...);
+  }
+
+  // Base case: Done.
+  static constexpr size_t GetArgsSize() {
+    return 0;
+  }
+
+  // Take "U" and memcpy it into a "T". T starts out as (T)0.
+  template <typename T, typename U>
+  static T ExpandingBitCast(const U& val) {
+    static_assert(sizeof(T) >= sizeof(U), "U too large");
+    T new_val = static_cast<T>(0);
+    memcpy(&new_val, &val, sizeof(U));
+    return new_val;
+  }
+
+  // Templatized extraction from closures by checking their type with enable_if.
+  template <typename T>
+  static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>()>::type
+  ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
+    EXPECT_EQ(ExpandingBitCast<uint32_t>(value), closure->GetCapturedPrimitiveNarrow(index))
+        << " with index " << index;
+  }
+
+  template <typename T>
+  static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveWideType<T>()>::type
+  ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
+    EXPECT_EQ(ExpandingBitCast<uint64_t>(value), closure->GetCapturedPrimitiveWide(index))
+        << " with index " << index;
+  }
+
+  // Templatized SFINAE for Objects so we can get better error messages.
+  template <typename T>
+  static typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
+  ExpectCapturedVariable(const Closure* closure, size_t index, const T* object) {
+    EXPECT_EQ(object, closure->GetCapturedObject(index))
+        << " with index " << index;
+  }
+
+  template <typename ... Args>
+  void TestPrimitive(const char *descriptor, Args ... args) {
+    const char* shorty = descriptor;
+
+    SCOPED_TRACE(descriptor);
+
+    ASSERT_EQ(strlen(shorty), sizeof...(args))
+        << "test error: descriptor must have same # of types as the # of captured variables";
+
+    // Important: This fake lambda method needs to out-live any Closures we create with it.
+    ArtLambdaMethod lambda_method{fake_method_,                    // NOLINT [whitespace/braces] [5]
+                                  descriptor,                      // NOLINT [whitespace/blank_line] [2]
+                                  shorty,
+                                 };
+
+    std::unique_ptr<Closure> closure_a;
+    std::unique_ptr<Closure> closure_b;
+
+    // Test the closure twice when it's constructed in different ways.
+    {
+      // Create the closure in a "raw" manner, that is directly with memcpy
+      // since we know the underlying data format.
+      // This simulates how the compiler would lay out the data directly.
+      SCOPED_TRACE("raw closure");
+      std::unique_ptr<Closure> closure_raw = CreateClosureStaticVariables(&lambda_method, args ...);
+
+      if (kDebuggingClosureTest) {
+        std::cerr << "closure raw address: " << closure_raw.get() << std::endl;
+      }
+      TestPrimitiveWithClosure(closure_raw.get(), descriptor, shorty, args ...);
+      closure_a = std::move(closure_raw);
+    }
+
+    {
+      // Create the closure with the ClosureBuilder, which is done indirectly.
+      // This simulates how the interpreter would create the closure dynamically at runtime.
+      SCOPED_TRACE("closure from builder");
+      std::unique_ptr<Closure> closure_built =
+          CreateClosureStaticVariablesFromBuilder(&lambda_method, args ...);
+      if (kDebuggingClosureTest) {
+        std::cerr << "closure built address: " << closure_built.get() << std::endl;
+      }
+      TestPrimitiveWithClosure(closure_built.get(), descriptor, shorty, args ...);
+      closure_b = std::move(closure_built);
+    }
+
+    // The closures should be identical memory-wise as well.
+    EXPECT_EQ(closure_a->GetSize(), closure_b->GetSize());
+    EXPECT_TRUE(memcmp(closure_a.get(),
+                       closure_b.get(),
+                       std::min(closure_a->GetSize(), closure_b->GetSize())) == 0);
+  }
+
+  template <typename ... Args>
+  static void TestPrimitiveWithClosure(Closure* closure,
+                                       const char* descriptor,
+                                       const char* shorty,
+                                       Args ... args) {
+    EXPECT_EQ(sizeof(ArtLambdaMethod*) + GetArgsSize(args...), closure->GetSize());
+    EXPECT_EQ(sizeof...(args), closure->GetNumberOfCapturedVariables());
+    EXPECT_STREQ(descriptor, closure->GetCapturedVariablesTypeDescriptor());
+    TestPrimitiveExpects(closure, shorty, /*index*/0, args ...);
+  }
+
+  // Call EXPECT_EQ for each argument in the closure's #GetCapturedX.
+  template <typename T, typename ... Args>
+  static void TestPrimitiveExpects(
+      const Closure* closure, const char* shorty, size_t index, T arg, Args ... args) {
+    ASSERT_EQ(ShortyFieldType(shorty[index]).GetStaticSize(), sizeof(T))
+        << "Test error: Type mismatch at index " << index;
+    ExpectCapturedVariable(closure, index, arg);
+    EXPECT_EQ(ShortyFieldType(shorty[index]), closure->GetCapturedShortyType(index));
+    TestPrimitiveExpects(closure, shorty, index + 1, args ...);
+  }
+
+  // Base case for EXPECT_EQ.
+  static void TestPrimitiveExpects(const Closure* closure, const char* shorty, size_t index) {
+    UNUSED(closure, shorty, index);
+  }
+
+  ArtMethod* fake_method_;
+};
+
+TEST_F(ClosureTest, TestTrivial) {
+  ArtLambdaMethod lambda_method{fake_method_,                    // NOLINT [whitespace/braces] [5]
+                                "",  // No captured variables    // NOLINT [whitespace/blank_line] [2]
+                                "",  // No captured variables
+                               };
+
+  std::unique_ptr<Closure> closure = CreateClosureStaticVariables(&lambda_method);
+
+  EXPECT_EQ(sizeof(ArtLambdaMethod*), closure->GetSize());
+  EXPECT_EQ(0u, closure->GetNumberOfCapturedVariables());
+}  // TEST_F
+
+TEST_F(ClosureTest, TestPrimitiveSingle) {
+  TestPrimitive("Z", true);
+  TestPrimitive("B", int8_t(0xde));
+  TestPrimitive("C", uint16_t(0xbeef));
+  TestPrimitive("S", int16_t(0xdead));
+  TestPrimitive("I", int32_t(0xdeadbeef));
+  TestPrimitive("F", 0.123f);
+  TestPrimitive("J", int64_t(0xdeadbeef00c0ffee));
+  TestPrimitive("D", 123.456);
+}  // TEST_F
+
+TEST_F(ClosureTest, TestPrimitiveMany) {
+  TestPrimitive("ZZ", true, false);
+  TestPrimitive("ZZZ", true, false, true);
+  TestPrimitive("BBBB", int8_t(0xde), int8_t(0xa0), int8_t(0xff), int8_t(0xcc));
+  TestPrimitive("CC", uint16_t(0xbeef), uint16_t(0xdead));
+  TestPrimitive("SSSS", int16_t(0xdead), int16_t(0xc0ff), int16_t(0xf000), int16_t(0xbaba));
+  TestPrimitive("III", int32_t(0xdeadbeef), int32_t(0xc0ffee), int32_t(0xbeefdead));
+  TestPrimitive("FF", 0.123f, 555.666f);
+  TestPrimitive("JJJ", int64_t(0xdeadbeef00c0ffee), int64_t(0x123), int64_t(0xc0ffee));
+  TestPrimitive("DD", 123.456, 777.888);
+}  // TEST_F
+
+TEST_F(ClosureTest, TestPrimitiveMixed) {
+  TestPrimitive("ZZBBCCSSIIFFJJDD",
+                true, false,
+                int8_t(0xde), int8_t(0xa0),
+                uint16_t(0xbeef), uint16_t(0xdead),
+                int16_t(0xdead), int16_t(0xc0ff),
+                int32_t(0xdeadbeef), int32_t(0xc0ffee),
+                0.123f, 555.666f,
+                int64_t(0xdeadbeef00c0ffee), int64_t(0x123),
+                123.456, 777.888);
+}  // TEST_F
+
+}  // namespace lambda
+}  // namespace art
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
new file mode 100644
index 0000000..46ddaa9
--- /dev/null
+++ b/runtime/lambda/shorty_field_type.h
@@ -0,0 +1,475 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
+#define ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/value_object.h"
+#include "globals.h"
+#include "runtime/primitive.h"
+
+#include <ostream>
+
+namespace art {
+
+namespace mirror {
+class Object;  // forward declaration
+}  // namespace mirror
+
+namespace lambda {
+
+struct Closure;  // forward declaration
+
+// TODO: Refactor together with primitive.h
+
+// The short form of a field type descriptor. Corresponds to ShortyFieldType in dex specification.
+// Only types usable by a field (and locals) are allowed (i.e. no void type).
+// Note that both arrays and objects are treated as 'L'.
+//
+// This is effectively a 'char' enum-like zero-cost type-safe wrapper with extra helper functions.
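+//
+// A brief usage sketch (illustrative only; see shorty_field_type_test.cc for real coverage):
+//
+//     ShortyFieldType int_type = ShortyFieldType::kInt;  // Construct from the enum.
+//     ShortyFieldType parsed{'J'};                       // Construct from a char (DCHECKed).
+//     CHECK(parsed.IsPrimitiveWide());
+//     CHECK_EQ(sizeof(int32_t), int_type.GetStaticSize());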
+struct ShortyFieldType : ValueObject {
+  // Use as if this was an enum class, e.g. 'ShortyFieldType::kBoolean'.
+  enum : char {
+    // Primitives (Narrow):
+    kBoolean = 'Z',
+    kByte = 'B',
+    kChar = 'C',
+    kShort = 'S',
+    kInt = 'I',
+    kFloat = 'F',
+    // Primitives (Wide):
+    kLong = 'J',
+    kDouble = 'D',
+    // Managed types:
+    kObject = 'L',  // This can also be an array (which is otherwise '[' in a non-shorty).
+    kLambda = '\\',
+  };  // NOTE: This is an anonymous enum so we can get exhaustive switch checking from the compiler.
+
+  // Implicitly construct from the enum above. Value must be one of the enum list members above.
+  // Always safe to use, does not do any DCHECKs.
+  inline constexpr ShortyFieldType(decltype(kByte) c) : value_(c) {
+  }
+
+  // Default constructor. The initial value is undefined. Initialize before calling methods.
+  // Inherently unsafe; it exists only as a convenience for code that assigns a real value later.
+  explicit ShortyFieldType() : value_(StaticCastValue(0)) {
+  }
+
+  // Explicitly construct from a char. Value must be one of the enum list members above.
+  // Conversion is potentially unsafe, so DCHECKing is performed.
+  explicit inline ShortyFieldType(char c) : value_(StaticCastValue(c)) {
+    if (kIsDebugBuild) {
+      // Verify at debug-time that our conversion is safe.
+      ShortyFieldType ignored;
+      DCHECK(MaybeCreate(c, &ignored)) << "unknown shorty field type '" << c << "'";
+    }
+  }
+
+  // Attempts to parse the character in 'shorty_field_type' into its strongly typed version.
+  // Returns false if the character was out of range of the grammar.
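+  //
+  // For example (these cases are exercised by shorty_field_type_test.cc):
+  //     ShortyFieldType out;
+  //     MaybeCreate('Z', &out);  // Returns true, 'out' is set to kBoolean.
+  //     MaybeCreate('V', &out);  // Returns false; 'V' (void) is not a valid field type.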
+  static bool MaybeCreate(char shorty_field_type, ShortyFieldType* out) {
+    DCHECK(out != nullptr);
+    switch (shorty_field_type) {
+      case kBoolean:
+      case kByte:
+      case kChar:
+      case kShort:
+      case kInt:
+      case kFloat:
+      case kLong:
+      case kDouble:
+      case kObject:
+      case kLambda:
+        *out = ShortyFieldType(static_cast<decltype(kByte)>(shorty_field_type));
+        return true;
+      default:
+        break;
+    }
+
+    return false;
+  }
+
+  // Convert the first type in a field type descriptor string into a shorty.
+  // Arrays are converted into objects.
+  // Does not work for 'void' types (as they are illegal in a field type descriptor).
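+  //
+  // For example (these mappings are exercised by shorty_field_type_test.cc):
+  //     "I"          -> kInt
+  //     "LObject;"   -> kObject
+  //     "[[LObject;" -> kObject   (arrays decay to object references)
+  //     "\Closure;"  -> kLambda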
+  static ShortyFieldType CreateFromFieldTypeDescriptor(const char* field_type_descriptor) {
+    DCHECK(field_type_descriptor != nullptr);
+    char c = *field_type_descriptor;
+    if (UNLIKELY(c == kArray)) {  // Arrays are treated as object references.
+      c = kObject;
+    }
+    return ShortyFieldType{c};  // NOLINT [readability/braces] [4]
+  }
+
+  // Parse the first type in the field type descriptor string into a shorty.
+  // See CreateFromFieldTypeDescriptor for more details.
+  //
+  // Returns a pointer into the middle of field_type_descriptor, just past the type
+  // that was parsed (i.e. pointing at the next shorty type), or null if there are
+  // no more types to parse.
+  //
+  // DCHECKs that each of the nested types is a valid shorty field type. This
+  // means the type descriptor must be already valid.
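+  //
+  // Example walk (illustrative; the same pattern is exercised by shorty_field_type_test.cc):
+  //     const char* remaining = "LObject;LObject;SSI";
+  //     ShortyFieldType out;
+  //     remaining = ParseFromFieldTypeDescriptor(remaining, &out);  // out == kObject
+  //     remaining = ParseFromFieldTypeDescriptor(remaining, &out);  // out == kObject
+  //     // Further calls yield kShort, kShort, kInt, then null once the string is consumed.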
+  static const char* ParseFromFieldTypeDescriptor(const char* field_type_descriptor,
+                                                  ShortyFieldType* out_type) {
+    DCHECK(field_type_descriptor != nullptr);
+
+    if (UNLIKELY(field_type_descriptor[0] == '\0')) {
+      // Handle empty strings by immediately returning null.
+      return nullptr;
+    }
+
+    // All non-empty strings must be a valid list of field type descriptors, otherwise
+    // the DCHECKs will kick in and the program will crash.
+    const char shorter_type = *field_type_descriptor;
+
+    ShortyFieldType safe_type;
+    bool type_set = MaybeCreate(shorter_type, &safe_type);
+
+    // Lambda that keeps skipping characters until it sees ';'.
+    // Stops one character -after- the ';'.
+    auto skip_until_semicolon = [&field_type_descriptor]() {
+      while (*field_type_descriptor != ';' && *field_type_descriptor != '\0') {
+        ++field_type_descriptor;
+      }
+      DCHECK_NE(*field_type_descriptor, '\0')
+          << " type descriptor terminated too early: " << field_type_descriptor;
+      ++field_type_descriptor;  // Skip the ';'
+    };
+
+    ++field_type_descriptor;
+    switch (shorter_type) {
+      case kObject:
+        skip_until_semicolon();
+
+        DCHECK(type_set);
+        DCHECK(safe_type == kObject);
+        break;
+      case kArray:
+        // Strip all of the leading '['s; we don't care whether it's a multi-dimensional array.
+        while (*field_type_descriptor == '[' && *field_type_descriptor != '\0') {
+          ++field_type_descriptor;
+        }
+        DCHECK_NE(*field_type_descriptor, '\0')
+            << " type descriptor terminated too early: " << field_type_descriptor;
+        // Either a primitive, object, or closure left. No more arrays.
+        {
+          // Now skip all the characters that form the array's interior-most element type
+          // (which itself is guaranteed not to be an array).
+          ShortyFieldType array_interior_type;
+          type_set = MaybeCreate(*field_type_descriptor, &array_interior_type);
+          DCHECK(type_set) << " invalid remaining type descriptor " << field_type_descriptor;
+
+          // Handle array-of-objects case like [[[[[LObject; and array-of-closures like [[[[[\Foo;
+          if (*field_type_descriptor == kObject || *field_type_descriptor == kLambda) {
+            skip_until_semicolon();
+          } else {
+            // Handle primitives, which are exactly one character that we simply skip.
+            DCHECK(array_interior_type.IsPrimitive());
+            ++field_type_descriptor;
+          }
+        }
+
+        safe_type = kObject;
+        type_set = true;
+        break;
+      case kLambda:
+        skip_until_semicolon();
+
+        DCHECK(safe_type == kLambda);
+        DCHECK(type_set);
+        break;
+      default:
+        DCHECK_NE(kVoid, shorter_type) << "cannot make a ShortyFieldType from a void type";
+        break;
+    }
+
+    DCHECK(type_set) << "invalid shorty type descriptor " << shorter_type;
+
+    *out_type = safe_type;
+    return type_set ? field_type_descriptor : nullptr;
+  }
+
+  // Explicitly convert to a char.
+  inline explicit operator char() const {
+    return value_;
+  }
+
+  // Is this a primitive?
+  inline bool IsPrimitive() const {
+    return IsPrimitiveNarrow() || IsPrimitiveWide();
+  }
+
+  // Is this a narrow primitive (i.e. can fit into 1 virtual register)?
+  inline bool IsPrimitiveNarrow() const {
+    switch (value_) {
+      case kBoolean:
+      case kByte:
+      case kChar:
+      case kShort:
+      case kInt:
+      case kFloat:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  // Is this a wide primitive (i.e. needs exactly 2 virtual registers)?
+  inline bool IsPrimitiveWide() const {
+    switch (value_) {
+      case kLong:
+      case kDouble:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  // Is this an object reference (which can also be an array)?
+  inline bool IsObject() const {
+    return value_ == kObject;
+  }
+
+  // Is this a lambda?
+  inline bool IsLambda() const {
+    return value_ == kLambda;
+  }
+
+  // Is the size of this (to store inline as a field) always known at compile-time?
+  inline bool IsStaticSize() const {
+    return !IsLambda();
+  }
+
+  // Get the compile-time size (to be able to store it inline as a field or on stack).
+  // Dynamically-sized values such as lambdas return the guaranteed lower bound.
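+  //
+  // For example (these values follow directly from the switch below):
+  //     kInt    -> sizeof(int32_t)
+  //     kObject -> kObjectReferenceSize
+  //     kLambda -> sizeof(void*)   (a lower bound; the actual closure may be larger)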
+  inline size_t GetStaticSize() const {
+    switch (value_) {
+      case kBoolean:
+        return sizeof(bool);
+      case kByte:
+        return sizeof(int8_t);
+      case kChar:
+        return sizeof(uint16_t);
+      case kShort:
+        return sizeof(int16_t);
+      case kInt:
+        return sizeof(int32_t);
+      case kLong:
+        return sizeof(int64_t);
+      case kFloat:
+        return sizeof(float);
+      case kDouble:
+        return sizeof(double);
+      case kObject:
+        return kObjectReferenceSize;
+      case kLambda:
+        return sizeof(void*);  // Large enough to store an ArtLambdaMethod*.
+      default:
+        DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'";
+        UNREACHABLE();
+    }
+  }
+
+  // Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection.
+  inline operator decltype(kByte)() const {
+    return value_;
+  }
+
+  // Returns a read-only static string representing the enum name, useful for printing/debug only.
+  inline const char* ToString() const {
+    switch (value_) {
+      case kBoolean:
+        return "kBoolean";
+      case kByte:
+        return "kByte";
+      case kChar:
+        return "kChar";
+      case kShort:
+        return "kShort";
+      case kInt:
+        return "kInt";
+      case kLong:
+        return "kLong";
+      case kFloat:
+        return "kFloat";
+      case kDouble:
+        return "kDouble";
+      case kObject:
+        return "kObject";
+      case kLambda:
+        return "kLambda";
+      default:
+        // Undefined behavior if we get this far. Pray the compiler gods are merciful.
+        return "<undefined>";
+    }
+  }
+
+ private:
+  static constexpr const char kArray = '[';
+  static constexpr const char kVoid  = 'V';
+
+  // Helper to statically cast anything into our nested anonymous enum type.
+  template <typename T>
+  inline static decltype(kByte) StaticCastValue(const T& anything) {
+    return static_cast<decltype(value_)>(anything);
+  }
+
+  // The only field in this struct.
+  decltype(kByte) value_;
+};
+
+// Print to an output stream.
+inline std::ostream& operator<<(std::ostream& ostream, ShortyFieldType shorty) {
+  return ostream << shorty.ToString();
+}
+
+static_assert(sizeof(ShortyFieldType) == sizeof(char),
+              "ShortyFieldType must be lightweight just like a char");
+
+// Compile-time trait information regarding the ShortyFieldType.
+// Used by static_asserts to verify that the templates are correctly used at compile-time.
+//
+// For example,
+//     ShortyFieldTypeTraits::IsPrimitiveNarrowType<int64_t>() == true
+//     ShortyFieldTypeTraits::IsObjectType<mirror::Object*>() == true
+struct ShortyFieldTypeTraits {
+  // A type guaranteed to be large enough to hold any of the shorty field types.
+  using MaxType = uint64_t;
+
+  // Type traits: Returns true if 'T' is a valid type that can be represented by a shorty field type.
+  template <typename T>
+  static inline constexpr bool IsType() {
+    return IsPrimitiveType<T>() || IsObjectType<T>() || IsLambdaType<T>();
+  }
+
+  // Returns true if 'T' is a primitive type (i.e. a built-in without nested references).
+  template <typename T>
+  static inline constexpr bool IsPrimitiveType() {
+    return IsPrimitiveNarrowType<T>() || IsPrimitiveWideType<T>();
+  }
+
+  // Returns true if 'T' is a primitive type that is narrow (i.e. can be stored into 1 vreg).
+  template <typename T>
+  static inline constexpr bool IsPrimitiveNarrowType() {
+    return IsPrimitiveNarrowTypeImpl(static_cast<T* const>(nullptr));
+  }
+
+  // Returns true if 'T' is a primitive type that is wide (i.e. needs 2 vregs for storage).
+  template <typename T>
+  static inline constexpr bool IsPrimitiveWideType() {
+    return IsPrimitiveWideTypeImpl(static_cast<T* const>(nullptr));
+  }
+
+  // Returns true if 'T' is an object (i.e. it is a managed GC reference).
+  // Note: Conceptually this is std::is_base_of-like, but it is implemented as an
+  // exact-match overload for mirror::Object* (see the specializations below).
+  template <typename T>
+  static inline constexpr bool IsObjectType() {
+    return IsObjectTypeImpl(static_cast<T* const>(nullptr));
+  }
+
+  // Returns true if 'T' is a lambda (i.e. it is a closure with unknown static data).
+  template <typename T>
+  static inline constexpr bool IsLambdaType() {
+    return IsLambdaTypeImpl(static_cast<T* const>(nullptr));
+  }
+
+ private:
+#define IS_VALID_TYPE_SPECIALIZATION(type, name) \
+  static inline constexpr bool Is ## name ## TypeImpl(type* const  = 0) { \
+    return true; \
+  } \
+  \
+  static_assert(sizeof(MaxType) >= sizeof(type), "MaxType too small")
+
+  IS_VALID_TYPE_SPECIALIZATION(bool, PrimitiveNarrow);
+  IS_VALID_TYPE_SPECIALIZATION(int8_t, PrimitiveNarrow);
+  IS_VALID_TYPE_SPECIALIZATION(uint8_t, PrimitiveNarrow);  // Not strictly true, but close enough.
+  IS_VALID_TYPE_SPECIALIZATION(int16_t, PrimitiveNarrow);
+  IS_VALID_TYPE_SPECIALIZATION(uint16_t, PrimitiveNarrow);  // Chars are unsigned.
+  IS_VALID_TYPE_SPECIALIZATION(int32_t, PrimitiveNarrow);
+  IS_VALID_TYPE_SPECIALIZATION(uint32_t, PrimitiveNarrow);  // Not strictly true, but close enough.
+  IS_VALID_TYPE_SPECIALIZATION(float, PrimitiveNarrow);
+  IS_VALID_TYPE_SPECIALIZATION(int64_t, PrimitiveWide);
+  IS_VALID_TYPE_SPECIALIZATION(uint64_t, PrimitiveWide);  // Not strictly true, but close enough.
+  IS_VALID_TYPE_SPECIALIZATION(double, PrimitiveWide);
+  IS_VALID_TYPE_SPECIALIZATION(mirror::Object*, Object);
+  IS_VALID_TYPE_SPECIALIZATION(Closure*, Lambda);
+#undef IS_VALID_TYPE_SPECIALIZATION
+
+#define IS_VALID_TYPE_SPECIALIZATION_IMPL(name) \
+  template <typename T> \
+  static inline constexpr bool Is ## name ## TypeImpl(T* const = 0) { \
+    return false; \
+  }
+
+  IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveNarrow);
+  IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveWide);
+  IS_VALID_TYPE_SPECIALIZATION_IMPL(Object);
+  IS_VALID_TYPE_SPECIALIZATION_IMPL(Lambda);
+
+#undef IS_VALID_TYPE_SPECIALIZATION_IMPL
+};
+
+// Maps the ShortyFieldType enum to its C++ type equivalent, exposed via the "type" typedef.
+// For example:
+//     ShortyFieldTypeSelectType<ShortyFieldType::kBoolean>::type => bool
+//     ShortyFieldTypeSelectType<ShortyFieldType::kLong>::type => int64_t
+//
+// Invalid enums will not have the type defined.
+template <decltype(ShortyFieldType::kByte) Shorty>
+struct ShortyFieldTypeSelectType {
+};
+
+// Maps the C++ type to its ShortyFieldType enum equivalent, exposed via the "value" constant.
+// For example:
+//     ShortyFieldTypeSelectEnum<bool>::value => ShortyFieldType::kBoolean
+//     ShortyFieldTypeSelectEnum<int64_t>::value => ShortyFieldType::kLong
+//
+// Signedness must match for a valid select, e.g. uint64_t will not map to kLong, but int64_t will.
+// Invalid types will not have the value defined (see e.g. ShortyFieldTypeTraits::IsType<T>()).
+template <typename T>
+struct ShortyFieldTypeSelectEnum {
+};
+
+#define SHORTY_FIELD_TYPE_SELECT_IMPL(cpp_type, enum_element)      \
+template <> \
+struct ShortyFieldTypeSelectType<ShortyFieldType::enum_element> { \
+  using type = cpp_type; \
+}; \
+\
+template <> \
+struct ShortyFieldTypeSelectEnum<cpp_type> { \
+  static constexpr const auto value = ShortyFieldType::enum_element; \
+}; \
+
+SHORTY_FIELD_TYPE_SELECT_IMPL(bool, kBoolean);
+SHORTY_FIELD_TYPE_SELECT_IMPL(int8_t, kByte);
+SHORTY_FIELD_TYPE_SELECT_IMPL(int16_t, kShort);
+SHORTY_FIELD_TYPE_SELECT_IMPL(uint16_t, kChar);
+SHORTY_FIELD_TYPE_SELECT_IMPL(int32_t, kInt);
+SHORTY_FIELD_TYPE_SELECT_IMPL(float, kFloat);
+SHORTY_FIELD_TYPE_SELECT_IMPL(int64_t, kLong);
+SHORTY_FIELD_TYPE_SELECT_IMPL(double, kDouble);
+SHORTY_FIELD_TYPE_SELECT_IMPL(mirror::Object*, kObject);
+SHORTY_FIELD_TYPE_SELECT_IMPL(Closure*, kLambda);
+
+}  // namespace lambda
+}  // namespace art
+
+#endif  // ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc
new file mode 100644
index 0000000..32bade9
--- /dev/null
+++ b/runtime/lambda/shorty_field_type_test.cc
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "lambda/shorty_field_type.h"
+#include "mirror/object_reference.h"
+
+#include "utils.h"
+#include <numeric>
+#include <stdint.h>
+#include "gtest/gtest.h"
+
+#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
+                                        reinterpret_cast<void*>(nullptr));
+
+namespace art {
+namespace lambda {
+
+class ShortyFieldTypeTest : public ::testing::Test {
+ public:
+  ShortyFieldTypeTest() = default;
+  ~ShortyFieldTypeTest() = default;
+
+ protected:
+  static void SetUpTestCase() {
+  }
+
+  virtual void SetUp() {
+  }
+
+  static ::testing::AssertionResult IsResultSuccessful(bool result) {
+    if (result) {
+      return ::testing::AssertionSuccess();
+    } else {
+      return ::testing::AssertionFailure();
+    }
+  }
+
+  template <typename T>
+  static std::string ListToString(const T& list) {
+    std::stringstream stream;
+
+    stream << "[";
+    for (auto&& val : list) {
+      stream << val << ", ";
+    }
+    stream << "]";
+
+    return stream.str();
+  }
+
+  // Compare two vector-like types for equality.
+  template <typename T>
+  static ::testing::AssertionResult AreListsEqual(const T& expected, const T& actual) {
+    bool success = true;
+    std::stringstream stream;
+
+    if (expected.size() != actual.size()) {
+      success = false;
+      stream << "Expected list size: " << expected.size()
+             << ", but got list size: " << actual.size();
+      stream << std::endl;
+    }
+
+    for (size_t j = 0; j < std::min(expected.size(), actual.size()); ++j) {
+      if (expected[j] != actual[j]) {
+        success = false;
+        stream << "Expected element '" << j << "' to be '" << expected[j] << "', but got actual: '"
+               << actual[j] << "'.";
+        stream << std::endl;
+      }
+    }
+
+    if (success) {
+      return ::testing::AssertionSuccess();
+    }
+
+    stream << "Expected list was: " << ListToString(expected)
+           << ", actual list was: " << ListToString(actual);
+
+    return ::testing::AssertionFailure() << stream.str();
+  }
+
+  static std::vector<ShortyFieldType> ParseLongTypeDescriptorsToList(const char* type_descriptor) {
+    std::vector<ShortyFieldType> lst;
+
+    ShortyFieldType shorty;
+
+    const char* parsed = type_descriptor;
+    while ((parsed = ShortyFieldType::ParseFromFieldTypeDescriptor(parsed, &shorty)) != nullptr) {
+      lst.push_back(shorty);
+    }
+
+    return lst;
+  }
+
+ protected:
+  // Shorthands for the ShortyFieldType constants.
+  // The letters are the same as the JNI letters, with kS_ standing in for the lambda type
+  // since '\' cannot be used in an identifier.
+  static constexpr ShortyFieldType kSZ = ShortyFieldType::kBoolean;
+  static constexpr ShortyFieldType kSB = ShortyFieldType::kByte;
+  static constexpr ShortyFieldType kSC = ShortyFieldType::kChar;
+  static constexpr ShortyFieldType kSS = ShortyFieldType::kShort;
+  static constexpr ShortyFieldType kSI = ShortyFieldType::kInt;
+  static constexpr ShortyFieldType kSF = ShortyFieldType::kFloat;
+  static constexpr ShortyFieldType kSJ = ShortyFieldType::kLong;
+  static constexpr ShortyFieldType kSD = ShortyFieldType::kDouble;
+  static constexpr ShortyFieldType kSL = ShortyFieldType::kObject;
+  static constexpr ShortyFieldType kS_ = ShortyFieldType::kLambda;
+};
+
+TEST_F(ShortyFieldTypeTest, TestMaybeCreate) {
+  ShortyFieldType shorty;
+
+  std::vector<char> shorties = {'Z', 'B', 'C', 'S', 'I', 'F', 'J', 'D', 'L', '\\'};
+
+  // All valid 'shorty' characters are created successfully.
+  for (const char c : shorties) {
+    EXPECT_TRUE(ShortyFieldType::MaybeCreate(c, &shorty)) << c;
+    EXPECT_EQ(c, static_cast<char>(shorty));
+  }
+
+  // All other characters can never be created.
+  for (int c = 0; c <= std::numeric_limits<unsigned char>::max(); ++c) {
+    // Skip the valid characters.
+    if (std::find(shorties.begin(), shorties.end(), c) != shorties.end()) { continue; }
+    // All invalid characters should fail.
+    EXPECT_FALSE(ShortyFieldType::MaybeCreate(static_cast<char>(c), &shorty)) << c;
+  }
+}  // TEST_F
+
+TEST_F(ShortyFieldTypeTest, TestCreateFromFieldTypeDescriptor) {
+  // Sample input.
+  std::vector<const char*> lengthies = {
+      "Z", "B", "C", "S", "I", "F", "J", "D", "LObject;", "\\Closure;",
+      "[Z", "[[B", "[[LObject;"
+  };
+
+  // Expected output.
+  std::vector<ShortyFieldType> expected = {
+      ShortyFieldType::kBoolean,
+      ShortyFieldType::kByte,
+      ShortyFieldType::kChar,
+      ShortyFieldType::kShort,
+      ShortyFieldType::kInt,
+      ShortyFieldType::kFloat,
+      ShortyFieldType::kLong,
+      ShortyFieldType::kDouble,
+      ShortyFieldType::kObject,
+      ShortyFieldType::kLambda,
+      // Arrays are always treated as objects.
+      ShortyFieldType::kObject,
+      ShortyFieldType::kObject,
+      ShortyFieldType::kObject,
+  };
+
+  // All valid lengthy types are correctly turned into the expected shorty type.
+  for (size_t i = 0; i < lengthies.size(); ++i) {
+    EXPECT_EQ(expected[i], ShortyFieldType::CreateFromFieldTypeDescriptor(lengthies[i]));
+  }
+}  // TEST_F
+
+TEST_F(ShortyFieldTypeTest, TestParseFromFieldTypeDescriptor) {
+  // Sample input.
+  std::vector<const char*> lengthies = {
+      // Empty list
+      "",
+      // Primitives
+      "Z", "B", "C", "S", "I", "F", "J", "D",
+      // Non-primitives
+      "LObject;", "\\Closure;",
+      // Arrays (the trickiest case).
+      "[Z", "[[B", "[[LObject;", "[[[[\\Closure;",
+      // Multiple things at once:
+      "ZBCSIFJD",
+      "LObject;LObject;SSI",
+      "[[ZDDZ",
+      "[[LObject;[[Z[F\\Closure;LObject;",
+  };
+
+  // Expected output.
+  std::vector<std::vector<ShortyFieldType>> expected = {
+      // Empty list
+      {},
+      // Primitives
+      {kSZ}, {kSB}, {kSC}, {kSS}, {kSI}, {kSF}, {kSJ}, {kSD},
+      // Non-primitives.
+      { ShortyFieldType::kObject }, { ShortyFieldType::kLambda },
+      // Arrays are always treated as objects.
+      { kSL }, { kSL }, { kSL }, { kSL },
+      // Multiple things at once:
+      { kSZ, kSB, kSC, kSS, kSI, kSF, kSJ, kSD },
+      { kSL, kSL, kSS, kSS, kSI },
+      { kSL, kSD, kSD, kSZ },
+      { kSL, kSL, kSL, kS_, kSL },
+  };
+
+  // Sanity check that the expected/actual lists stay the same size when adding new entries.
+  ASSERT_EQ(expected.size(), lengthies.size());
+
+  // All valid lengthy types are correctly turned into the expected shorty type.
+  for (size_t i = 0; i < expected.size(); ++i) {
+    const std::vector<ShortyFieldType>& expected_list = expected[i];
+    std::vector<ShortyFieldType> actual_list = ParseLongTypeDescriptorsToList(lengthies[i]);
+    EXPECT_TRUE(AreListsEqual(expected_list, actual_list));
+  }
+}  // TEST_F
+
+// Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests.
+template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum>
+struct ShortyTypeCharacteristics {
+  bool is_primitive_ = false;
+  bool is_primitive_narrow_ = false;
+  bool is_primitive_wide_ = false;
+  bool is_object_ = false;
+  bool is_lambda_ = false;
+  size_t size_ = sizeof(T);
+  bool is_dynamic_sized_ = false;
+
+  void CheckExpects() {
+    ShortyFieldType shorty = kShortyEnum;
+
+    // Test the main non-parsing-related ShortyFieldType characteristics.
+    EXPECT_EQ(is_primitive_, shorty.IsPrimitive());
+    EXPECT_EQ(is_primitive_narrow_, shorty.IsPrimitiveNarrow());
+    EXPECT_EQ(is_primitive_wide_, shorty.IsPrimitiveWide());
+    EXPECT_EQ(is_object_, shorty.IsObject());
+    EXPECT_EQ(is_lambda_, shorty.IsLambda());
+    EXPECT_EQ(size_, shorty.GetStaticSize());
+    EXPECT_EQ(is_dynamic_sized_, !shorty.IsStaticSize());
+
+    // Test compile-time ShortyFieldTypeTraits.
+    EXPECT_TRUE(ShortyFieldTypeTraits::IsType<T>());
+    EXPECT_EQ(is_primitive_, ShortyFieldTypeTraits::IsPrimitiveType<T>());
+    EXPECT_EQ(is_primitive_narrow_, ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>());
+    EXPECT_EQ(is_primitive_wide_, ShortyFieldTypeTraits::IsPrimitiveWideType<T>());
+    EXPECT_EQ(is_object_, ShortyFieldTypeTraits::IsObjectType<T>());
+    EXPECT_EQ(is_lambda_, ShortyFieldTypeTraits::IsLambdaType<T>());
+
+    // Test compile-time ShortyFieldType selectors
+    static_assert(std::is_same<T, typename ShortyFieldTypeSelectType<kShortyEnum>::type>::value,
+                  "ShortyFieldType Enum->Type incorrect mapping");
+    auto kActualEnum = ShortyFieldTypeSelectEnum<T>::value;  // Do not ODR-use, avoid linker error.
+    EXPECT_EQ(kShortyEnum, kActualEnum);
+  }
+};
+
+TEST_F(ShortyFieldTypeTest, TestCharacteristicsAndTraits) {
+  // Boolean test
+  {
+    SCOPED_TRACE("boolean");
+    ShortyTypeCharacteristics<bool, ShortyFieldType::kBoolean> chars;
+    chars.is_primitive_ = true;
+    chars.is_primitive_narrow_ = true;
+    chars.CheckExpects();
+  }
+
+  // Byte test
+  {
+    SCOPED_TRACE("byte");
+    ShortyTypeCharacteristics<int8_t, ShortyFieldType::kByte> chars;
+    chars.is_primitive_ = true;
+    chars.is_primitive_narrow_ = true;
+    chars.CheckExpects();
+  }
+
+  // Char test
+  {
+    SCOPED_TRACE("char");
+    ShortyTypeCharacteristics<uint16_t, ShortyFieldType::kChar> chars;  // Char is unsigned.
+    chars.is_primitive_ = true;
+    chars.is_primitive_narrow_ = true;
+    chars.CheckExpects();
+  }
+
+  // Short test
+  {
+    SCOPED_TRACE("short");
+    ShortyTypeCharacteristics<int16_t, ShortyFieldType::kShort> chars;
+    chars.is_primitive_ = true;
+    chars.is_primitive_narrow_ = true;
+    chars.CheckExpects();
+  }
+
+  // Int test
+  {
+    SCOPED_TRACE("int");
+    ShortyTypeCharacteristics<int32_t, ShortyFieldType::kInt> chars;
+    chars.is_primitive_ = true;
+    chars.is_primitive_narrow_ = true;
+    chars.CheckExpects();
+  }
+
+  // Long test
+  {
+    SCOPED_TRACE("long");
+    ShortyTypeCharacteristics<int64_t, ShortyFieldType::kLong> chars;
+    chars.is_primitive_ = true;
+    chars.is_primitive_wide_ = true;
+    chars.CheckExpects();
+  }
+
+  // Float test
+  {
+    SCOPED_TRACE("float");
+    ShortyTypeCharacteristics<float, ShortyFieldType::kFloat> chars;
+    chars.is_primitive_ = true;
+    chars.is_primitive_narrow_ = true;
+    chars.CheckExpects();
+  }
+
+  // Double test
+  {
+    SCOPED_TRACE("double");
+    ShortyTypeCharacteristics<double, ShortyFieldType::kDouble> chars;
+    chars.is_primitive_ = true;
+    chars.is_primitive_wide_ = true;
+    chars.CheckExpects();
+  }
+
+  // Object test
+  {
+    SCOPED_TRACE("object");
+    ShortyTypeCharacteristics<mirror::Object*, ShortyFieldType::kObject> chars;
+    chars.is_object_ = true;
+    chars.size_ = kObjectReferenceSize;
+    chars.CheckExpects();
+    EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::CompressedReference<mirror::Object>));
+  }
+
+  // Lambda test
+  {
+    SCOPED_TRACE("lambda");
+    ShortyTypeCharacteristics<Closure*, ShortyFieldType::kLambda> chars;
+    chars.is_lambda_ = true;
+    chars.is_dynamic_sized_ = true;
+    chars.CheckExpects();
+  }
+}
+
+}  // namespace lambda
+}  // namespace art