Revert experimental lambda feature.
This is a revert of the following changes:
30c475a2046951a81769c2db0b2dad66cd71e189.
lambda: Minor capture-variable/liberate-variable clean-up after post-merge reviews.
6918bf13eb855b3aa8ccdddda2d27ae8c60cec56.
lambda: Experimental support for capture-variable and liberate-variable
fc1ccd740b7c8e96dfac675cfc580122cd1b40a6.
lambda: Infrastructure to support capture/liberate-variable dex opcodes
e2facc5b18cd756a8b5500fb3d90da69c9ee0fb7.
runtime: Add lambda box/unbox object equality
2ee54e249ad21c74f29a161e248bebe7d22fddf1.
runtime: Partially implement box-lambda and unbox-lambda experimental opcodes
158f35c98e2ec0d40d2c032b8cdce5fb60944a7f.
interpreter: Add experimental lambda opcodes for invoke/create-lambda
a3bb72036f5454e410467f7151dc89f725ae1151.
Added format 25x to dexdump(2).
Plus surrounding cleanups.
Test: make test-art
Change-Id: Ic6f999ad17385ef933f763641049cf721510b202
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 9c813e2..5c94862 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -109,11 +109,6 @@
jit/offline_profiling_info.cc \
jit/profiling_info.cc \
jit/profile_saver.cc \
- lambda/art_lambda_method.cc \
- lambda/box_table.cc \
- lambda/closure.cc \
- lambda/closure_builder.cc \
- lambda/leaking_allocator.cc \
jni_internal.cc \
jobject_comparator.cc \
linear_alloc.cc \
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index cea7046..e48eca9 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -52,7 +52,6 @@
kAllocatorTagMonitorList,
kAllocatorTagClassTable,
kAllocatorTagInternTable,
- kAllocatorTagLambdaBoxTable,
kAllocatorTagMaps,
kAllocatorTagLOS,
kAllocatorTagSafeMap,
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6f689d7..264a530 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -62,7 +62,6 @@
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::lambda_table_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
struct AllMutexData {
@@ -963,7 +962,6 @@
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
- DCHECK(lambda_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1074,10 +1072,6 @@
DCHECK(reference_queue_soft_references_lock_ == nullptr);
reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kLambdaTableLock);
- DCHECK(lambda_table_lock_ == nullptr);
- lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3d7624d..c79e287 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,7 +60,6 @@
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
- kLambdaTableLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
kRosAllocGlobalLock,
@@ -690,10 +689,6 @@
// Have an exclusive logging thread.
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
- // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
- // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
- static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
class Roles {
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 912a74a..99732c6 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -640,14 +640,6 @@
"Attempt to write to null array");
break;
}
- case Instruction::INVOKE_LAMBDA:
- case Instruction::BOX_LAMBDA:
- case Instruction::UNBOX_LAMBDA:
- case Instruction::LIBERATE_VARIABLE: {
- ThrowException("Ljava/lang/NullPointerException;", nullptr,
- "Using a null lambda");
- break;
- }
case Instruction::MONITOR_ENTER:
case Instruction::MONITOR_EXIT: {
ThrowException("Ljava/lang/NullPointerException;", nullptr,
diff --git a/runtime/dex_instruction-inl.h b/runtime/dex_instruction-inl.h
index e160a10..dd65f2c 100644
--- a/runtime/dex_instruction-inl.h
+++ b/runtime/dex_instruction-inl.h
@@ -223,7 +223,6 @@
case k22t: return true;
case k22x: return true;
case k23x: return true;
- case k25x: return true;
case k31c: return true;
case k31i: return true;
case k31t: return true;
@@ -253,7 +252,6 @@
case k22t: return VRegB_22t();
case k22x: return VRegB_22x();
case k23x: return VRegB_23x();
- case k25x: return VRegB_25x();
case k31c: return VRegB_31c();
case k31i: return VRegB_31i();
case k31t: return VRegB_31t();
@@ -331,12 +329,6 @@
return static_cast<uint8_t>(Fetch16(1) & 0xff);
}
-// Number of additional registers in this instruction. # of var arg registers = this value + 1.
-inline uint4_t Instruction::VRegB_25x() const {
- DCHECK_EQ(FormatOf(Opcode()), k25x);
- return InstB(Fetch16(0));
-}
-
inline uint32_t Instruction::VRegB_31c() const {
DCHECK_EQ(FormatOf(Opcode()), k31c);
return Fetch32(1);
@@ -383,7 +375,6 @@
case k22s: return true;
case k22t: return true;
case k23x: return true;
- case k25x: return true;
case k35c: return true;
case k3rc: return true;
default: return false;
@@ -397,7 +388,6 @@
case k22s: return VRegC_22s();
case k22t: return VRegC_22t();
case k23x: return VRegC_23x();
- case k25x: return VRegC_25x();
case k35c: return VRegC_35c();
case k3rc: return VRegC_3rc();
default:
@@ -431,11 +421,6 @@
return static_cast<uint8_t>(Fetch16(1) >> 8);
}
-inline uint4_t Instruction::VRegC_25x() const {
- DCHECK_EQ(FormatOf(Opcode()), k25x);
- return static_cast<uint4_t>(Fetch16(1) & 0xf);
-}
-
inline uint4_t Instruction::VRegC_35c() const {
DCHECK_EQ(FormatOf(Opcode()), k35c);
return static_cast<uint4_t>(Fetch16(2) & 0x0f);
@@ -446,80 +431,11 @@
return Fetch16(2);
}
-inline bool Instruction::HasVarArgs35c() const {
+inline bool Instruction::HasVarArgs() const {
return FormatOf(Opcode()) == k35c;
}
-inline bool Instruction::HasVarArgs25x() const {
- return FormatOf(Opcode()) == k25x;
-}
-
-// Copies all of the parameter registers into the arg array. Check the length with VRegB_25x()+2.
-inline void Instruction::GetAllArgs25x(uint32_t (&arg)[kMaxVarArgRegs25x]) const {
- DCHECK_EQ(FormatOf(Opcode()), k25x);
-
- /*
- * The opcode looks like this:
- * op vC, {vD, vE, vF, vG}
- *
- * and vB is the (implicit) register count (0-4) which denotes how far from vD to vG to read.
- *
- * vC is always present, so with "op vC, {}" the register count will be 0 even though vC
- * is valid.
- *
- * The exact semantic meanings of vC:vG is up to the instruction using the format.
- *
- * Encoding drawing as a bit stream:
- * (Note that each uint16 is little endian, and each register takes up 4 bits)
- *
- * uint16 ||| uint16
- * 7-0 15-8 7-0 15-8
- * |------|-----|||-----|-----|
- * |opcode|vB|vG|||vD|vC|vF|vE|
- * |------|-----|||-----|-----|
- */
- uint16_t reg_list = Fetch16(1);
- uint4_t count = VRegB_25x();
- DCHECK_LE(count, 4U) << "Invalid arg count in 25x (" << count << ")";
-
- /*
- * TODO(iam): Change instruction encoding to one of:
- *
- * - (X) vA = args count, vB = closure register, {vC..vG} = args (25x)
- * - (Y) vA = args count, vB = method index, {vC..vG} = args (35x)
- *
- * (do this in conjunction with adding verifier support for invoke-lambda)
- */
-
- /*
- * Copy the argument registers into the arg[] array, and
- * also copy the first argument into vC. (The
- * DecodedInstruction structure doesn't have separate
- * fields for {vD, vE, vF, vG}, so there's no need to make
- * copies of those.) Note that all cases fall-through.
- */
- switch (count) {
- case 4:
- arg[5] = (Fetch16(0) >> 8) & 0x0f; // vG
- FALLTHROUGH_INTENDED;
- case 3:
- arg[4] = (reg_list >> 12) & 0x0f; // vF
- FALLTHROUGH_INTENDED;
- case 2:
- arg[3] = (reg_list >> 8) & 0x0f; // vE
- FALLTHROUGH_INTENDED;
- case 1:
- arg[2] = (reg_list >> 4) & 0x0f; // vD
- FALLTHROUGH_INTENDED;
- default: // case 0
- // The required lambda 'this' is actually a pair, but the pair is implicit.
- arg[0] = VRegC_25x(); // vC
- arg[1] = arg[0] + 1; // vC + 1
- break;
- }
-}
-
-inline void Instruction::GetVarArgs(uint32_t arg[kMaxVarArgRegs], uint16_t inst_data) const {
+inline void Instruction::GetVarArgs(uint32_t arg[5], uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k35c);
/*
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index d04087a..fabc47b 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -71,7 +71,7 @@
#define INSTRUCTION_SIZE(opcode, c, p, format, i, a, v) \
(((opcode) == NOP) ? -1 : \
(((format) >= k10x) && ((format) <= k10t)) ? 1 : \
- (((format) >= k20t) && ((format) <= k25x)) ? 2 : \
+ (((format) >= k20t) && ((format) <= k22c)) ? 2 : \
(((format) >= k32x) && ((format) <= k3rc)) ? 3 : \
((format) == k51l) ? 5 : -1),
#include "dex_instruction_list.h"
@@ -241,14 +241,6 @@
break;
}
FALLTHROUGH_INTENDED;
- case CREATE_LAMBDA:
- if (file != nullptr) {
- uint32_t method_idx = VRegB_21c();
- os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyMethod(method_idx, *file, true)
- << " // method@" << method_idx;
- break;
- }
- FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s v%d, thing@%d", opcode, VRegA_21c(), VRegB_21c());
break;
@@ -329,26 +321,6 @@
}
break;
}
- case k25x: {
- if (Opcode() == INVOKE_LAMBDA) {
- uint32_t arg[kMaxVarArgRegs25x];
- GetAllArgs25x(arg);
- const size_t num_extra_var_args = VRegB_25x();
- DCHECK_LE(num_extra_var_args + 2, arraysize(arg));
-
- // invoke-lambda vC, {vD, vE, vF, vG}
- os << opcode << " v" << arg[0] << ", {";
- for (size_t i = 0; i < num_extra_var_args; ++i) {
- if (i != 0) {
- os << ", ";
- }
- os << "v" << arg[i + 2]; // Don't print the pair of vC registers. Pair is implicit.
- }
- os << "}";
- break;
- }
- FALLTHROUGH_INTENDED;
- }
case k32x: os << StringPrintf("%s v%d, v%d", opcode, VRegA_32x(), VRegB_32x()); break;
case k30t: os << StringPrintf("%s %+d", opcode, VRegA_30t()); break;
case k31t: os << StringPrintf("%s v%d, %+d", opcode, VRegA_31t(), VRegB_31t()); break;
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index c7856f0..1ac0f11 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -105,7 +105,6 @@
k22t, // op vA, vB, +CCCC
k22s, // op vA, vB, #+CCCC
k22c, // op vA, vB, thing@CCCC
- k25x, // op vC, {vD, vE, vF, vG} (B: count)
k32x, // op vAAAA, vBBBB
k30t, // op +AAAAAAAA
k31t, // op vAA, +BBBBBBBB
@@ -180,12 +179,9 @@
kVerifyVarArgRangeNonZero = 0x100000,
kVerifyRuntimeOnly = 0x200000,
kVerifyError = 0x400000,
- kVerifyRegCString = 0x800000,
};
static constexpr uint32_t kMaxVarArgRegs = 5;
- static constexpr uint32_t kMaxVarArgRegs25x = 6; // lambdas are 2 registers.
- static constexpr uint32_t kLambdaVirtualRegisterWidth = 2;
// Returns the size (in 2 byte code units) of this instruction.
size_t SizeInCodeUnits() const {
@@ -221,7 +217,7 @@
// Returns a pointer to the instruction after this 2xx instruction in the stream.
const Instruction* Next_2xx() const {
- DCHECK(FormatOf(Opcode()) >= k20t && FormatOf(Opcode()) <= k25x);
+ DCHECK(FormatOf(Opcode()) >= k20t && FormatOf(Opcode()) <= k22c);
return RelativeAt(2);
}
@@ -371,7 +367,6 @@
}
uint16_t VRegB_22x() const;
uint8_t VRegB_23x() const;
- uint4_t VRegB_25x() const;
uint32_t VRegB_31c() const;
int32_t VRegB_31i() const;
int32_t VRegB_31t() const;
@@ -398,20 +393,15 @@
int16_t VRegC_22s() const;
int16_t VRegC_22t() const;
uint8_t VRegC_23x() const;
- uint4_t VRegC_25x() const;
uint4_t VRegC_35c() const;
uint16_t VRegC_3rc() const;
// Fills the given array with the 'arg' array of the instruction.
- bool HasVarArgs35c() const;
- bool HasVarArgs25x() const;
-
- // TODO(iam): Make this name more consistent with GetAllArgs25x by including the opcode format.
+ bool HasVarArgs() const;
void GetVarArgs(uint32_t args[kMaxVarArgRegs], uint16_t inst_data) const;
void GetVarArgs(uint32_t args[kMaxVarArgRegs]) const {
return GetVarArgs(args, Fetch16(0));
}
- void GetAllArgs25x(uint32_t (&args)[kMaxVarArgRegs25x]) const;
// Returns the opcode field of the instruction. The given "inst_data" parameter must be the first
// 16 bits of instruction.
@@ -539,7 +529,7 @@
int GetVerifyTypeArgumentC() const {
return (kInstructionVerifyFlags[Opcode()] & (kVerifyRegC | kVerifyRegCField |
- kVerifyRegCNewArray | kVerifyRegCType | kVerifyRegCWide | kVerifyRegCString));
+ kVerifyRegCNewArray | kVerifyRegCType | kVerifyRegCWide));
}
int GetVerifyExtraFlags() const {
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index acdffd9..40ea285 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -262,13 +262,13 @@
V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF3, INVOKE_LAMBDA, "invoke-lambda", k25x, kIndexNone, kContinue | kThrow | kInvoke | kExperimental, kVerifyRegC /*TODO: | kVerifyVarArg*/) \
+ V(0xF3, UNUSED_F3, "unused-f3", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xF4, UNUSED_F4, "unused-f4", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF5, CAPTURE_VARIABLE, "capture-variable", k21c, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegBString) \
- V(0xF6, CREATE_LAMBDA, "create-lambda", k21c, kIndexMethodRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegBMethod) \
- V(0xF7, LIBERATE_VARIABLE, "liberate-variable", k22c, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCString) \
- V(0xF8, BOX_LAMBDA, "box-lambda", k22x, kIndexNone, kContinue | kExperimental, kVerifyRegA | kVerifyRegB) \
- V(0xF9, UNBOX_LAMBDA, "unbox-lambda", k22c, kIndexTypeRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
+ V(0xF5, UNUSED_F5, "unused-f5", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF6, UNUSED_F6, "unused-f6", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF7, UNUSED_F7, "unused-f7", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF8, UNUSED_F8, "unused-f8", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF9, UNUSED_F9, "unused-f9", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFA, UNUSED_FA, "unused-fa", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFB, UNUSED_FB, "unused-fb", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFC, UNUSED_FC, "unused-fc", k10x, kIndexUnknown, 0, kVerifyError) \
@@ -293,7 +293,6 @@
V(k22t) \
V(k22s) \
V(k22c) \
- V(k25x) \
V(k32x) \
V(k30t) \
V(k31t) \
diff --git a/runtime/experimental_flags.h b/runtime/experimental_flags.h
index 198f3fa..fde1a5f 100644
--- a/runtime/experimental_flags.h
+++ b/runtime/experimental_flags.h
@@ -26,7 +26,6 @@
// The actual flag values.
enum {
kNone = 0x0000,
- kLambdas = 0x0001,
};
constexpr ExperimentalFlags() : value_(0x0000) {}
@@ -62,15 +61,9 @@
uint32_t value_;
};
-inline std::ostream& operator<<(std::ostream& stream, const ExperimentalFlags& e) {
- bool started = false;
- if (e & ExperimentalFlags::kLambdas) {
- stream << (started ? "|" : "") << "kLambdas";
- started = true;
- }
- if (!started) {
- stream << "kNone";
- }
+inline std::ostream& operator<<(std::ostream& stream,
+ const ExperimentalFlags& e ATTRIBUTE_UNUSED) {
+ stream << "kNone";
return stream;
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 11b7ef4..ac146b3 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -30,9 +30,6 @@
namespace art {
namespace interpreter {
-// All lambda closures have to be a consecutive pair of virtual registers.
-static constexpr size_t kLambdaVirtualRegisterWidth = 2;
-
void ThrowNullPointerExceptionFromInterpreter() {
ThrowNullPointerExceptionFromDexPC();
}
@@ -732,7 +729,6 @@
// Fast path: no extra checks.
if (is_range) {
- // TODO: Implement the range version of invoke-lambda
uint16_t first_src_reg = vregC;
for (size_t src_reg = first_src_reg, dest_reg = first_dest_reg; dest_reg < num_regs;
@@ -772,34 +768,6 @@
}
template<bool is_range, bool do_assignability_check>
-bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data ATTRIBUTE_UNUSED, JValue* result) {
- const uint4_t num_additional_registers = inst->VRegB_25x();
- // Argument word count.
- const uint16_t number_of_inputs = num_additional_registers + kLambdaVirtualRegisterWidth;
- // The lambda closure register is always present and is not encoded in the count.
- // Furthermore, the lambda closure register is always wide, so it counts as 2 inputs.
-
- // TODO: find a cleaner way to separate non-range and range information without duplicating
- // code.
- uint32_t arg[Instruction::kMaxVarArgRegs25x]; // only used in invoke-XXX.
- uint32_t vregC = 0; // only used in invoke-XXX-range.
- if (is_range) {
- vregC = inst->VRegC_3rc();
- } else {
- // TODO(iam): See if it's possible to remove inst_data dependency from 35x to avoid this path
- inst->GetAllArgs25x(arg);
- }
-
- // TODO: if there's an assignability check, throw instead?
- DCHECK(called_method->IsStatic());
-
- return DoCallCommon<is_range, do_assignability_check>(
- called_method, self, shadow_frame,
- result, number_of_inputs, arg, vregC);
-}
-
-template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
@@ -947,20 +915,6 @@
EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
#undef EXPLICIT_DO_CALL_TEMPLATE_DECL
-// Explicit DoLambdaCall template function declarations.
-#define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
- bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
- ShadowFrame& shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result)
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(false, false);
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(false, true);
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, false);
-EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, true);
-#undef EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL
-
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
template SHARED_REQUIRES(Locks::mutator_lock_) \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 174d4e0..4fd1514 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -36,14 +36,7 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
#include "jit/jit.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/box_table.h"
-#include "lambda/closure.h"
-#include "lambda/closure_builder-inl.h"
-#include "lambda/leaking_allocator.h"
-#include "lambda/shorty_field_type.h"
#include "mirror/class-inl.h"
-#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -142,488 +135,7 @@
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
-// Invokes the given lambda closure. This is part of the invocation support and is used by
-// DoLambdaInvoke functions.
-// Returns true on success, otherwise throws an exception and returns false.
-template<bool is_range, bool do_assignability_check>
-bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result);
-
-// Validates that the art method corresponding to a lambda method target
-// is semantically valid:
-//
-// Must be ACC_STATIC and ACC_LAMBDA. Must be a concrete managed implementation
-// (i.e. not native, not proxy, not abstract, ...).
-//
-// If the validation fails, return false and raise an exception.
-static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- bool success = false;
-
- if (UNLIKELY(called_method == nullptr)) {
- // The shadow frame should already be pushed, so we don't need to update it.
- } else if (UNLIKELY(!called_method->IsInvokable())) {
- called_method->ThrowInvocationTimeError();
- // We got an error.
- // TODO(iam): Also handle the case when the method is non-static, what error do we throw?
- // TODO(iam): Also make sure that ACC_LAMBDA is set.
- } else if (UNLIKELY(called_method->GetCodeItem() == nullptr)) {
- // Method could be native, proxy method, etc. Lambda targets have to be concrete impls,
- // so don't allow this.
- } else {
- success = true;
- }
-
- return success;
-}
-
-// Write out the 'Closure*' into vreg and vreg+1, as if it was a jlong.
-static inline void WriteLambdaClosureIntoVRegs(ShadowFrame& shadow_frame,
- const lambda::Closure& lambda_closure,
- uint32_t vreg) {
- // Split the method into a lo and hi 32 bits so we can encode them into 2 virtual registers.
- uint32_t closure_lo = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&lambda_closure));
- uint32_t closure_hi = static_cast<uint32_t>(reinterpret_cast<uint64_t>(&lambda_closure)
- >> BitSizeOf<uint32_t>());
- // Use uint64_t instead of uintptr_t to allow shifting past the max on 32-bit.
- static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
-
- DCHECK_NE(closure_lo | closure_hi, 0u);
-
- shadow_frame.SetVReg(vreg, closure_lo);
- shadow_frame.SetVReg(vreg + 1, closure_hi);
-}
-
-// Handles create-lambda instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-//
-// The closure must be allocated big enough to hold the data, and should not be
-// pre-initialized. It is initialized with the actual captured variables as a side-effect,
-// although this should be unimportant to the caller since this function also handles storing it to
-// the ShadowFrame.
-//
-// As a work-in-progress implementation, this shoves the ArtMethod object corresponding
-// to the target dex method index into the target register vA and vA + 1.
-template<bool do_access_check>
-static inline bool DoCreateLambda(Thread* self,
- const Instruction* inst,
- /*inout*/ShadowFrame& shadow_frame,
- /*inout*/lambda::ClosureBuilder* closure_builder,
- /*inout*/lambda::Closure* uninitialized_closure) {
- DCHECK(closure_builder != nullptr);
- DCHECK(uninitialized_closure != nullptr);
- DCHECK_ALIGNED(uninitialized_closure, alignof(lambda::Closure));
-
- using lambda::ArtLambdaMethod;
- using lambda::LeakingAllocator;
-
- /*
- * create-lambda is opcode 0x21c
- * - vA is the target register where the closure will be stored into
- * (also stores into vA + 1)
- * - vB is the method index which will be the target for a later invoke-lambda
- */
- const uint32_t method_idx = inst->VRegB_21c();
- mirror::Object* receiver = nullptr; // Always static. (see 'kStatic')
- ArtMethod* sf_method = shadow_frame.GetMethod();
- ArtMethod* const called_method = FindMethodFromCode<kStatic, do_access_check>(
- method_idx, &receiver, sf_method, self);
-
- uint32_t vreg_dest_closure = inst->VRegA_21c();
-
- if (UNLIKELY(!IsValidLambdaTargetOrThrow(called_method))) {
- CHECK(self->IsExceptionPending());
- shadow_frame.SetVReg(vreg_dest_closure, 0u);
- shadow_frame.SetVReg(vreg_dest_closure + 1, 0u);
- return false;
- }
-
- ArtLambdaMethod* initialized_lambda_method;
- // Initialize the ArtLambdaMethod with the right data.
- {
- // Allocate enough memory to store a well-aligned ArtLambdaMethod.
- // This is not the final type yet since the data starts out uninitialized.
- LeakingAllocator::AlignedMemoryStorage<ArtLambdaMethod>* uninitialized_lambda_method =
- LeakingAllocator::AllocateMemory<ArtLambdaMethod>(self);
-
- std::string captured_variables_shorty = closure_builder->GetCapturedVariableShortyTypes();
- std::string captured_variables_long_type_desc;
-
- // Synthesize a long type descriptor from the short one.
- for (char shorty : captured_variables_shorty) {
- lambda::ShortyFieldType shorty_field_type(shorty);
- if (shorty_field_type.IsObject()) {
- // Not the true type, but good enough until we implement verifier support.
- captured_variables_long_type_desc += "Ljava/lang/Object;";
- UNIMPLEMENTED(FATAL) << "create-lambda with an object captured variable";
- } else if (shorty_field_type.IsLambda()) {
- // Not the true type, but good enough until we implement verifier support.
- captured_variables_long_type_desc += "Ljava/lang/Runnable;";
- UNIMPLEMENTED(FATAL) << "create-lambda with a lambda captured variable";
- } else {
- // The primitive types have the same length shorty or not, so this is always correct.
- DCHECK(shorty_field_type.IsPrimitive());
- captured_variables_long_type_desc += shorty_field_type;
- }
- }
-
- // Copy strings to dynamically allocated storage. This leaks, but that's ok. Fix it later.
- // TODO: Strings need to come from the DexFile, so they won't need their own allocations.
- char* captured_variables_type_desc = LeakingAllocator::MakeFlexibleInstance<char>(
- self,
- captured_variables_long_type_desc.size() + 1);
- strcpy(captured_variables_type_desc, captured_variables_long_type_desc.c_str());
- char* captured_variables_shorty_copy = LeakingAllocator::MakeFlexibleInstance<char>(
- self,
- captured_variables_shorty.size() + 1);
- strcpy(captured_variables_shorty_copy, captured_variables_shorty.c_str());
-
- // After initialization, the object at the storage is well-typed. Use strong type going forward.
- initialized_lambda_method =
- new (uninitialized_lambda_method) ArtLambdaMethod(called_method,
- captured_variables_type_desc,
- captured_variables_shorty_copy,
- true); // innate lambda
- }
-
- // Write all the closure captured variables and the closure header into the closure.
- lambda::Closure* initialized_closure =
- closure_builder->CreateInPlace(uninitialized_closure, initialized_lambda_method);
-
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *initialized_closure, vreg_dest_closure);
- return true;
-}
-
-// Reads out the 'ArtMethod*' stored inside of vreg and vreg+1
-//
-// Validates that the art method points to a valid lambda function, otherwise throws
-// an exception and returns null.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-static inline lambda::Closure* ReadLambdaClosureFromVRegsOrThrow(ShadowFrame& shadow_frame,
- uint32_t vreg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- // Lambda closures take up a consecutive pair of 2 virtual registers.
- // On 32-bit the high bits are always 0.
- uint32_t vc_value_lo = shadow_frame.GetVReg(vreg);
- uint32_t vc_value_hi = shadow_frame.GetVReg(vreg + 1);
-
- uint64_t vc_value_ptr = (static_cast<uint64_t>(vc_value_hi) << BitSizeOf<uint32_t>())
- | vc_value_lo;
-
- // Use uint64_t instead of uintptr_t to allow left-shifting past the max on 32-bit.
- static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
- lambda::Closure* const lambda_closure = reinterpret_cast<lambda::Closure*>(vc_value_ptr);
- DCHECK_ALIGNED(lambda_closure, alignof(lambda::Closure));
-
- // Guard against the user passing a null closure, which is odd but (sadly) semantically valid.
- if (UNLIKELY(lambda_closure == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- return nullptr;
- } else if (UNLIKELY(!IsValidLambdaTargetOrThrow(lambda_closure->GetTargetMethod()))) {
- // Sanity check against data corruption.
- return nullptr;
- }
-
- return lambda_closure;
-}
-
-// Forward declaration for lock annotations. See below for documentation.
-template <bool do_access_check>
-static inline const char* GetStringDataByDexStringIndexOrThrow(ShadowFrame& shadow_frame,
- uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
-// Find the c-string data corresponding to a dex file's string index.
-// Otherwise, returns null if not found and throws a VerifyError.
-//
-// Note that with do_access_check=false, we never return null because the verifier
-// must guard against invalid string indices.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-template <bool do_access_check>
-static inline const char* GetStringDataByDexStringIndexOrThrow(ShadowFrame& shadow_frame,
- uint32_t string_idx) {
- ArtMethod* method = shadow_frame.GetMethod();
- const DexFile* dex_file = method->GetDexFile();
-
- mirror::Class* declaring_class = method->GetDeclaringClass();
- if (!do_access_check) {
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings());
- } else {
- // Access checks enabled: perform string index bounds ourselves.
- if (string_idx >= dex_file->GetHeader().string_ids_size_) {
- ThrowVerifyError(declaring_class, "String index '%" PRIu32 "' out of bounds",
- string_idx);
- return nullptr;
- }
- }
-
- const char* type_string = dex_file->StringDataByIdx(string_idx);
-
- if (UNLIKELY(type_string == nullptr)) {
- CHECK_EQ(false, do_access_check)
- << " verifier should've caught invalid string index " << string_idx;
- CHECK_EQ(true, do_access_check)
- << " string idx size check should've caught invalid string index " << string_idx;
- }
-
- return type_string;
-}
-
-// Handles capture-variable instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-template<bool do_access_check>
-static inline bool DoCaptureVariable(Thread* self,
- const Instruction* inst,
- /*inout*/ShadowFrame& shadow_frame,
- /*inout*/lambda::ClosureBuilder* closure_builder) {
- DCHECK(closure_builder != nullptr);
- using lambda::ShortyFieldType;
- /*
- * capture-variable is opcode 0xf6, fmt 0x21c
- * - vA is the source register of the variable that will be captured
- * - vB is the string ID of the variable's type that will be captured
- */
- const uint32_t source_vreg = inst->VRegA_21c();
- const uint32_t string_idx = inst->VRegB_21c();
- // TODO: this should be a proper [type id] instead of a [string ID] pointing to a type.
-
- const char* type_string = GetStringDataByDexStringIndexOrThrow<do_access_check>(shadow_frame,
- string_idx);
- if (UNLIKELY(type_string == nullptr)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- char type_first_letter = type_string[0];
- ShortyFieldType shorty_type;
- if (do_access_check &&
- UNLIKELY(!ShortyFieldType::MaybeCreate(type_first_letter, /*out*/&shorty_type))) { // NOLINT: [whitespace/comma] [3]
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "capture-variable vB must be a valid type");
- return false;
- } else {
- // Already verified that the type is valid.
- shorty_type = ShortyFieldType(type_first_letter);
- }
-
- const size_t captured_variable_count = closure_builder->GetCaptureCount();
-
- // Note: types are specified explicitly so that the closure is packed tightly.
- switch (shorty_type) {
- case ShortyFieldType::kBoolean: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<bool>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kByte: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<int8_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kChar: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<uint16_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kShort: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<int16_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kInt: {
- uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
- closure_builder->CaptureVariablePrimitive<int32_t>(primitive_narrow_value);
- break;
- }
- case ShortyFieldType::kDouble: {
- closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegDouble(source_vreg));
- break;
- }
- case ShortyFieldType::kFloat: {
- closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegFloat(source_vreg));
- break;
- }
- case ShortyFieldType::kLambda: {
- UNIMPLEMENTED(FATAL) << " capture-variable with type kLambda";
- // TODO: Capturing lambdas recursively will be done at a later time.
- UNREACHABLE();
- }
- case ShortyFieldType::kLong: {
- closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegLong(source_vreg));
- break;
- }
- case ShortyFieldType::kObject: {
- closure_builder->CaptureVariableObject(shadow_frame.GetVRegReference(source_vreg));
- UNIMPLEMENTED(FATAL) << " capture-variable with type kObject";
- // TODO: finish implementing this. disabled for now since we can't track lambda refs for GC.
- UNREACHABLE();
- }
-
- default:
- LOG(FATAL) << "Invalid shorty type value " << shorty_type;
- UNREACHABLE();
- }
-
- DCHECK_EQ(captured_variable_count + 1, closure_builder->GetCaptureCount());
-
- return true;
-}
-
-// Handles capture-variable instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-// (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
-template<bool do_access_check>
-static inline bool DoLiberateVariable(Thread* self,
- const Instruction* inst,
- size_t captured_variable_index,
- /*inout*/ShadowFrame& shadow_frame) {
- using lambda::ShortyFieldType;
- /*
- * liberate-variable is opcode 0xf7, fmt 0x22c
- * - vA is the destination register
- * - vB is the register with the lambda closure in it
- * - vC is the string ID which needs to be a valid field type descriptor
- */
-
- const uint32_t dest_vreg = inst->VRegA_22c();
- const uint32_t closure_vreg = inst->VRegB_22c();
- const uint32_t string_idx = inst->VRegC_22c();
- // TODO: this should be a proper [type id] instead of a [string ID] pointing to a type.
-
-
- // Synthesize a long type descriptor from a shorty type descriptor list.
- // TODO: Fix the dex encoding to contain the long and short type descriptors.
- const char* type_string = GetStringDataByDexStringIndexOrThrow<do_access_check>(shadow_frame,
- string_idx);
- if (UNLIKELY(do_access_check && type_string == nullptr)) {
- CHECK(self->IsExceptionPending());
- shadow_frame.SetVReg(dest_vreg, 0);
- return false;
- }
-
- char type_first_letter = type_string[0];
- ShortyFieldType shorty_type;
- if (do_access_check &&
- UNLIKELY(!ShortyFieldType::MaybeCreate(type_first_letter, /*out*/&shorty_type))) { // NOLINT: [whitespace/comma] [3]
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "liberate-variable vC must be a valid type");
- shadow_frame.SetVReg(dest_vreg, 0);
- return false;
- } else {
- // Already verified that the type is valid.
- shorty_type = ShortyFieldType(type_first_letter);
- }
-
- // Check for closure being null *after* the type check.
- // This way we can access the type info in case we fail later, to know how many vregs to clear.
- const lambda::Closure* lambda_closure =
- ReadLambdaClosureFromVRegsOrThrow(/*inout*/shadow_frame, closure_vreg);
-
- // Failed lambda target runtime check, an exception was raised.
- if (UNLIKELY(lambda_closure == nullptr)) {
- CHECK(self->IsExceptionPending());
-
- // Clear the destination vreg(s) to be safe.
- shadow_frame.SetVReg(dest_vreg, 0);
- if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
- shadow_frame.SetVReg(dest_vreg + 1, 0);
- }
- return false;
- }
-
- if (do_access_check &&
- UNLIKELY(captured_variable_index >= lambda_closure->GetNumberOfCapturedVariables())) {
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "liberate-variable captured variable index %zu out of bounds",
- lambda_closure->GetNumberOfCapturedVariables());
- // Clear the destination vreg(s) to be safe.
- shadow_frame.SetVReg(dest_vreg, 0);
- if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
- shadow_frame.SetVReg(dest_vreg + 1, 0);
- }
- return false;
- }
-
- // Verify that the runtime type of the captured-variable matches the requested dex type.
- if (do_access_check) {
- ShortyFieldType actual_type = lambda_closure->GetCapturedShortyType(captured_variable_index);
- if (actual_type != shorty_type) {
- ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
- "cannot liberate-variable of runtime type '%c' to dex type '%c'",
- static_cast<char>(actual_type),
- static_cast<char>(shorty_type));
-
- shadow_frame.SetVReg(dest_vreg, 0);
- if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
- shadow_frame.SetVReg(dest_vreg + 1, 0);
- }
- return false;
- }
-
- if (actual_type.IsLambda() || actual_type.IsObject()) {
- UNIMPLEMENTED(FATAL) << "liberate-variable type checks needs to "
- << "parse full type descriptor for objects and lambdas";
- }
- }
-
- // Unpack the captured variable from the closure into the correct type, then save it to the vreg.
- if (shorty_type.IsPrimitiveNarrow()) {
- uint32_t primitive_narrow_value =
- lambda_closure->GetCapturedPrimitiveNarrow(captured_variable_index);
- shadow_frame.SetVReg(dest_vreg, primitive_narrow_value);
- } else if (shorty_type.IsPrimitiveWide()) {
- uint64_t primitive_wide_value =
- lambda_closure->GetCapturedPrimitiveWide(captured_variable_index);
- shadow_frame.SetVRegLong(dest_vreg, static_cast<int64_t>(primitive_wide_value));
- } else if (shorty_type.IsObject()) {
- mirror::Object* unpacked_object =
- lambda_closure->GetCapturedObject(captured_variable_index);
- shadow_frame.SetVRegReference(dest_vreg, unpacked_object);
-
- UNIMPLEMENTED(FATAL) << "liberate-variable cannot unpack objects yet";
- } else if (shorty_type.IsLambda()) {
- UNIMPLEMENTED(FATAL) << "liberate-variable cannot unpack lambdas yet";
- } else {
- LOG(FATAL) << "unreachable";
- UNREACHABLE();
- }
-
- return true;
-}
-
-template<bool do_access_check>
-static inline bool DoInvokeLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data, JValue* result) {
- /*
- * invoke-lambda is opcode 0x25
- *
- * - vC is the closure register (both vC and vC + 1 will be used to store the closure).
- * - vB is the number of additional registers up to |{vD,vE,vF,vG}| (4)
- * - the rest of the registers are always var-args
- *
- * - reading var-args for 0x25 gets us vD,vE,vF,vG (but not vB)
- */
- uint32_t vreg_closure = inst->VRegC_25x();
- const lambda::Closure* lambda_closure =
- ReadLambdaClosureFromVRegsOrThrow(shadow_frame, vreg_closure);
-
- // Failed lambda target runtime check, an exception was raised.
- if (UNLIKELY(lambda_closure == nullptr)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- }
-
- ArtMethod* const called_method = lambda_closure->GetTargetMethod();
- // Invoke a non-range lambda
- return DoLambdaCall<false, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
- result);
-}
-
-// Handles invoke-XXX/range instructions (other than invoke-lambda[-range]).
+// Handles invoke-XXX/range instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range, bool do_access_check>
static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
@@ -904,74 +416,6 @@
return 3;
}
-template <bool _do_check>
-static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_) {
- /*
- * box-lambda vA, vB /// opcode 0xf8, format 22x
- * - vA is the target register where the Object representation of the closure will be stored into
- * - vB is a closure (made by create-lambda)
- * (also reads vB + 1)
- */
- uint32_t vreg_target_object = inst->VRegA_22x(inst_data);
- uint32_t vreg_source_closure = inst->VRegB_22x();
-
- lambda::Closure* lambda_closure = ReadLambdaClosureFromVRegsOrThrow(shadow_frame,
- vreg_source_closure);
-
- // Failed lambda target runtime check, an exception was raised.
- if (UNLIKELY(lambda_closure == nullptr)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- mirror::Object* closure_as_object =
- Runtime::Current()->GetLambdaBoxTable()->BoxLambda(lambda_closure);
-
- // Failed to box the lambda, an exception was raised.
- if (UNLIKELY(closure_as_object == nullptr)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- shadow_frame.SetVRegReference(vreg_target_object, closure_as_object);
- return true;
-}
-
-template <bool _do_check> SHARED_REQUIRES(Locks::mutator_lock_)
-static inline bool DoUnboxLambda(Thread* self,
- ShadowFrame& shadow_frame,
- const Instruction* inst,
- uint16_t inst_data) {
- /*
- * unbox-lambda vA, vB, [type id] /// opcode 0xf9, format 22c
- * - vA is the target register where the closure will be written into
- * (also writes vA + 1)
- * - vB is the Object representation of the closure (made by box-lambda)
- */
- uint32_t vreg_target_closure = inst->VRegA_22c(inst_data);
- uint32_t vreg_source_object = inst->VRegB_22c();
-
- // Raise NullPointerException if object is null
- mirror::Object* boxed_closure_object = shadow_frame.GetVRegReference(vreg_source_object);
- if (UNLIKELY(boxed_closure_object == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- return false;
- }
-
- lambda::Closure* unboxed_closure = nullptr;
- // Raise an exception if unboxing fails.
- if (!Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(boxed_closure_object,
- /*out*/&unboxed_closure)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
-
- DCHECK(unboxed_closure != nullptr);
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *unboxed_closure, vreg_target_closure);
- return true;
-}
-
uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1058,72 +502,6 @@
EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true); // invoke-virtual-quick-range.
#undef EXPLICIT_INSTANTIATION_DO_INVOKE_VIRTUAL_QUICK
-// Explicitly instantiate all DoCreateLambda functions.
-#define EXPLICIT_DO_CREATE_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoCreateLambda<_do_check>(Thread* self, \
- const Instruction* inst, \
- /*inout*/ShadowFrame& shadow_frame, \
- /*inout*/lambda::ClosureBuilder* closure_builder, \
- /*inout*/lambda::Closure* uninitialized_closure);
-
-EXPLICIT_DO_CREATE_LAMBDA_DECL(false); // create-lambda
-EXPLICIT_DO_CREATE_LAMBDA_DECL(true); // create-lambda
-#undef EXPLICIT_DO_CREATE_LAMBDA_DECL
-
-// Explicitly instantiate all DoInvokeLambda functions.
-#define EXPLICIT_DO_INVOKE_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoInvokeLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data, JValue* result);
-
-EXPLICIT_DO_INVOKE_LAMBDA_DECL(false); // invoke-lambda
-EXPLICIT_DO_INVOKE_LAMBDA_DECL(true); // invoke-lambda
-#undef EXPLICIT_DO_INVOKE_LAMBDA_DECL
-
-// Explicitly instantiate all DoBoxLambda functions.
-#define EXPLICIT_DO_BOX_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoBoxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data);
-
-EXPLICIT_DO_BOX_LAMBDA_DECL(false); // box-lambda
-EXPLICIT_DO_BOX_LAMBDA_DECL(true); // box-lambda
-#undef EXPLICIT_DO_BOX_LAMBDA_DECL
-
-// Explicitly instantiate all DoUnBoxLambda functions.
-#define EXPLICIT_DO_UNBOX_LAMBDA_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoUnboxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data);
-
-EXPLICIT_DO_UNBOX_LAMBDA_DECL(false); // unbox-lambda
-EXPLICIT_DO_UNBOX_LAMBDA_DECL(true); // unbox-lambda
-#undef EXPLICIT_DO_BOX_LAMBDA_DECL
-
-// Explicitly instantiate all DoCaptureVariable functions.
-#define EXPLICIT_DO_CAPTURE_VARIABLE_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoCaptureVariable<_do_check>(Thread* self, \
- const Instruction* inst, \
- ShadowFrame& shadow_frame, \
- lambda::ClosureBuilder* closure_builder);
-
-EXPLICIT_DO_CAPTURE_VARIABLE_DECL(false); // capture-variable
-EXPLICIT_DO_CAPTURE_VARIABLE_DECL(true); // capture-variable
-#undef EXPLICIT_DO_CREATE_LAMBDA_DECL
-
-// Explicitly instantiate all DoLiberateVariable functions.
-#define EXPLICIT_DO_LIBERATE_VARIABLE_DECL(_do_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) \
-bool DoLiberateVariable<_do_check>(Thread* self, \
- const Instruction* inst, \
- size_t captured_variable_index, \
- ShadowFrame& shadow_frame); \
-
-EXPLICIT_DO_LIBERATE_VARIABLE_DECL(false); // liberate-variable
-EXPLICIT_DO_LIBERATE_VARIABLE_DECL(true); // liberate-variable
-#undef EXPLICIT_DO_LIBERATE_LAMBDA_DECL
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 3b6e015..43b2778 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -18,14 +18,11 @@
// Clang 3.4 fails to build the goto interpreter implementation.
-#include "base/stl_util.h" // MakeUnique
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
#include "safe_math.h"
-#include <memory> // std::unique_ptr
-
namespace art {
namespace interpreter {
@@ -93,16 +90,6 @@
#define HANDLE_INSTRUCTION_START(opcode) op_##opcode: // NOLINT(whitespace/labels)
#define HANDLE_INSTRUCTION_END() UNREACHABLE_CODE_CHECK()
-// Use with instructions labeled with kExperimental flag:
-#define HANDLE_EXPERIMENTAL_INSTRUCTION_START(opcode) \
- HANDLE_INSTRUCTION_START(opcode); \
- DCHECK(inst->IsExperimental()); \
- if (Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas)) {
-#define HANDLE_EXPERIMENTAL_INSTRUCTION_END() \
- } else { \
- UnexpectedOpcode(inst, shadow_frame); \
- } HANDLE_INSTRUCTION_END();
-
#define HANDLE_MONITOR_CHECKS() \
if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \
HANDLE_PENDING_EXCEPTION(); \
@@ -190,8 +177,6 @@
uint16_t inst_data;
const void* const* currentHandlersTable;
UPDATE_HANDLER_TABLE();
- std::unique_ptr<lambda::ClosureBuilder> lambda_closure_builder;
- size_t lambda_captured_variable_index = 0;
const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
ArtMethod* method = shadow_frame.GetMethod();
jit::Jit* jit = Runtime::Current()->GetJit();
@@ -1668,14 +1653,6 @@
}
HANDLE_INSTRUCTION_END();
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(INVOKE_LAMBDA) {
- bool success = DoInvokeLambda<do_access_check>(self, shadow_frame, inst, inst_data,
- &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
HANDLE_INSTRUCTION_START(NEG_INT)
shadow_frame.SetVReg(
inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
@@ -2457,62 +2434,6 @@
ADVANCE(2);
HANDLE_INSTRUCTION_END();
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(CREATE_LAMBDA) {
- if (lambda_closure_builder == nullptr) {
- // DoCreateLambda always needs a ClosureBuilder, even if it has 0 captured variables.
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- // TODO: these allocations should not leak, and the lambda method should not be local.
- lambda::Closure* lambda_closure =
- reinterpret_cast<lambda::Closure*>(alloca(lambda_closure_builder->GetSize()));
- bool success = DoCreateLambda<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get(),
- /*inout*/lambda_closure);
- lambda_closure_builder.reset(nullptr); // reset state of variables captured
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(BOX_LAMBDA) {
- bool success = DoBoxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(UNBOX_LAMBDA) {
- bool success = DoUnboxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(CAPTURE_VARIABLE) {
- if (lambda_closure_builder == nullptr) {
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- bool success = DoCaptureVariable<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get());
-
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
- HANDLE_EXPERIMENTAL_INSTRUCTION_START(LIBERATE_VARIABLE) {
- bool success = DoLiberateVariable<do_access_check>(self,
- inst,
- lambda_captured_variable_index,
- /*inout*/shadow_frame);
- // Temporarily only allow sequences of 'liberate-variable, liberate-variable, ...'
- lambda_captured_variable_index++;
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_EXPERIMENTAL_INSTRUCTION_END();
-
HANDLE_INSTRUCTION_START(UNUSED_3E)
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
@@ -2545,10 +2466,34 @@
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
+ HANDLE_INSTRUCTION_START(UNUSED_F3)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
HANDLE_INSTRUCTION_START(UNUSED_F4)
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
+ HANDLE_INSTRUCTION_START(UNUSED_F5)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F6)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F7)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F8)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F9)
+ UnexpectedOpcode(inst, shadow_frame);
+ HANDLE_INSTRUCTION_END();
+
HANDLE_INSTRUCTION_START(UNUSED_FA)
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 8bfc10c..3623db2 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -15,14 +15,11 @@
*/
#include "base/enums.h"
-#include "base/stl_util.h" // MakeUnique
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
#include "safe_math.h"
-#include <memory> // std::unique_ptr
-
namespace art {
namespace interpreter {
@@ -92,11 +89,6 @@
} \
} while (false)
-static bool IsExperimentalInstructionEnabled(const Instruction *inst) {
- DCHECK(inst->IsExperimental());
- return Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas);
-}
-
template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register,
@@ -116,10 +108,6 @@
ArtMethod* method = shadow_frame.GetMethod();
jit::Jit* jit = Runtime::Current()->GetJit();
- // TODO: collapse capture-variable+create-lambda into one opcode, then we won't need
- // to keep this live for the scope of the entire function call.
- std::unique_ptr<lambda::ClosureBuilder> lambda_closure_builder;
- size_t lambda_captured_variable_index = 0;
do {
dex_pc = inst->GetDexPc(insns);
shadow_frame.SetDexPC(dex_pc);
@@ -2333,103 +2321,8 @@
(inst->VRegC_22b() & 0x1f));
inst = inst->Next_2xx();
break;
- case Instruction::INVOKE_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoInvokeLambda<do_access_check>(self, shadow_frame, inst, inst_data,
- &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::CAPTURE_VARIABLE: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- if (lambda_closure_builder == nullptr) {
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- PREAMBLE();
- bool success = DoCaptureVariable<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::CREATE_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
-
- if (lambda_closure_builder == nullptr) {
- // DoCreateLambda always needs a ClosureBuilder, even if it has 0 captured variables.
- lambda_closure_builder = MakeUnique<lambda::ClosureBuilder>();
- }
-
- // TODO: these allocations should not leak, and the lambda method should not be local.
- lambda::Closure* lambda_closure =
- reinterpret_cast<lambda::Closure*>(alloca(lambda_closure_builder->GetSize()));
- bool success = DoCreateLambda<do_access_check>(self,
- inst,
- /*inout*/shadow_frame,
- /*inout*/lambda_closure_builder.get(),
- /*inout*/lambda_closure);
- lambda_closure_builder.reset(nullptr); // reset state of variables captured
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::LIBERATE_VARIABLE: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoLiberateVariable<do_access_check>(self,
- inst,
- lambda_captured_variable_index,
- /*inout*/shadow_frame);
- // Temporarily only allow sequences of 'liberate-variable, liberate-variable, ...'
- lambda_captured_variable_index++;
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::UNUSED_F4: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- CHECK(false); // TODO(iam): Implement opcodes for lambdas
- break;
- }
- case Instruction::BOX_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoBoxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::UNBOX_LAMBDA: {
- if (!IsExperimentalInstructionEnabled(inst)) {
- UnexpectedOpcode(inst, shadow_frame);
- }
-
- PREAMBLE();
- bool success = DoUnboxLambda<do_access_check>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
- case Instruction::UNUSED_FA ... Instruction::UNUSED_FF:
+ case Instruction::UNUSED_F3 ... Instruction::UNUSED_FF:
case Instruction::UNUSED_79:
case Instruction::UNUSED_7A:
UnexpectedOpcode(inst, shadow_frame);
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
index 436dcd2..b6caf11 100644
--- a/runtime/interpreter/mterp/config_arm
+++ b/runtime/interpreter/mterp/config_arm
@@ -279,13 +279,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index 6427ead..c5e06c7 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -277,13 +277,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
index c6292c3..515cb0b 100644
--- a/runtime/interpreter/mterp/config_mips
+++ b/runtime/interpreter/mterp/config_mips
@@ -279,13 +279,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
index c40c007..aafd248 100644
--- a/runtime/interpreter/mterp/config_mips64
+++ b/runtime/interpreter/mterp/config_mips64
@@ -279,13 +279,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
index f1501e1..64d8ee8 100644
--- a/runtime/interpreter/mterp/config_x86
+++ b/runtime/interpreter/mterp/config_x86
@@ -283,13 +283,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
index 1d7eb03..7c357db 100644
--- a/runtime/interpreter/mterp/config_x86_64
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -283,13 +283,13 @@
# op op_iget_byte_quick FALLBACK
# op op_iget_char_quick FALLBACK
# op op_iget_short_quick FALLBACK
- op op_invoke_lambda FALLBACK
+ # op op_unused_f3 FALLBACK
# op op_unused_f4 FALLBACK
- op op_capture_variable FALLBACK
- op op_create_lambda FALLBACK
- op op_liberate_variable FALLBACK
- op op_box_lambda FALLBACK
- op op_unbox_lambda FALLBACK
+ # op op_unused_f5 FALLBACK
+ # op op_unused_f6 FALLBACK
+ # op op_unused_f7 FALLBACK
+ # op op_unused_f8 FALLBACK
+ # op op_unused_f9 FALLBACK
# op op_unused_fa FALLBACK
# op op_unused_fb FALLBACK
# op op_unused_fc FALLBACK
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f3.S b/runtime/interpreter/mterp/mips64/op_unused_f3.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f3.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f5.S b/runtime/interpreter/mterp/mips64/op_unused_f5.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f5.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f6.S b/runtime/interpreter/mterp/mips64/op_unused_f6.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f6.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f7.S b/runtime/interpreter/mterp/mips64/op_unused_f7.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f7.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f8.S b/runtime/interpreter/mterp/mips64/op_unused_f8.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f8.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f9.S b/runtime/interpreter/mterp/mips64/op_unused_f9.S
new file mode 100644
index 0000000..29463d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_unused_f9.S
@@ -0,0 +1 @@
+%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index df25767..1bcdd76 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -7201,9 +7201,13 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f3: /* 0xf3 */
+/* File: arm/op_unused_f3.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -7219,37 +7223,57 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f5: /* 0xf5 */
+/* File: arm/op_unused_f5.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f6: /* 0xf6 */
+/* File: arm/op_unused_f6.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f7: /* 0xf7 */
+/* File: arm/op_unused_f7.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f8: /* 0xf8 */
+/* File: arm/op_unused_f8.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f9: /* 0xf9 */
+/* File: arm/op_unused_f9.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -11564,7 +11588,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11598,7 +11622,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11615,7 +11639,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11632,7 +11656,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11649,7 +11673,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11666,7 +11690,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index de37e07..136bf20 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -6741,9 +6741,13 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f3: /* 0xf3 */
+/* File: arm64/op_unused_f3.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -6759,37 +6763,57 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f5: /* 0xf5 */
+/* File: arm64/op_unused_f5.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f6: /* 0xf6 */
+/* File: arm64/op_unused_f6.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f7: /* 0xf7 */
+/* File: arm64/op_unused_f7.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f8: /* 0xf8 */
+/* File: arm64/op_unused_f8.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f9: /* 0xf9 */
+/* File: arm64/op_unused_f9.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
/* ------------------------------ */
@@ -11332,7 +11356,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11366,7 +11390,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11383,7 +11407,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11400,7 +11424,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11417,7 +11441,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11434,7 +11458,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 5e0c19f..fef7dc6 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -7547,9 +7547,14 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f3: /* 0xf3 */
+/* File: mips/op_unused_f3.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
@@ -7564,33 +7569,58 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f5: /* 0xf5 */
+/* File: mips/op_unused_f5.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f6: /* 0xf6 */
+/* File: mips/op_unused_f6.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f7: /* 0xf7 */
+/* File: mips/op_unused_f7.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f8: /* 0xf8 */
+/* File: mips/op_unused_f8.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+.L_op_unused_f9: /* 0xf9 */
+/* File: mips/op_unused_f9.S */
+/* File: mips/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
/* ------------------------------ */
.balign 128
@@ -12381,7 +12411,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12417,7 +12447,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12435,7 +12465,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12453,7 +12483,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12471,7 +12501,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12489,7 +12519,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 35fbe94..a061f1e 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -7003,10 +7003,15 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f3: /* 0xf3 */
+/* File: mips64/op_unused_f3.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
.L_op_unused_f4: /* 0xf4 */
@@ -7020,34 +7025,59 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f5: /* 0xf5 */
+/* File: mips64/op_unused_f5.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f6: /* 0xf6 */
+/* File: mips64/op_unused_f6.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f7: /* 0xf7 */
+/* File: mips64/op_unused_f7.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f8: /* 0xf8 */
+/* File: mips64/op_unused_f8.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f9: /* 0xf9 */
+/* File: mips64/op_unused_f9.S */
+/* File: mips64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
b MterpFallback
+
/* ------------------------------ */
.balign 128
.L_op_unused_fa: /* 0xfa */
@@ -11799,7 +11829,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11837,7 +11867,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11856,7 +11886,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11875,7 +11905,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11894,7 +11924,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11913,7 +11943,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 5caaa80..29ee248 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -6201,8 +6201,12 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f3: /* 0xf3 */
+/* File: x86/op_unused_f3.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -6219,36 +6223,56 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f5: /* 0xf5 */
+/* File: x86/op_unused_f5.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f6: /* 0xf6 */
+/* File: x86/op_unused_f6.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f7: /* 0xf7 */
+/* File: x86/op_unused_f7.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f8: /* 0xf8 */
+/* File: x86/op_unused_f8.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f9: /* 0xf9 */
+/* File: x86/op_unused_f9.S */
+/* File: x86/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -12178,7 +12202,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12226,7 +12250,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12250,7 +12274,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12274,7 +12298,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12298,7 +12322,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12322,7 +12346,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 2f7b854..bc1abcc 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -5966,8 +5966,12 @@
/* ------------------------------ */
.balign 128
-.L_op_invoke_lambda: /* 0xf3 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f3: /* 0xf3 */
+/* File: x86_64/op_unused_f3.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -5984,36 +5988,56 @@
/* ------------------------------ */
.balign 128
-.L_op_capture_variable: /* 0xf5 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f5: /* 0xf5 */
+/* File: x86_64/op_unused_f5.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_create_lambda: /* 0xf6 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f6: /* 0xf6 */
+/* File: x86_64/op_unused_f6.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_liberate_variable: /* 0xf7 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f7: /* 0xf7 */
+/* File: x86_64/op_unused_f7.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_box_lambda: /* 0xf8 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f8: /* 0xf8 */
+/* File: x86_64/op_unused_f8.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unbox_lambda: /* 0xf9 */
-/* Transfer stub to alternate interpreter */
+.L_op_unused_f9: /* 0xf9 */
+/* File: x86_64/op_unused_f9.S */
+/* File: x86_64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
jmp MterpFallback
@@ -11457,7 +11481,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_invoke_lambda: /* 0xf3 */
+.L_ALT_op_unused_f3: /* 0xf3 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11501,7 +11525,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_capture_variable: /* 0xf5 */
+.L_ALT_op_unused_f5: /* 0xf5 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11523,7 +11547,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_create_lambda: /* 0xf6 */
+.L_ALT_op_unused_f6: /* 0xf6 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11545,7 +11569,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_liberate_variable: /* 0xf7 */
+.L_ALT_op_unused_f7: /* 0xf7 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11567,7 +11591,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_box_lambda: /* 0xf8 */
+.L_ALT_op_unused_f8: /* 0xf8 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11589,7 +11613,7 @@
/* ------------------------------ */
.balign 128
-.L_ALT_op_unbox_lambda: /* 0xf9 */
+.L_ALT_op_unused_f9: /* 0xf9 */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/x86/op_unused_f3.S b/runtime/interpreter/mterp/x86/op_unused_f3.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f3.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f5.S b/runtime/interpreter/mterp/x86/op_unused_f5.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f5.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f6.S b/runtime/interpreter/mterp/x86/op_unused_f6.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f6.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f7.S b/runtime/interpreter/mterp/x86/op_unused_f7.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f7.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f8.S b/runtime/interpreter/mterp/x86/op_unused_f8.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f8.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f9.S b/runtime/interpreter/mterp/x86/op_unused_f9.S
new file mode 100644
index 0000000..31d98c1
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_unused_f9.S
@@ -0,0 +1 @@
+%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f3.S b/runtime/interpreter/mterp/x86_64/op_unused_f3.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f3.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f5.S b/runtime/interpreter/mterp/x86_64/op_unused_f5.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f5.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f6.S b/runtime/interpreter/mterp/x86_64/op_unused_f6.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f6.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f7.S b/runtime/interpreter/mterp/x86_64/op_unused_f7.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f7.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f8.S b/runtime/interpreter/mterp/x86_64/op_unused_f8.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f8.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f9.S b/runtime/interpreter/mterp/x86_64/op_unused_f9.S
new file mode 100644
index 0000000..280615f
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_unused_f9.S
@@ -0,0 +1 @@
+%include "x86_64/unused.S"
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 57443f1..a0e0e62 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -41,6 +41,7 @@
#include "mirror/array-inl.h"
#include "mirror/class.h"
#include "mirror/field-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc
deleted file mode 100644
index 6f9f8bb..0000000
--- a/runtime/lambda/art_lambda_method.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/art_lambda_method.h"
-
-#include "base/logging.h"
-#include "lambda/shorty_field_type.h"
-
-namespace art {
-namespace lambda {
-
-ArtLambdaMethod::ArtLambdaMethod(ArtMethod* target_method,
- const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty,
- bool innate_lambda)
- : method_(target_method),
- captured_variables_type_descriptor_(captured_variables_type_descriptor),
- captured_variables_shorty_(captured_variables_shorty),
- innate_lambda_(innate_lambda) {
- DCHECK(target_method != nullptr);
- DCHECK(captured_variables_type_descriptor != nullptr);
- DCHECK(captured_variables_shorty != nullptr);
-
- // Calculate the static closure size from the captured variables.
- size_t size = sizeof(ArtLambdaMethod*); // Initial size is just this method.
- bool static_size = true;
- const char* shorty = captured_variables_shorty_;
- while (shorty != nullptr && *shorty != '\0') {
- // Each captured variable also appends to the size.
- ShortyFieldType shorty_field{*shorty}; // NOLINT [readability/braces] [4]
- size += shorty_field.GetStaticSize();
- static_size &= shorty_field.IsStaticSize();
- ++shorty;
- }
- closure_size_ = size;
-
- // We determine whether or not the size is dynamic by checking for nested lambdas.
- //
- // This is conservative, since in theory an optimization could determine the size
- // of the nested lambdas recursively. In practice it's probably better to flatten out
- // nested lambdas and inline all their code if they are known statically.
- dynamic_size_ = !static_size;
-
- if (kIsDebugBuild) {
- // Double check that the number of captured variables match in both strings.
- size_t shorty_count = strlen(captured_variables_shorty);
-
- size_t long_count = 0;
- const char* long_type = captured_variables_type_descriptor;
- ShortyFieldType out;
- while ((long_type = ShortyFieldType::ParseFromFieldTypeDescriptor(long_type, &out))
- != nullptr) {
- ++long_count;
- }
-
- DCHECK_EQ(shorty_count, long_count)
- << "number of captured variables in long type '" << captured_variables_type_descriptor
- << "' (" << long_count << ")" << " did not match short type '"
- << captured_variables_shorty << "' (" << shorty_count << ")";
- }
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
deleted file mode 100644
index ea13eb7..0000000
--- a/runtime/lambda/art_lambda_method.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
-#define ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
-
-#include "base/macros.h"
-#include "art_method.h"
-
-#include <stdint.h>
-
-namespace art {
-namespace lambda {
-
-class ArtLambdaMethod {
- public:
- // Construct an art lambda method.
- // The target method is the one invoked by invoke-lambda.
- // The type descriptor describes the types of variables captured, e.g. "ZFLObject;\FI;[Z"
- // The shorty drops the object name and treats arrays as objects, e.g. "ZFL\L"
- // Innate lambda means that the lambda was originally created via invoke-lambda.
- // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
- // (Ownership of strings is retained by the caller and the lifetime should exceed this class).
- ArtLambdaMethod(ArtMethod* target_method,
- const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty,
- bool innate_lambda = true);
-
- // Get the target method for this lambda that would be used by the invoke-lambda dex instruction.
- ArtMethod* GetArtMethod() const {
- return method_;
- }
-
- // Get the compile-time size of lambda closures for this method in bytes.
- // This is circular (that is, it includes the size of the ArtLambdaMethod pointer).
- // One should also check if the size is dynamic since nested lambdas have a runtime size.
- size_t GetStaticClosureSize() const {
- return closure_size_;
- }
-
- // Get the type descriptor for the list of captured variables.
- // e.g. "ZFLObject;\FI;[Z" means a captured int, float, class Object, lambda FI, array of ints
- const char* GetCapturedVariablesTypeDescriptor() const {
- return captured_variables_type_descriptor_;
- }
-
- // Get the shorty 'field' type descriptor list of captured variables.
- // This follows the same rules as a string of ShortyFieldType in the dex specification.
- // Every captured variable is represented by exactly one character.
- // - Objects become 'L'.
- // - Arrays become 'L'.
- // - Lambdas become '\'.
- const char* GetCapturedVariablesShortyTypeDescriptor() const {
- return captured_variables_shorty_;
- }
-
- // Will the size of this lambda change at runtime?
- // Only returns true if there is a nested lambda that we can't determine statically the size of.
- bool IsDynamicSize() const {
- return dynamic_size_;
- }
-
- // Will the size of this lambda always be constant at runtime?
- // This generally means there's no nested lambdas, or we were able to successfully determine
- // their size statically at compile time.
- bool IsStaticSize() const {
- return !IsDynamicSize();
- }
- // Is this a lambda that was originally created via invoke-lambda?
- // -- Non-innate lambdas (learned lambdas) come from a regular class that was boxed to lambda.
- bool IsInnateLambda() const {
- return innate_lambda_;
- }
-
- // How many variables were captured?
- // (Each nested lambda counts as 1 captured var regardless of how many captures it itself has).
- size_t GetNumberOfCapturedVariables() const {
- return strlen(captured_variables_shorty_);
- }
-
- private:
- // TODO: ArtMethod, or at least the entry points should be inlined into this struct
- // to avoid an extra indirect load when doing invokes.
- // Target method that invoke-lambda will jump to.
- ArtMethod* method_;
- // How big the closure is (in bytes). Only includes the constant size.
- size_t closure_size_;
- // The type descriptor for the captured variables, e.g. "IS" for [int, short]
- const char* captured_variables_type_descriptor_;
- // The shorty type descriptor for captured vars, (e.g. using 'L' instead of 'LObject;')
- const char* captured_variables_shorty_;
- // Whether or not the size is dynamic. If it is, copiers need to read the Closure size at runtime.
- bool dynamic_size_;
- // True if this lambda was originally made with create-lambda,
- // false if it came from a class instance (through new-instance and then unbox-lambda).
- bool innate_lambda_;
-
- DISALLOW_COPY_AND_ASSIGN(ArtLambdaMethod);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_ART_LAMBDA_METHOD_H_
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
deleted file mode 100644
index 9918bb7..0000000
--- a/runtime/lambda/box_table.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/box_table.h"
-
-#include "base/mutex.h"
-#include "common_throws.h"
-#include "gc_root-inl.h"
-#include "lambda/closure.h"
-#include "lambda/leaking_allocator.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "thread.h"
-
-#include <vector>
-
-namespace art {
-namespace lambda {
-// Temporarily represent the lambda Closure as its raw bytes in an array.
-// TODO: Generate a proxy class for the closure when boxing the first time.
-using BoxedClosurePointerType = mirror::ByteArray*;
-
-static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return mirror::ByteArray::GetArrayClass();
-}
-
-namespace {
- // Convenience functions to allocating/deleting box table copies of the closures.
- struct ClosureAllocator {
- // Deletes a Closure that was allocated through ::Allocate.
- static void Delete(Closure* ptr) {
- delete[] reinterpret_cast<char*>(ptr);
- }
-
- // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
- static Closure* Allocate(size_t size) {
- DCHECK_GE(size, sizeof(Closure));
-
- // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
- Closure* closure = reinterpret_cast<Closure*>(new char[size]);
- DCHECK_ALIGNED(closure, alignof(Closure));
- return closure;
- }
- };
-} // namespace
-
-BoxTable::BoxTable()
- : allow_new_weaks_(true),
- new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
-
-BoxTable::~BoxTable() {
- // Free all the copies of our closures.
- for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
- Closure* closure = key_value_pair.first;
-
- // Remove from the map first, so that it doesn't try to access dangling pointer.
- map_iterator = map_.Erase(map_iterator);
-
- // Safe to delete, no dangling pointers.
- ClosureAllocator::Delete(closure);
- }
-}
-
-mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
- Thread* self = Thread::Current();
-
- {
- // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
- /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
- BlockUntilWeaksAllowed();
-
- // Attempt to look up this object, it's possible it was already boxed previously.
- // If this is the case we *must* return the same object as before to maintain
- // referential equality.
- //
- // In managed code:
- // Functional f = () -> 5; // vF = create-lambda
- // Object a = f; // vA = box-lambda vA
- // Object b = f; // vB = box-lambda vB
- // assert(a == f)
- ValueType value = FindBoxedLambda(closure);
- if (!value.IsNull()) {
- return value.Read();
- }
-
- // Otherwise we need to box ourselves and insert it into the hash map
- }
-
- // Release the lambda table lock here, so that thread suspension is allowed.
-
- // Convert the Closure into a managed byte[] which will serve
- // as the temporary 'boxed' version of the lambda. This is good enough
- // to check all the basic object identities that a boxed lambda must retain.
- // It's also good enough to contain all the captured primitive variables.
-
- // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
- // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- BoxedClosurePointerType closure_as_array_object =
- mirror::ByteArray::Alloc(self, closure->GetSize());
-
- // There are no thread suspension points after this, so we don't need to put it into a handle.
-
- if (UNLIKELY(closure_as_array_object == nullptr)) {
- // Most likely an OOM has occurred.
- CHECK(self->IsExceptionPending());
- return nullptr;
- }
-
- // Write the raw closure data into the byte[].
- closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
- 0 /*index*/), // index
- closure_as_array_object->GetLength());
-
- // The method has been successfully boxed into an object, now insert it into the hash map.
- {
- MutexLock mu(self, *Locks::lambda_table_lock_);
- BlockUntilWeaksAllowed();
-
- // Lookup the object again, it's possible another thread already boxed it while
- // we were allocating the object before.
- ValueType value = FindBoxedLambda(closure);
- if (UNLIKELY(!value.IsNull())) {
- // Let the GC clean up method_as_object at a later time.
- return value.Read();
- }
-
- // Otherwise we need to insert it into the hash map in this thread.
-
- // Make a copy for the box table to keep, in case the closure gets collected from the stack.
- // TODO: GC may need to sweep for roots in the box table's copy of the closure.
- Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
- closure->CopyTo(closure_table_copy, closure->GetSize());
-
- // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
-
- // Actually insert into the table.
- map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
- }
-
- return closure_as_array_object;
-}
-
-bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
- DCHECK(object != nullptr);
- *out_closure = nullptr;
-
- Thread* self = Thread::Current();
-
- // Note that we do not need to access lambda_table_lock_ here
- // since we don't need to look at the map.
-
- mirror::Object* boxed_closure_object = object;
-
- // Raise ClassCastException if object is not instanceof byte[]
- if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
- ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
- return false;
- }
-
- // TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a byte[].
-
- // If we got this far, the inputs are valid.
- // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
- BoxedClosurePointerType boxed_closure_as_array =
- down_cast<BoxedClosurePointerType>(boxed_closure_object);
-
- const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
-
- // Allocate a copy that can "escape" and copy the closure data into that.
- Closure* unboxed_closure =
- LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
- // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
- memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
-
- DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
-
- *out_closure = unboxed_closure;
- return true;
-}
-
-BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
- auto map_iterator = map_.Find(closure);
- if (map_iterator != map_.end()) {
- const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
- const ValueType& value = key_value_pair.second;
-
- DCHECK(!value.IsNull()); // Never store null boxes.
- return value;
- }
-
- return ValueType(nullptr);
-}
-
-void BoxTable::BlockUntilWeaksAllowed() {
- Thread* self = Thread::Current();
- while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
- new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock
- }
-}
-
-void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
- DCHECK(visitor != nullptr);
-
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- /*
- * Visit every weak root in our lambda box table.
- * Remove unmarked objects, update marked objects to new address.
- */
- std::vector<ClosureType> remove_list;
- for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
- const ValueType& old_value = key_value_pair.second;
-
- // This does not need a read barrier because this is called by GC.
- mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
- mirror::Object* new_value = visitor->IsMarked(old_value_raw);
-
- if (new_value == nullptr) {
- // The object has been swept away.
- const ClosureType& closure = key_value_pair.first;
-
- // Delete the entry from the map.
- map_iterator = map_.Erase(map_iterator);
-
- // Clean up the memory by deleting the closure.
- ClosureAllocator::Delete(closure);
-
- } else {
- // The object has been moved.
- // Update the map.
- key_value_pair.second = ValueType(new_value);
- ++map_iterator;
- }
- }
-
- // Occasionally shrink the map to avoid growing very large.
- if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
- map_.ShrinkToMaximumLoad();
- }
-}
-
-void BoxTable::DisallowNewWeakBoxedLambdas() {
- CHECK(!kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- allow_new_weaks_ = false;
-}
-
-void BoxTable::AllowNewWeakBoxedLambdas() {
- CHECK(!kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
-
- allow_new_weaks_ = true;
- new_weaks_condition_.Broadcast(self);
-}
-
-void BoxTable::BroadcastForNewWeakBoxedLambdas() {
- CHECK(kUseReadBarrier);
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::lambda_table_lock_);
- new_weaks_condition_.Broadcast(self);
-}
-
-void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
- item.first = nullptr;
-
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- item.second = ValueType(); // Also clear the GC root.
-}
-
-bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- return item.first == nullptr;
-}
-
-bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
- const UnorderedMapKeyType& rhs) const {
- // Nothing needs this right now, but leave this assertion for later when
- // we need to look at the references inside of the closure.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-
- return lhs->ReferenceEquals(rhs);
-}
-
-size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
- const lambda::Closure* closure = key;
- DCHECK_ALIGNED(closure, alignof(lambda::Closure));
-
- // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- return closure->GetHashCode();
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
deleted file mode 100644
index adb7332..0000000
--- a/runtime/lambda/box_table.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
-#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_
-
-#include "base/allocator.h"
-#include "base/hash_map.h"
-#include "gc_root.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "object_callbacks.h"
-
-#include <stdint.h>
-
-namespace art {
-
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-struct Closure; // forward declaration
-
-/*
- * Store a table of boxed lambdas. This is required to maintain object referential equality
- * when a lambda is re-boxed.
- *
- * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
- * When too many objects get GCd, we shrink the underlying table to use less space.
- */
-class BoxTable FINAL {
- public:
- using ClosureType = art::lambda::Closure*;
-
- // Boxes a closure into an object. Returns null and throws an exception on failure.
- mirror::Object* BoxLambda(const ClosureType& closure)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
-
- // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
- bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Sweep weak references to lambda boxes. Update the addresses if the objects have been
- // moved, and delete them from the table if the objects have been cleaned up.
- void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Temporarily block anyone from touching the map.
- void DisallowNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Unblock any readers who have been queued waiting to touch the map.
- void AllowNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- // GC callback: Unblock any readers who have been queued waiting to touch the map.
- void BroadcastForNewWeakBoxedLambdas()
- REQUIRES(!Locks::lambda_table_lock_);
-
- BoxTable();
- ~BoxTable();
-
- private:
- // Explanation:
- // - After all threads are suspended (exclusive mutator lock),
- // the concurrent-copying GC can move objects from the "from" space to the "to" space.
- // If an object is moved at that time and *before* SweepSystemWeaks are called then
- // we don't know if the move has happened yet.
- // Successive reads will then (incorrectly) look at the objects in the "from" space,
- // which is a problem since the objects have been already forwarded and mutations
- // would not be visible in the right space.
- // Instead, use a GcRoot here which will be automatically updated by the GC.
- //
- // Also, any reads should be protected by a read barrier to always give us the "to" space address.
- using ValueType = GcRoot<mirror::Object>;
-
- // Attempt to look up the lambda in the map, or return null if it's not there yet.
- ValueType FindBoxedLambda(const ClosureType& closure) const
- SHARED_REQUIRES(Locks::lambda_table_lock_);
-
- // If the GC has come in and temporarily disallowed touching weaks, block until is it allowed.
- void BlockUntilWeaksAllowed()
- SHARED_REQUIRES(Locks::lambda_table_lock_);
-
- // Wrap the Closure into a unique_ptr so that the HashMap can delete its memory automatically.
- using UnorderedMapKeyType = ClosureType;
-
- // EmptyFn implementation for art::HashMap
- struct EmptyFn {
- void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
-
- bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
- };
-
- // HashFn implementation for art::HashMap
- struct HashFn {
- size_t operator()(const UnorderedMapKeyType& key) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
- };
-
- // EqualsFn implementation for art::HashMap
- struct EqualsFn {
- bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
- };
-
- using UnorderedMap = art::HashMap<UnorderedMapKeyType,
- ValueType,
- EmptyFn,
- HashFn,
- EqualsFn,
- TrackingAllocator<std::pair<ClosureType, ValueType>,
- kAllocatorTagLambdaBoxTable>>;
-
- UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
- bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
- ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);
-
- // Shrink the map when we get below this load factor.
- // (This is an arbitrary value that should be large enough to prevent aggressive map erases
- // from shrinking the table too often.)
- static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
-
- DISALLOW_COPY_AND_ASSIGN(BoxTable);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_BOX_TABLE_H_
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
deleted file mode 100644
index 179e4ee..0000000
--- a/runtime/lambda/closure.cc
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/closure.h"
-
-#include "base/logging.h"
-#include "lambda/art_lambda_method.h"
-#include "runtime/mirror/object_reference.h"
-
-static constexpr const bool kClosureSupportsReferences = false;
-static constexpr const bool kClosureSupportsGarbageCollection = false;
-
-namespace art {
-namespace lambda {
-
-template <typename T>
-// TODO: can I return T __attribute__((__aligned__(1)))* here instead?
-const uint8_t* Closure::GetUnsafeAtOffset(size_t offset) const {
- // Do not DCHECK here with existing helpers since most of them will call into this function.
- return reinterpret_cast<const uint8_t*>(captured_) + offset;
-}
-
-size_t Closure::GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const {
- switch (variable_type) {
- case ShortyFieldType::kLambda:
- {
- return GetClosureSize(GetUnsafeAtOffset<Closure>(offset));
- }
- default:
- DCHECK(variable_type.IsStaticSize());
- return variable_type.GetStaticSize();
- }
-}
-
-// Templatize the flags to give the compiler a fighting chance to eliminate
-// any unnecessary code through different uses of this function.
-template <Closure::VariableInfo::Flags flags>
-inline Closure::VariableInfo Closure::ParseTypeDescriptor(const char* type_descriptor,
- size_t upto_index) const {
- DCHECK(type_descriptor != nullptr);
-
- VariableInfo result;
-
- ShortyFieldType last_type;
- size_t offset = (flags & VariableInfo::kOffset) ? GetStartingOffset() : 0;
- size_t prev_offset = 0;
- size_t count = 0;
-
- while ((type_descriptor =
- ShortyFieldType::ParseFromFieldTypeDescriptor(type_descriptor, &last_type)) != nullptr) {
- count++;
-
- if (flags & VariableInfo::kOffset) {
- // Accumulate the sizes of all preceding captured variables as the current offset only.
- offset += prev_offset;
- prev_offset = GetCapturedVariableSize(last_type, offset);
- }
-
- if ((count > upto_index)) {
- break;
- }
- }
-
- if (flags & VariableInfo::kVariableType) {
- result.variable_type_ = last_type;
- }
-
- if (flags & VariableInfo::kIndex) {
- result.index_ = count;
- }
-
- if (flags & VariableInfo::kCount) {
- result.count_ = count;
- }
-
- if (flags & VariableInfo::kOffset) {
- result.offset_ = offset;
- }
-
- // TODO: We should probably store the result of this in the ArtLambdaMethod,
- // to avoid re-computing the data every single time for static closures.
- return result;
-}
-
-size_t Closure::GetCapturedVariablesSize() const {
- const size_t captured_variable_offset = offsetof(Closure, captured_);
- DCHECK_GE(GetSize(), captured_variable_offset); // Prevent underflows.
- return GetSize() - captured_variable_offset;
-}
-
-size_t Closure::GetSize() const {
- const size_t static_closure_size = lambda_info_->GetStaticClosureSize();
- if (LIKELY(lambda_info_->IsStaticSize())) {
- return static_closure_size;
- }
-
- DCHECK_GE(static_closure_size, sizeof(captured_[0].dynamic_.size_));
- const size_t dynamic_closure_size = captured_[0].dynamic_.size_;
- // The dynamic size better be at least as big as the static size.
- DCHECK_GE(dynamic_closure_size, static_closure_size);
-
- return dynamic_closure_size;
-}
-
-void Closure::CopyTo(void* target, size_t target_size) const {
- DCHECK_GE(target_size, GetSize());
-
- // TODO: using memcpy is unsafe with read barriers, fix this once we add reference support
- static_assert(kClosureSupportsReferences == false,
- "Do not use memcpy with readbarrier references");
- memcpy(target, this, GetSize());
-}
-
-ArtMethod* Closure::GetTargetMethod() const {
- return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
-}
-
-uint32_t Closure::GetHashCode() const {
- // Start with a non-zero constant, a prime number.
- uint32_t result = 17;
-
- // Include the hash with the ArtMethod.
- {
- uintptr_t method = reinterpret_cast<uintptr_t>(GetTargetMethod());
- result = 31 * result + Low32Bits(method);
- if (sizeof(method) == sizeof(uint64_t)) {
- result = 31 * result + High32Bits(method);
- }
- }
-
- // Include a hash for each captured variable.
- for (size_t i = 0; i < GetCapturedVariablesSize(); ++i) {
- // TODO: not safe for GC-able values since the address can move and the hash code would change.
- uint8_t captured_variable_raw_value;
- CopyUnsafeAtOffset<uint8_t>(i, /*out*/&captured_variable_raw_value); // NOLINT: [whitespace/comma] [3]
-
- result = 31 * result + captured_variable_raw_value;
- }
-
- // TODO: Fix above loop to work for objects and lambdas.
- static_assert(kClosureSupportsGarbageCollection == false,
- "Need to update above loop to read the hash code from the "
- "objects and lambdas recursively");
-
- return result;
-}
-
-bool Closure::ReferenceEquals(const Closure* other) const {
- DCHECK(other != nullptr);
-
- // TODO: Need rework to use read barriers once closures have references inside of them that can
- // move. Until then, it's safe to just compare the data inside of it directly.
- static_assert(kClosureSupportsReferences == false,
- "Unsafe to use memcmp in read barrier collector");
-
- if (GetSize() != other->GetSize()) {
- return false;
- }
-
- return memcmp(this, other, GetSize());
-}
-
-size_t Closure::GetNumberOfCapturedVariables() const {
- // TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kCount>(GetCapturedVariablesTypeDescriptor(),
- VariableInfo::kUpToIndexMax);
- size_t count = variable_info.count_;
- // Assuming each variable was 1 byte, the size should always be greater or equal than the count.
- DCHECK_LE(count, GetCapturedVariablesSize());
- return count;
-}
-
-const char* Closure::GetCapturedVariablesTypeDescriptor() const {
- return lambda_info_->GetCapturedVariablesTypeDescriptor();
-}
-
-ShortyFieldType Closure::GetCapturedShortyType(size_t index) const {
- DCHECK_LT(index, GetNumberOfCapturedVariables());
-
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kVariableType>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- return variable_info.variable_type_;
-}
-
-uint32_t Closure::GetCapturedPrimitiveNarrow(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsPrimitiveNarrow());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- uint32_t result = 0;
- static_assert(ShortyFieldTypeTraits::IsPrimitiveNarrowType<decltype(result)>(),
- "result must be a primitive narrow type");
- switch (variable_type) {
- case ShortyFieldType::kBoolean:
- CopyUnsafeAtOffset<bool>(offset, &result);
- break;
- case ShortyFieldType::kByte:
- CopyUnsafeAtOffset<uint8_t>(offset, &result);
- break;
- case ShortyFieldType::kChar:
- CopyUnsafeAtOffset<uint16_t>(offset, &result);
- break;
- case ShortyFieldType::kShort:
- CopyUnsafeAtOffset<int16_t>(offset, &result);
- break;
- case ShortyFieldType::kInt:
- CopyUnsafeAtOffset<int32_t>(offset, &result);
- break;
- case ShortyFieldType::kFloat:
- // XX: Maybe there should just be a GetCapturedPrimitive<T> to avoid this shuffle?
- // The interpreter's invoke seems to only special case references and wides,
- // everything else is treated as a generic 32-bit pattern.
- CopyUnsafeAtOffset<float>(offset, &result);
- break;
- default:
- LOG(FATAL)
- << "expected a valid narrow primitive shorty type but got "
- << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-uint64_t Closure::GetCapturedPrimitiveWide(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsPrimitiveWide());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- uint64_t result = 0;
- static_assert(ShortyFieldTypeTraits::IsPrimitiveWideType<decltype(result)>(),
- "result must be a primitive wide type");
- switch (variable_type) {
- case ShortyFieldType::kLong:
- CopyUnsafeAtOffset<int64_t>(offset, &result);
- break;
- case ShortyFieldType::kDouble:
- CopyUnsafeAtOffset<double>(offset, &result);
- break;
- default:
- LOG(FATAL)
- << "expected a valid primitive wide shorty type but got "
- << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-mirror::Object* Closure::GetCapturedObject(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsObject());
-
- ShortyFieldType variable_type;
- size_t offset;
- GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
-
- // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
- // so that we can avoid this nonsense regarding memcpy always overflowing.
- // Plus, this additional switching seems redundant since the interpreter
- // would've done it already, and knows the exact type.
- mirror::Object* result = nullptr;
- static_assert(ShortyFieldTypeTraits::IsObjectType<decltype(result)>(),
- "result must be an object type");
- switch (variable_type) {
- case ShortyFieldType::kObject:
- // TODO: This seems unsafe. This may need to use gcroots.
- static_assert(kClosureSupportsGarbageCollection == false,
- "May need GcRoots and definitely need mutator locks");
- {
- mirror::CompressedReference<mirror::Object> compressed_result;
- CopyUnsafeAtOffset<uint32_t>(offset, &compressed_result);
- result = compressed_result.AsMirrorPtr();
- }
- break;
- default:
- CHECK(false)
- << "expected a valid shorty type but got " << static_cast<char>(variable_type);
- UNREACHABLE();
- }
-
- return result;
-}
-
-size_t Closure::GetCapturedClosureSize(size_t index) const {
- DCHECK(GetCapturedShortyType(index).IsLambda());
- size_t offset = GetCapturedVariableOffset(index);
-
- auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
- size_t closure_size = GetClosureSize(captured_ptr + offset);
-
- return closure_size;
-}
-
-void Closure::CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const {
- DCHECK(GetCapturedShortyType(index).IsLambda());
- size_t offset = GetCapturedVariableOffset(index);
-
- auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
- size_t closure_size = GetClosureSize(captured_ptr + offset);
-
- static_assert(ShortyFieldTypeTraits::IsLambdaType<Closure*>(),
- "result must be a lambda type");
-
- CopyUnsafeAtOffset<Closure>(offset, destination, closure_size, destination_room);
-}
-
-size_t Closure::GetCapturedVariableOffset(size_t index) const {
- VariableInfo variable_info =
- ParseTypeDescriptor<VariableInfo::kOffset>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- size_t offset = variable_info.offset_;
-
- return offset;
-}
-
-void Closure::GetCapturedVariableTypeAndOffset(size_t index,
- ShortyFieldType* out_type,
- size_t* out_offset) const {
- DCHECK(out_type != nullptr);
- DCHECK(out_offset != nullptr);
-
- static constexpr const VariableInfo::Flags kVariableTypeAndOffset =
- static_cast<VariableInfo::Flags>(VariableInfo::kVariableType | VariableInfo::kOffset);
- VariableInfo variable_info =
- ParseTypeDescriptor<kVariableTypeAndOffset>(GetCapturedVariablesTypeDescriptor(),
- index);
-
- ShortyFieldType variable_type = variable_info.variable_type_;
- size_t offset = variable_info.offset_;
-
- *out_type = variable_type;
- *out_offset = offset;
-}
-
-template <typename T>
-void Closure::CopyUnsafeAtOffset(size_t offset,
- void* destination,
- size_t src_size,
- size_t destination_room) const {
- DCHECK_GE(destination_room, src_size);
- const uint8_t* data_ptr = GetUnsafeAtOffset<T>(offset);
- memcpy(destination, data_ptr, sizeof(T));
-}
-
-// TODO: This is kind of ugly. I would prefer an unaligned_ptr<Closure> here.
-// Unfortunately C++ doesn't let you lower the alignment (i.e. alignas(1) Closure*) is not legal.
-size_t Closure::GetClosureSize(const uint8_t* closure) {
- DCHECK(closure != nullptr);
-
- static_assert(!std::is_base_of<mirror::Object, Closure>::value,
- "It might be unsafe to call memcpy on a managed object");
-
- // Safe as long as it's not a mirror Object.
- // TODO: Should probably wrap this in like MemCpyNative or some such which statically asserts
- // we aren't trying to copy mirror::Object data around.
- ArtLambdaMethod* closure_info;
- memcpy(&closure_info, closure + offsetof(Closure, lambda_info_), sizeof(closure_info));
-
- if (LIKELY(closure_info->IsStaticSize())) {
- return closure_info->GetStaticClosureSize();
- }
-
- // The size is dynamic, so we need to read it from captured_variables_ portion.
- size_t dynamic_size;
- memcpy(&dynamic_size,
- closure + offsetof(Closure, captured_[0].dynamic_.size_),
- sizeof(dynamic_size));
- static_assert(sizeof(dynamic_size) == sizeof(captured_[0].dynamic_.size_),
- "Dynamic size type must match the structural type of the size");
-
- DCHECK_GE(dynamic_size, closure_info->GetStaticClosureSize());
- return dynamic_size;
-}
-
-size_t Closure::GetStartingOffset() const {
- static constexpr const size_t captured_offset = offsetof(Closure, captured_);
- if (LIKELY(lambda_info_->IsStaticSize())) {
- return offsetof(Closure, captured_[0].static_variables_) - captured_offset;
- } else {
- return offsetof(Closure, captured_[0].dynamic_.variables_) - captured_offset;
- }
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
deleted file mode 100644
index 31ff194..0000000
--- a/runtime/lambda/closure.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_H_
-
-#include "base/macros.h"
-#include "base/mutex.h" // For Locks::mutator_lock_.
-#include "lambda/shorty_field_type.h"
-
-#include <stdint.h>
-
-namespace art {
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-class ArtLambdaMethod; // forward declaration
-class ClosureBuilder; // forward declaration
-
-// Inline representation of a lambda closure.
-// Contains the target method and the set of packed captured variables as a copy.
-//
-// The closure itself is logically immutable, although in practice any object references
-// it (recursively) contains can be moved and updated by the GC.
-struct PACKED(sizeof(ArtLambdaMethod*)) Closure {
- // Get the size of the Closure in bytes.
- // This is necessary in order to allocate a large enough area to copy the Closure into.
- // Do *not* copy the closure with memcpy, since references also need to get moved.
- size_t GetSize() const;
-
- // Copy this closure into the target, whose memory size is specified by target_size.
- // Any object references are fixed up during the copy (if there was a read barrier).
- // The target_size must be at least as large as GetSize().
- void CopyTo(void* target, size_t target_size) const;
-
- // Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
- ArtMethod* GetTargetMethod() const;
-
- // Calculates the hash code. Value is recomputed each time.
- uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Is this the same closure as other? e.g. same target method, same variables captured.
- //
- // Determines whether the two Closures are interchangeable instances.
- // Does *not* call Object#equals recursively. If two Closures compare ReferenceEquals true that
- // means that they are interchangeable values (usually for the purpose of boxing/unboxing).
- bool ReferenceEquals(const Closure* other) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // How many variables were captured?
- size_t GetNumberOfCapturedVariables() const;
-
- // Returns a type descriptor string that represents each captured variable.
- // e.g. "Ljava/lang/Object;ZB" would mean a capture tuple of (Object, boolean, byte)
- const char* GetCapturedVariablesTypeDescriptor() const;
-
- // Returns the short type for the captured variable at index.
- // Index must be less than the number of captured variables.
- ShortyFieldType GetCapturedShortyType(size_t index) const;
-
- // Returns the 32-bit representation of a non-wide primitive at the captured variable index.
- // Smaller types are zero extended.
- // Index must be less than the number of captured variables.
- uint32_t GetCapturedPrimitiveNarrow(size_t index) const;
- // Returns the 64-bit representation of a wide primitive at the captured variable index.
- // Smaller types are zero extended.
- // Index must be less than the number of captured variables.
- uint64_t GetCapturedPrimitiveWide(size_t index) const;
- // Returns the object reference at the captured variable index.
- // The type at the index *must* be an object reference or a CHECK failure will occur.
- // Index must be less than the number of captured variables.
- mirror::Object* GetCapturedObject(size_t index) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Gets the size of a nested capture closure in bytes, at the captured variable index.
- // The type at the index *must* be a lambda closure or a CHECK failure will occur.
- size_t GetCapturedClosureSize(size_t index) const;
-
- // Copies a nested lambda closure at the captured variable index.
- // The destination must have enough room for the closure (see GetCapturedClosureSize).
- void CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const;
-
- private:
- // Read out any non-lambda value as a copy.
- template <typename T>
- T GetCapturedVariable(size_t index) const;
-
- // Reconstruct the closure's captured variable info at runtime.
- struct VariableInfo {
- size_t index_;
- ShortyFieldType variable_type_;
- size_t offset_;
- size_t count_;
-
- enum Flags {
- kIndex = 0x1,
- kVariableType = 0x2,
- kOffset = 0x4,
- kCount = 0x8,
- };
-
- // Traverse to the end of the type descriptor list instead of stopping at some particular index.
- static constexpr size_t kUpToIndexMax = static_cast<size_t>(-1);
- };
-
- // Parse a type descriptor, stopping at index "upto_index".
- // Returns only the information requested in flags. All other fields are indeterminate.
- template <VariableInfo::Flags flags>
- inline VariableInfo ALWAYS_INLINE ParseTypeDescriptor(const char* type_descriptor,
- size_t upto_index) const;
-
- // Convenience function to call ParseTypeDescriptor with just the type and offset.
- void GetCapturedVariableTypeAndOffset(size_t index,
- ShortyFieldType* out_type,
- size_t* out_offset) const;
-
- // How many bytes do the captured variables take up? Runtime sizeof(captured_variables).
- size_t GetCapturedVariablesSize() const;
- // Get the size in bytes of the variable_type which is potentially stored at offset.
- size_t GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const;
- // Get the starting offset (in bytes) for the 0th captured variable.
- // All offsets are relative to 'captured_'.
- size_t GetStartingOffset() const;
- // Get the offset for this index.
- // All offsets are relative to 'captuerd_'.
- size_t GetCapturedVariableOffset(size_t index) const;
-
- // Cast the data at '(char*)captured_[offset]' into T, returning its address.
- // This value should not be de-referenced directly since its unaligned.
- template <typename T>
- inline const uint8_t* GetUnsafeAtOffset(size_t offset) const;
-
- // Copy the data at the offset into the destination. DCHECKs that
- // the destination_room is large enough (in bytes) to fit the data.
- template <typename T>
- inline void CopyUnsafeAtOffset(size_t offset,
- void* destination,
- size_t src_size = sizeof(T),
- size_t destination_room = sizeof(T)) const;
-
- // Get the closure size from an unaligned (i.e. interior) closure pointer.
- static size_t GetClosureSize(const uint8_t* closure);
-
- ///////////////////////////////////////////////////////////////////////////////////
-
- // Compile-time known lambda information such as the type descriptor and size.
- ArtLambdaMethod* lambda_info_;
-
- // A contiguous list of captured variables, and possibly the closure size.
- // The runtime size can always be determined through GetSize().
- union {
- // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
- uint8_t static_variables_[0];
- struct {
- // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
- size_t size_; // The lambda_info_ and the size_ itself is also included as part of the size.
- uint8_t variables_[0];
- } dynamic_;
- } captured_[0];
- // captured_ will always consist of one array element at runtime.
- // Set to [0] so that 'size_' is not counted in sizeof(Closure).
-
- friend class ClosureBuilder;
- friend class ClosureTest;
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_H_
diff --git a/runtime/lambda/closure_builder-inl.h b/runtime/lambda/closure_builder-inl.h
deleted file mode 100644
index 3cec21f..0000000
--- a/runtime/lambda/closure_builder-inl.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
-
-#include "lambda/closure_builder.h"
-#include <string.h>
-
-namespace art {
-namespace lambda {
-
-template <typename T, ClosureBuilder::ShortyTypeEnum kShortyType>
-void ClosureBuilder::CaptureVariablePrimitive(T value) {
- static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a primitive type");
- const size_t type_size = ShortyFieldType(kShortyType).GetStaticSize();
- DCHECK_EQ(type_size, sizeof(T));
-
- // Copy the data while retaining the bit pattern. Strict-aliasing safe.
- ShortyFieldTypeTraits::MaxType value_storage = 0;
- memcpy(&value_storage, &value, sizeof(T));
-
- values_.push_back(value_storage);
- size_ += sizeof(T);
-
- shorty_types_ += kShortyType;
-}
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_INL_H_
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
deleted file mode 100644
index 739e965..0000000
--- a/runtime/lambda/closure_builder.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/closure_builder.h"
-
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/closure.h"
-#include "lambda/shorty_field_type.h"
-#include "runtime/mirror/object_reference.h"
-
-#include <stdint.h>
-#include <vector>
-
-namespace art {
-namespace lambda {
-
-/*
- * GC support TODOs:
- * (Although there's some code for storing objects, it is UNIMPLEMENTED(FATAL) because it is
- * incomplete).
- *
- * 1) GC needs to be able to traverse the Closure and visit any references.
- * It might be possible to get away with global roots in the short term.
- *
- * 2) Add brooks read barrier support. We can store the black/gray/white bits
- * in the lower 2 bits of the lambda art method pointer. Whenever a closure is copied
- * [to the stack] we'd need to add a cold path to turn it black.
- * (since there's only 3 colors, I can use the 4th value to indicate no-refs).
- * e.g. 0x0 = gray, 0x1 = white, 0x2 = black, 0x3 = no-nested-references
- * - Alternatively the GC can mark reference-less closures as always-black,
- * although it would need extra work to check for references.
- */
-
-void ClosureBuilder::CaptureVariableObject(mirror::Object* object) {
- auto compressed_reference = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(object);
- ShortyFieldTypeTraits::MaxType storage = 0;
-
- static_assert(sizeof(storage) >= sizeof(compressed_reference),
- "not enough room to store a compressed reference");
- memcpy(&storage, &compressed_reference, sizeof(compressed_reference));
-
- values_.push_back(storage);
- size_ += kObjectReferenceSize;
-
- static_assert(kObjectReferenceSize == sizeof(compressed_reference), "reference size mismatch");
-
- // TODO: needs more work to support concurrent GC
- if (kIsDebugBuild) {
- if (kUseReadBarrier) {
- UNIMPLEMENTED(FATAL) << "can't yet safely capture objects with read barrier";
- }
- }
-
- shorty_types_ += ShortyFieldType::kObject;
-}
-
-void ClosureBuilder::CaptureVariableLambda(Closure* closure) {
- DCHECK(closure != nullptr); // null closures not allowed, target method must be null instead.
- values_.push_back(reinterpret_cast<ShortyFieldTypeTraits::MaxType>(closure));
-
- if (LIKELY(is_dynamic_size_ == false)) {
- // Write in the extra bytes to store the dynamic size the first time.
- is_dynamic_size_ = true;
- size_ += sizeof(Closure::captured_[0].dynamic_.size_);
- }
-
- // A closure may be sized dynamically, so always query it for the true size.
- size_ += closure->GetSize();
-
- shorty_types_ += ShortyFieldType::kLambda;
-}
-
-size_t ClosureBuilder::GetSize() const {
- return size_;
-}
-
-size_t ClosureBuilder::GetCaptureCount() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
- return values_.size();
-}
-
-const std::string& ClosureBuilder::GetCapturedVariableShortyTypes() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
- return shorty_types_;
-}
-
-Closure* ClosureBuilder::CreateInPlace(void* memory, ArtLambdaMethod* target_method) const {
- DCHECK(memory != nullptr);
- DCHECK(target_method != nullptr);
- DCHECK_EQ(is_dynamic_size_, target_method->IsDynamicSize());
-
- CHECK_EQ(target_method->GetNumberOfCapturedVariables(), values_.size())
- << "number of variables captured at runtime does not match "
- << "number of variables captured at compile time";
-
- Closure* closure = new (memory) Closure;
- closure->lambda_info_ = target_method;
-
- static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size");
-
- size_t written_size;
- if (UNLIKELY(is_dynamic_size_)) {
- // The closure size must be set dynamically (i.e. nested lambdas).
- closure->captured_[0].dynamic_.size_ = GetSize();
- size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_);
- DCHECK_LE(header_size, GetSize());
- size_t variables_size = GetSize() - header_size;
- written_size =
- WriteValues(target_method,
- closure->captured_[0].dynamic_.variables_,
- header_size,
- variables_size);
- } else {
- // The closure size is known statically (i.e. no nested lambdas).
- DCHECK(GetSize() == target_method->GetStaticClosureSize());
- size_t header_size = offsetof(Closure, captured_[0].static_variables_);
- DCHECK_LE(header_size, GetSize());
- size_t variables_size = GetSize() - header_size;
- written_size =
- WriteValues(target_method,
- closure->captured_[0].static_variables_,
- header_size,
- variables_size);
- }
-
- DCHECK_EQ(written_size, closure->GetSize());
-
- return closure;
-}
-
-size_t ClosureBuilder::WriteValues(ArtLambdaMethod* target_method,
- uint8_t variables[],
- size_t header_size,
- size_t variables_size) const {
- size_t total_size = header_size;
- const char* shorty_types = target_method->GetCapturedVariablesShortyTypeDescriptor();
- DCHECK_STREQ(shorty_types, shorty_types_.c_str());
-
- size_t variables_offset = 0;
- size_t remaining_size = variables_size;
-
- const size_t shorty_count = target_method->GetNumberOfCapturedVariables();
- DCHECK_EQ(shorty_count, GetCaptureCount());
-
- for (size_t i = 0; i < shorty_count; ++i) {
- ShortyFieldType shorty{shorty_types[i]}; // NOLINT [readability/braces] [4]
-
- size_t var_size;
- if (LIKELY(shorty.IsStaticSize())) {
- // TODO: needs more work to support concurrent GC, e.g. read barriers
- if (kUseReadBarrier == false) {
- if (UNLIKELY(shorty.IsObject())) {
- UNIMPLEMENTED(FATAL) << "can't yet safely write objects with read barrier";
- }
- } else {
- if (UNLIKELY(shorty.IsObject())) {
- UNIMPLEMENTED(FATAL) << "writing objects not yet supported, no GC support";
- }
- }
-
- var_size = shorty.GetStaticSize();
- DCHECK_LE(var_size, sizeof(values_[i]));
-
- // Safe even for objects (non-read barrier case) if we never suspend
- // while the ClosureBuilder is live.
- // FIXME: Need to add GC support for references in a closure.
- memcpy(&variables[variables_offset], &values_[i], var_size);
- } else {
- DCHECK(shorty.IsLambda())
- << " don't support writing dynamically sized types other than lambda";
-
- ShortyFieldTypeTraits::MaxType closure_raw = values_[i];
- Closure* nested_closure = reinterpret_cast<Closure*>(closure_raw);
-
- DCHECK(nested_closure != nullptr);
- nested_closure->CopyTo(&variables[variables_offset], remaining_size);
-
- var_size = nested_closure->GetSize();
- }
-
- total_size += var_size;
- DCHECK_GE(remaining_size, var_size);
- remaining_size -= var_size;
-
- variables_offset += var_size;
- }
-
- DCHECK_EQ('\0', shorty_types[shorty_count]);
- DCHECK_EQ(variables_offset, variables_size);
-
- return total_size;
-}
-
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/closure_builder.h b/runtime/lambda/closure_builder.h
deleted file mode 100644
index 23eb484..0000000
--- a/runtime/lambda/closure_builder.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
-#define ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
-
-#include "base/macros.h"
-#include "base/mutex.h" // For Locks::mutator_lock_.
-#include "base/value_object.h"
-#include "lambda/shorty_field_type.h"
-
-#include <stdint.h>
-#include <vector>
-
-namespace art {
-class ArtMethod; // forward declaration
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-class ArtLambdaMethod; // forward declaration
-
-// Build a closure by capturing variables one at a time.
-// When all variables have been marked captured, the closure can be created in-place into
-// a target memory address.
-//
-// The mutator lock must be held for the duration of the lifetime of this object,
-// since it needs to temporarily store heap references into an internal list.
-class ClosureBuilder {
- public:
- using ShortyTypeEnum = decltype(ShortyFieldType::kByte);
-
- // Mark this primitive value to be captured as the specified type.
- template <typename T, ShortyTypeEnum kShortyType = ShortyFieldTypeSelectEnum<T>::value>
- void CaptureVariablePrimitive(T value);
-
- // Mark this object reference to be captured.
- void CaptureVariableObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Mark this lambda closure to be captured.
- void CaptureVariableLambda(Closure* closure);
-
- // Get the size (in bytes) of the closure.
- // This size is used to be able to allocate memory large enough to write the closure into.
- // Call 'CreateInPlace' to actually write the closure out.
- size_t GetSize() const;
-
- // Returns how many variables have been captured so far.
- size_t GetCaptureCount() const;
-
- // Get the list of captured variables' shorty field types.
- const std::string& GetCapturedVariableShortyTypes() const;
-
- // Creates a closure in-place and writes out the data into 'memory'.
- // Memory must be at least 'GetSize' bytes large.
- // All previously marked data to be captured is now written out.
- Closure* CreateInPlace(void* memory, ArtLambdaMethod* target_method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Locks need to be held for entire lifetime of ClosureBuilder.
- ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
- {}
-
- // Locks need to be held for entire lifetime of ClosureBuilder.
- ~ClosureBuilder() SHARED_REQUIRES(Locks::mutator_lock_)
- {}
-
- private:
- // Initial size a closure starts out before any variables are written.
- // Header size only.
- static constexpr size_t kInitialSize = sizeof(ArtLambdaMethod*);
-
- // Write a Closure's variables field from the captured variables.
- // variables_size specified in bytes, and only includes enough room to write variables into.
- // Returns the calculated actual size of the closure.
- size_t WriteValues(ArtLambdaMethod* target_method,
- uint8_t variables[],
- size_t header_size,
- size_t variables_size) const SHARED_REQUIRES(Locks::mutator_lock_);
-
- size_t size_ = kInitialSize;
- bool is_dynamic_size_ = false;
- std::vector<ShortyFieldTypeTraits::MaxType> values_;
- std::string shorty_types_;
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_CLOSURE_BUILDER_H_
diff --git a/runtime/lambda/closure_test.cc b/runtime/lambda/closure_test.cc
deleted file mode 100644
index 7c1bd0d..0000000
--- a/runtime/lambda/closure_test.cc
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_method.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/closure.h"
-#include "lambda/closure_builder.h"
-#include "lambda/closure_builder-inl.h"
-#include "utils.h"
-
-#include <numeric>
-#include <stdint.h>
-#include <type_traits>
-#include "gtest/gtest.h"
-
-// Turn this on for some extra printfs to help with debugging, since some code is optimized out.
-static constexpr const bool kDebuggingClosureTest = true;
-
-namespace std {
- using Closure = art::lambda::Closure;
-
- // Specialize std::default_delete so it knows how to properly delete closures
- // through the way we allocate them in this test.
- //
- // This is test-only because we don't want the rest of Art to do this.
- template <>
- struct default_delete<Closure> {
- void operator()(Closure* closure) const {
- delete[] reinterpret_cast<char*>(closure);
- }
- };
-} // namespace std
-
-namespace art {
-
-// Fake lock acquisition to please clang lock checker.
-// This doesn't actually acquire any locks because we don't need multiple threads in this gtest.
-struct SCOPED_CAPABILITY ScopedFakeLock {
- explicit ScopedFakeLock(MutatorMutex& mu) ACQUIRE(mu)
- : mu_(mu) {
- }
-
- ~ScopedFakeLock() RELEASE()
- {}
-
- MutatorMutex& mu_;
-};
-
-namespace lambda {
-
-class ClosureTest : public ::testing::Test {
- public:
- ClosureTest() = default;
- ~ClosureTest() = default;
-
- protected:
- static void SetUpTestCase() {
- }
-
- virtual void SetUp() {
- // Create a completely dummy method here.
- // It's "OK" because the Closure never needs to look inside of the ArtMethod
- // (it just needs to be non-null).
- uintptr_t ignore = 0xbadbad;
- fake_method_ = reinterpret_cast<ArtMethod*>(ignore);
- }
-
- static ::testing::AssertionResult IsResultSuccessful(bool result) {
- if (result) {
- return ::testing::AssertionSuccess();
- } else {
- return ::testing::AssertionFailure();
- }
- }
-
- // Create a closure that captures the static variables from 'args' by-value.
- // The lambda method's captured variables types must match the ones in 'args'.
- // -- This creates the closure directly in-memory by using memcpy.
- template <typename ... Args>
- static std::unique_ptr<Closure> CreateClosureStaticVariables(ArtLambdaMethod* lambda_method,
- Args&& ... args) {
- constexpr size_t header_size = sizeof(ArtLambdaMethod*);
- const size_t static_size = GetArgsSize(args ...) + header_size;
- EXPECT_GE(static_size, sizeof(Closure));
-
- // Can't just 'new' the Closure since we don't know the size up front.
- char* closure_as_char_array = new char[static_size];
- Closure* closure_ptr = new (closure_as_char_array) Closure;
-
- // Set up the data
- closure_ptr->lambda_info_ = lambda_method;
- CopyArgs(closure_ptr->captured_[0].static_variables_, args ...);
-
- // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
- return std::unique_ptr<Closure>(closure_ptr); // NOLINT [whitespace/braces] [5]
- }
-
- // Copy variadic arguments into the destination array with memcpy.
- template <typename T, typename ... Args>
- static void CopyArgs(uint8_t destination[], T&& arg, Args&& ... args) {
- memcpy(destination, &arg, sizeof(arg));
- CopyArgs(destination + sizeof(arg), args ...);
- }
-
- // Base case: Done.
- static void CopyArgs(uint8_t destination[]) {
- UNUSED(destination);
- }
-
- // Create a closure that captures the static variables from 'args' by-value.
- // The lambda method's captured variables types must match the ones in 'args'.
- // -- This uses ClosureBuilder interface to set up the closure indirectly.
- template <typename ... Args>
- static std::unique_ptr<Closure> CreateClosureStaticVariablesFromBuilder(
- ArtLambdaMethod* lambda_method,
- Args&& ... args) {
- // Acquire a fake lock since closure_builder needs it.
- ScopedFakeLock fake_lock(*Locks::mutator_lock_);
-
- ClosureBuilder closure_builder;
- CaptureVariableFromArgsList(/*out*/closure_builder, args ...);
-
- EXPECT_EQ(sizeof...(args), closure_builder.GetCaptureCount());
-
- constexpr size_t header_size = sizeof(ArtLambdaMethod*);
- const size_t static_size = GetArgsSize(args ...) + header_size;
- EXPECT_GE(static_size, sizeof(Closure));
-
- // For static variables, no nested closure, so size must match exactly.
- EXPECT_EQ(static_size, closure_builder.GetSize());
-
- // Can't just 'new' the Closure since we don't know the size up front.
- char* closure_as_char_array = new char[static_size];
- Closure* closure_ptr = new (closure_as_char_array) Closure;
-
- // The closure builder packs the captured variables into a Closure.
- closure_builder.CreateInPlace(closure_ptr, lambda_method);
-
- // Make sure the entire thing is deleted once the unique_ptr goes out of scope.
- return std::unique_ptr<Closure>(closure_ptr); // NOLINT [whitespace/braces] [5]
- }
-
- // Call the correct ClosureBuilder::CaptureVariableXYZ function based on the type of args.
- // Invokes for each arg in args.
- template <typename ... Args>
- static void CaptureVariableFromArgsList(/*out*/ClosureBuilder& closure_builder, Args ... args) {
- int ignore[] = {
- (CaptureVariableFromArgs(/*out*/closure_builder, args),0)... // NOLINT [whitespace/comma] [3]
- };
- UNUSED(ignore);
- }
-
- // ClosureBuilder::CaptureVariablePrimitive for types that are primitive only.
- template <typename T>
- typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveType<T>()>::type
- static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, T value) {
- static_assert(ShortyFieldTypeTraits::IsPrimitiveType<T>(), "T must be a shorty primitive");
- closure_builder.CaptureVariablePrimitive<T, ShortyFieldTypeSelectEnum<T>::value>(value);
- }
-
- // ClosureBuilder::CaptureVariableObject for types that are objects only.
- template <typename T>
- typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
- static CaptureVariableFromArgs(/*out*/ClosureBuilder& closure_builder, const T* object) {
- ScopedFakeLock fake_lock(*Locks::mutator_lock_);
- closure_builder.CaptureVariableObject(object);
- }
-
- // Sum of sizeof(Args...).
- template <typename T, typename ... Args>
- static constexpr size_t GetArgsSize(T&& arg, Args&& ... args) {
- return sizeof(arg) + GetArgsSize(args ...);
- }
-
- // Base case: Done.
- static constexpr size_t GetArgsSize() {
- return 0;
- }
-
- // Take "U" and memcpy it into a "T". T starts out as (T)0.
- template <typename T, typename U>
- static T ExpandingBitCast(const U& val) {
- static_assert(sizeof(T) >= sizeof(U), "U too large");
- T new_val = static_cast<T>(0);
- memcpy(&new_val, &val, sizeof(U));
- return new_val;
- }
-
- // Templatized extraction from closures by checking their type with enable_if.
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
- EXPECT_EQ(ExpandingBitCast<uint32_t>(value), closure->GetCapturedPrimitiveNarrow(index))
- << " with index " << index;
- }
-
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsPrimitiveWideType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, T value) {
- EXPECT_EQ(ExpandingBitCast<uint64_t>(value), closure->GetCapturedPrimitiveWide(index))
- << " with index " << index;
- }
-
- // Templatized SFINAE for Objects so we can get better error messages.
- template <typename T>
- static typename std::enable_if<ShortyFieldTypeTraits::IsObjectType<T>()>::type
- ExpectCapturedVariable(const Closure* closure, size_t index, const T* object) {
- EXPECT_EQ(object, closure->GetCapturedObject(index))
- << " with index " << index;
- }
-
- template <typename ... Args>
- void TestPrimitive(const char *descriptor, Args ... args) {
- const char* shorty = descriptor;
-
- SCOPED_TRACE(descriptor);
-
- ASSERT_EQ(strlen(shorty), sizeof...(args))
- << "test error: descriptor must have same # of types as the # of captured variables";
-
- // Important: This fake lambda method needs to out-live any Closures we create with it.
- ArtLambdaMethod lambda_method{fake_method_, // NOLINT [whitespace/braces] [5]
- descriptor, // NOLINT [whitespace/blank_line] [2]
- shorty,
- };
-
- std::unique_ptr<Closure> closure_a;
- std::unique_ptr<Closure> closure_b;
-
- // Test the closure twice when it's constructed in different ways.
- {
- // Create the closure in a "raw" manner, that is directly with memcpy
- // since we know the underlying data format.
- // This simulates how the compiler would lay out the data directly.
- SCOPED_TRACE("raw closure");
- std::unique_ptr<Closure> closure_raw = CreateClosureStaticVariables(&lambda_method, args ...);
-
- if (kDebuggingClosureTest) {
- std::cerr << "closure raw address: " << closure_raw.get() << std::endl;
- }
- TestPrimitiveWithClosure(closure_raw.get(), descriptor, shorty, args ...);
- closure_a = std::move(closure_raw);
- }
-
- {
- // Create the closure with the ClosureBuilder, which is done indirectly.
- // This simulates how the interpreter would create the closure dynamically at runtime.
- SCOPED_TRACE("closure from builder");
- std::unique_ptr<Closure> closure_built =
- CreateClosureStaticVariablesFromBuilder(&lambda_method, args ...);
- if (kDebuggingClosureTest) {
- std::cerr << "closure built address: " << closure_built.get() << std::endl;
- }
- TestPrimitiveWithClosure(closure_built.get(), descriptor, shorty, args ...);
- closure_b = std::move(closure_built);
- }
-
- // The closures should be identical memory-wise as well.
- EXPECT_EQ(closure_a->GetSize(), closure_b->GetSize());
- EXPECT_TRUE(memcmp(closure_a.get(),
- closure_b.get(),
- std::min(closure_a->GetSize(), closure_b->GetSize())) == 0);
- }
-
- template <typename ... Args>
- static void TestPrimitiveWithClosure(Closure* closure,
- const char* descriptor,
- const char* shorty,
- Args ... args) {
- EXPECT_EQ(sizeof(ArtLambdaMethod*) + GetArgsSize(args...), closure->GetSize());
- EXPECT_EQ(sizeof...(args), closure->GetNumberOfCapturedVariables());
- EXPECT_STREQ(descriptor, closure->GetCapturedVariablesTypeDescriptor());
- TestPrimitiveExpects(closure, shorty, /*index*/0, args ...);
- }
-
- // Call EXPECT_EQ for each argument in the closure's #GetCapturedX.
- template <typename T, typename ... Args>
- static void TestPrimitiveExpects(
- const Closure* closure, const char* shorty, size_t index, T arg, Args ... args) {
- ASSERT_EQ(ShortyFieldType(shorty[index]).GetStaticSize(), sizeof(T))
- << "Test error: Type mismatch at index " << index;
- ExpectCapturedVariable(closure, index, arg);
- EXPECT_EQ(ShortyFieldType(shorty[index]), closure->GetCapturedShortyType(index));
- TestPrimitiveExpects(closure, shorty, index + 1, args ...);
- }
-
- // Base case for EXPECT_EQ.
- static void TestPrimitiveExpects(const Closure* closure, const char* shorty, size_t index) {
- UNUSED(closure, shorty, index);
- }
-
- ArtMethod* fake_method_;
-};
-
-TEST_F(ClosureTest, TestTrivial) {
- ArtLambdaMethod lambda_method{fake_method_, // NOLINT [whitespace/braces] [5]
- "", // No captured variables // NOLINT [whitespace/blank_line] [2]
- "", // No captured variables
- };
-
- std::unique_ptr<Closure> closure = CreateClosureStaticVariables(&lambda_method);
-
- EXPECT_EQ(sizeof(ArtLambdaMethod*), closure->GetSize());
- EXPECT_EQ(0u, closure->GetNumberOfCapturedVariables());
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveSingle) {
- TestPrimitive("Z", true);
- TestPrimitive("B", int8_t(0xde));
- TestPrimitive("C", uint16_t(0xbeef));
- TestPrimitive("S", int16_t(0xdead));
- TestPrimitive("I", int32_t(0xdeadbeef));
- TestPrimitive("F", 0.123f);
- TestPrimitive("J", int64_t(0xdeadbeef00c0ffee));
- TestPrimitive("D", 123.456);
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveMany) {
- TestPrimitive("ZZ", true, false);
- TestPrimitive("ZZZ", true, false, true);
- TestPrimitive("BBBB", int8_t(0xde), int8_t(0xa0), int8_t(0xff), int8_t(0xcc));
- TestPrimitive("CC", uint16_t(0xbeef), uint16_t(0xdead));
- TestPrimitive("SSSS", int16_t(0xdead), int16_t(0xc0ff), int16_t(0xf000), int16_t(0xbaba));
- TestPrimitive("III", int32_t(0xdeadbeef), int32_t(0xc0ffee), int32_t(0xbeefdead));
- TestPrimitive("FF", 0.123f, 555.666f);
- TestPrimitive("JJJ", int64_t(0xdeadbeef00c0ffee), int64_t(0x123), int64_t(0xc0ffee));
- TestPrimitive("DD", 123.456, 777.888);
-} // TEST_F
-
-TEST_F(ClosureTest, TestPrimitiveMixed) {
- TestPrimitive("ZZBBCCSSIIFFJJDD",
- true, false,
- int8_t(0xde), int8_t(0xa0),
- uint16_t(0xbeef), uint16_t(0xdead),
- int16_t(0xdead), int16_t(0xc0ff),
- int32_t(0xdeadbeef), int32_t(0xc0ffee),
- 0.123f, 555.666f,
- int64_t(0xdeadbeef00c0ffee), int64_t(0x123),
- 123.456, 777.888);
-} // TEST_F
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/leaking_allocator.cc b/runtime/lambda/leaking_allocator.cc
deleted file mode 100644
index 22bb294..0000000
--- a/runtime/lambda/leaking_allocator.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/bit_utils.h"
-#include "lambda/leaking_allocator.h"
-#include "linear_alloc.h"
-#include "runtime.h"
-
-namespace art {
-namespace lambda {
-
-void* LeakingAllocator::AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size) {
- // TODO: use GetAllocatorForClassLoader to allocate lambda ArtMethod data.
- void* mem = Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
- DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(mem), align_size);
- return mem;
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/leaking_allocator.h b/runtime/lambda/leaking_allocator.h
deleted file mode 100644
index cb5a1bf..0000000
--- a/runtime/lambda/leaking_allocator.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-#define ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-
-#include <utility> // std::forward
-#include <type_traits> // std::aligned_storage
-
-namespace art {
-class Thread; // forward declaration
-
-namespace lambda {
-
-// Temporary class to centralize all the leaking allocations.
-// Allocations made through this class are never freed, but it is a placeholder
-// that means that the calling code needs to be rewritten to properly:
-//
-// (a) Have a lifetime scoped to some other entity.
-// (b) Not be allocated over and over again if it was already allocated once (immutable data).
-//
-// TODO: do all of the above a/b for each callsite, and delete this class.
-class LeakingAllocator {
- public:
- // An opaque type which is guaranteed for:
- // * a) be large enough to hold T (e.g. for in-place new)
- // * b) be well-aligned (so that reads/writes are well-defined) to T
- // * c) strict-aliasing compatible with T*
- //
- // Nominally used to allocate memory for yet unconstructed instances of T.
- template <typename T>
- using AlignedMemoryStorage = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
-
- // Allocate byte_size bytes worth of memory. Never freed.
- template <typename T>
- static AlignedMemoryStorage<T>* AllocateMemory(Thread* self, size_t byte_size = sizeof(T)) {
- return reinterpret_cast<AlignedMemoryStorage<T>*>(
- AllocateMemoryImpl(self, byte_size, alignof(T)));
- }
-
- // Make a new instance of T, flexibly sized, in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeFlexibleInstance(Thread* self, size_t byte_size, Args&&... args) {
- return new (AllocateMemory<T>(self, byte_size)) T(std::forward<Args>(args)...);
- }
-
- // Make a new instance of T in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeInstance(Thread* self, Args&&... args) {
- return new (AllocateMemory<T>(self, sizeof(T))) T(std::forward<Args>(args)...);
- }
-
- private:
- static void* AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
deleted file mode 100644
index c314fd2..0000000
--- a/runtime/lambda/shorty_field_type.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
-#define ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "globals.h"
-#include "runtime/primitive.h"
-
-#include <ostream>
-
-namespace art {
-
-namespace mirror {
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-
-struct Closure; // forward declaration
-
-// TODO: Refactor together with primitive.h
-
-// The short form of a field type descriptor. Corresponds to ShortyFieldType in dex specification.
-// Only types usable by a field (and locals) are allowed (i.e. no void type).
-// Note that arrays and objects are treated both as 'L'.
-//
-// This is effectively a 'char' enum-like zero-cost type-safe wrapper with extra helper functions.
-struct ShortyFieldType : ValueObject {
- // Use as if this was an enum class, e.g. 'ShortyFieldType::kBoolean'.
- enum : char {
- // Primitives (Narrow):
- kBoolean = 'Z',
- kByte = 'B',
- kChar = 'C',
- kShort = 'S',
- kInt = 'I',
- kFloat = 'F',
- // Primitives (Wide):
- kLong = 'J',
- kDouble = 'D',
- // Managed types:
- kObject = 'L', // This can also be an array (which is otherwise '[' in a non-shorty).
- kLambda = '\\',
- }; // NOTE: This is an anonymous enum so we can get exhaustive switch checking from the compiler.
-
- // Implicitly construct from the enum above. Value must be one of the enum list members above.
- // Always safe to use, does not do any DCHECKs.
- inline constexpr ShortyFieldType(decltype(kByte) c) : value_(c) {
- }
-
- // Default constructor. The initial value is undefined. Initialize before calling methods.
- // This is very unsafe but exists as a convenience to having undefined values.
- explicit ShortyFieldType() : value_(StaticCastValue(0)) {
- }
-
- // Explicitly construct from a char. Value must be one of the enum list members above.
- // Conversion is potentially unsafe, so DCHECKing is performed.
- explicit inline ShortyFieldType(char c) : value_(StaticCastValue(c)) {
- if (kIsDebugBuild) {
- // Verify at debug-time that our conversion is safe.
- ShortyFieldType ignored;
- DCHECK(MaybeCreate(c, &ignored)) << "unknown shorty field type '" << c << "'";
- }
- }
-
- // Attempts to parse the character in 'shorty_field_type' into its strongly typed version.
- // Returns false if the character was out of range of the grammar.
- static bool MaybeCreate(char shorty_field_type, ShortyFieldType* out) {
- DCHECK(out != nullptr);
- switch (shorty_field_type) {
- case kBoolean:
- case kByte:
- case kChar:
- case kShort:
- case kInt:
- case kFloat:
- case kLong:
- case kDouble:
- case kObject:
- case kLambda:
- *out = ShortyFieldType(static_cast<decltype(kByte)>(shorty_field_type));
- return true;
- default:
- break;
- }
-
- return false;
- }
-
- // Convert the first type in a field type descriptor string into a shorty.
- // Arrays are converted into objects.
- // Does not work for 'void' types (as they are illegal in a field type descriptor).
- static ShortyFieldType CreateFromFieldTypeDescriptor(const char* field_type_descriptor) {
- DCHECK(field_type_descriptor != nullptr);
- char c = *field_type_descriptor;
- if (UNLIKELY(c == kArray)) { // Arrays are treated as object references.
- c = kObject;
- }
- return ShortyFieldType{c}; // NOLINT [readability/braces] [4]
- }
-
- // Parse the first type in the field type descriptor string into a shorty.
- // See CreateFromFieldTypeDescriptor for more details.
- //
- // Returns the pointer offset into the middle of the field_type_descriptor
- // that would either point to the next shorty type, or to null if there are
- // no more types.
- //
- // DCHECKs that each of the nested types is a valid shorty field type. This
- // means the type descriptor must be already valid.
- static const char* ParseFromFieldTypeDescriptor(const char* field_type_descriptor,
- ShortyFieldType* out_type) {
- DCHECK(field_type_descriptor != nullptr);
-
- if (UNLIKELY(field_type_descriptor[0] == '\0')) {
- // Handle empty strings by immediately returning null.
- return nullptr;
- }
-
- // All non-empty strings must be a valid list of field type descriptors, otherwise
- // the DCHECKs will kick in and the program will crash.
- const char shorter_type = *field_type_descriptor;
-
- ShortyFieldType safe_type;
- bool type_set = MaybeCreate(shorter_type, &safe_type);
-
- // Lambda that keeps skipping characters until it sees ';'.
- // Stops one character -after- the ';'.
- auto skip_until_semicolon = [&field_type_descriptor]() {
- while (*field_type_descriptor != ';' && *field_type_descriptor != '\0') {
- ++field_type_descriptor;
- }
- DCHECK_NE(*field_type_descriptor, '\0')
- << " type descriptor terminated too early: " << field_type_descriptor;
- ++field_type_descriptor; // Skip the ';'
- };
-
- ++field_type_descriptor;
- switch (shorter_type) {
- case kObject:
- skip_until_semicolon();
-
- DCHECK(type_set);
- DCHECK(safe_type == kObject);
- break;
- case kArray:
- // Strip out all of the leading [[[[[s, we don't care if it's a multi-dimensional array.
- while (*field_type_descriptor == '[' && *field_type_descriptor != '\0') {
- ++field_type_descriptor;
- }
- DCHECK_NE(*field_type_descriptor, '\0')
- << " type descriptor terminated too early: " << field_type_descriptor;
- // Either a primitive, object, or closure left. No more arrays.
- {
- // Now skip all the characters that form the array's interior-most element type
- // (which itself is guaranteed not to be an array).
- ShortyFieldType array_interior_type;
- type_set = MaybeCreate(*field_type_descriptor, &array_interior_type);
- DCHECK(type_set) << " invalid remaining type descriptor " << field_type_descriptor;
-
- // Handle array-of-objects case like [[[[[LObject; and array-of-closures like [[[[[\Foo;
- if (*field_type_descriptor == kObject || *field_type_descriptor == kLambda) {
- skip_until_semicolon();
- } else {
- // Handle primitives which are exactly one character we can skip.
- DCHECK(array_interior_type.IsPrimitive());
- ++field_type_descriptor;
- }
- }
-
- safe_type = kObject;
- type_set = true;
- break;
- case kLambda:
- skip_until_semicolon();
-
- DCHECK(safe_type == kLambda);
- DCHECK(type_set);
- break;
- default:
- DCHECK_NE(kVoid, shorter_type) << "cannot make a ShortyFieldType from a void type";
- break;
- }
-
- DCHECK(type_set) << "invalid shorty type descriptor " << shorter_type;
-
- *out_type = safe_type;
- return type_set ? field_type_descriptor : nullptr;
- }
-
- // Explicitly convert to a char.
- inline explicit operator char() const {
- return value_;
- }
-
- // Is this a primitive?
- inline bool IsPrimitive() const {
- return IsPrimitiveNarrow() || IsPrimitiveWide();
- }
-
- // Is this a narrow primitive (i.e. can fit into 1 virtual register)?
- inline bool IsPrimitiveNarrow() const {
- switch (value_) {
- case kBoolean:
- case kByte:
- case kChar:
- case kShort:
- case kInt:
- case kFloat:
- return true;
- default:
- return false;
- }
- }
-
- // Is this a wide primitive (i.e. needs exactly 2 virtual registers)?
- inline bool IsPrimitiveWide() const {
- switch (value_) {
- case kLong:
- case kDouble:
- return true;
- default:
- return false;
- }
- }
-
- // Is this an object reference (which can also be an array)?
- inline bool IsObject() const {
- return value_ == kObject;
- }
-
- // Is this a lambda?
- inline bool IsLambda() const {
- return value_ == kLambda;
- }
-
- // Is the size of this (to store inline as a field) always known at compile-time?
- inline bool IsStaticSize() const {
- return !IsLambda();
- }
-
- // Get the compile-time size (to be able to store it inline as a field or on stack).
- // Dynamically-sized values such as lambdas return the guaranteed lower bound.
- inline size_t GetStaticSize() const {
- switch (value_) {
- case kBoolean:
- return sizeof(bool);
- case kByte:
- return sizeof(uint8_t);
- case kChar:
- return sizeof(int16_t);
- case kShort:
- return sizeof(uint16_t);
- case kInt:
- return sizeof(int32_t);
- case kLong:
- return sizeof(int64_t);
- case kFloat:
- return sizeof(float);
- case kDouble:
- return sizeof(double);
- case kObject:
- return kObjectReferenceSize;
- case kLambda:
- return sizeof(void*); // Large enough to store the ArtLambdaMethod
- default:
- DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'";
- UNREACHABLE();
- }
- }
-
- // Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection.
- inline operator decltype(kByte)() const {
- return value_;
- }
-
- // Returns a read-only static string representing the enum name, useful for printing/debug only.
- inline const char* ToString() const {
- switch (value_) {
- case kBoolean:
- return "kBoolean";
- case kByte:
- return "kByte";
- case kChar:
- return "kChar";
- case kShort:
- return "kShort";
- case kInt:
- return "kInt";
- case kLong:
- return "kLong";
- case kFloat:
- return "kFloat";
- case kDouble:
- return "kDouble";
- case kObject:
- return "kObject";
- case kLambda:
- return "kLambda";
- default:
- // Undefined behavior if we get this far. Pray the compiler gods are merciful.
- return "<undefined>";
- }
- }
-
- private:
- static constexpr const char kArray = '[';
- static constexpr const char kVoid = 'V';
-
- // Helper to statically cast anything into our nested anonymous enum type.
- template <typename T>
- inline static decltype(kByte) StaticCastValue(const T& anything) {
- return static_cast<decltype(value_)>(anything);
- }
-
- // The only field in this struct.
- decltype(kByte) value_;
-};
-
-
- // Print to an output stream.
-inline std::ostream& operator<<(std::ostream& ostream, ShortyFieldType shorty) {
- return ostream << shorty.ToString();
-}
-
-static_assert(sizeof(ShortyFieldType) == sizeof(char),
- "ShortyFieldType must be lightweight just like a char");
-
-// Compile-time trait information regarding the ShortyFieldType.
-// Used by static_asserts to verify that the templates are correctly used at compile-time.
-//
-// For example,
-// ShortyFieldTypeTraits::IsPrimitiveNarrowType<int64_t>() == true
-// ShortyFieldTypeTraits::IsObjectType<mirror::Object*>() == true
-struct ShortyFieldTypeTraits {
- // A type guaranteed to be large enough to holds any of the shorty field types.
- using MaxType = uint64_t;
-
- // Type traits: Returns true if 'T' is a valid type that can be represented by a shorty field type.
- template <typename T>
- static inline constexpr bool IsType() {
- return IsPrimitiveType<T>() || IsObjectType<T>() || IsLambdaType<T>();
- }
-
- // Returns true if 'T' is a primitive type (i.e. a built-in without nested references).
- template <typename T>
- static inline constexpr bool IsPrimitiveType() {
- return IsPrimitiveNarrowType<T>() || IsPrimitiveWideType<T>();
- }
-
- // Returns true if 'T' is a primitive type that is narrow (i.e. can be stored into 1 vreg).
- template <typename T>
- static inline constexpr bool IsPrimitiveNarrowType() {
- return IsPrimitiveNarrowTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is a primitive type that is wide (i.e. needs 2 vregs for storage).
- template <typename T>
- static inline constexpr bool IsPrimitiveWideType() {
- return IsPrimitiveWideTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is an object (i.e. it is a managed GC reference).
- // Note: This is equivalent to std::base_of<mirror::Object*, T>::value
- template <typename T>
- static inline constexpr bool IsObjectType() {
- return IsObjectTypeImpl(static_cast<T* const>(nullptr));
- }
-
- // Returns true if 'T' is a lambda (i.e. it is a closure with unknown static data);
- template <typename T>
- static inline constexpr bool IsLambdaType() {
- return IsLambdaTypeImpl(static_cast<T* const>(nullptr));
- }
-
- private:
-#define IS_VALID_TYPE_SPECIALIZATION(type, name) \
- static inline constexpr bool Is ## name ## TypeImpl(type* const = 0) { /*NOLINT*/ \
- return true; \
- } \
- \
- static_assert(sizeof(MaxType) >= sizeof(type), "MaxType too small")
-
- IS_VALID_TYPE_SPECIALIZATION(bool, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(int8_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint8_t, PrimitiveNarrow); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(int16_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint16_t, PrimitiveNarrow); // Chars are unsigned.
- IS_VALID_TYPE_SPECIALIZATION(int32_t, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(uint32_t, PrimitiveNarrow); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(float, PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION(int64_t, PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION(uint64_t, PrimitiveWide); // Not strictly true, but close enough.
- IS_VALID_TYPE_SPECIALIZATION(double, PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION(mirror::Object*, Object);
- IS_VALID_TYPE_SPECIALIZATION(Closure*, Lambda);
-#undef IS_VALID_TYPE_SPECIALIZATION
-
-#define IS_VALID_TYPE_SPECIALIZATION_IMPL(name) \
- template <typename T> \
- static inline constexpr bool Is ## name ## TypeImpl(T* const = 0) { \
- return false; \
- }
-
- IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveNarrow);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(PrimitiveWide);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(Object);
- IS_VALID_TYPE_SPECIALIZATION_IMPL(Lambda);
-
-#undef IS_VALID_TYPE_SPECIALIZATION_IMPL
-};
-
-// Maps the ShortyFieldType enum into it's C++ type equivalent, into the "type" typedef.
-// For example:
-// ShortyFieldTypeSelectType<ShortyFieldType::kBoolean>::type => bool
-// ShortyFieldTypeSelectType<ShortyFieldType::kLong>::type => int64_t
-//
-// Invalid enums will not have the type defined.
-template <decltype(ShortyFieldType::kByte) Shorty>
-struct ShortyFieldTypeSelectType {
-};
-
-// Maps the C++ type into it's ShortyFieldType enum equivalent, into the "value" constexpr.
-// For example:
-// ShortyFieldTypeSelectEnum<bool>::value => ShortyFieldType::kBoolean
-// ShortyFieldTypeSelectEnum<int64_t>::value => ShortyFieldType::kLong
-//
-// Signed-ness must match for a valid select, e.g. uint64_t will not map to kLong, but int64_t will.
-// Invalid types will not have the value defined (see e.g. ShortyFieldTypeTraits::IsType<T>())
-template <typename T>
-struct ShortyFieldTypeSelectEnum {
-};
-
-#define SHORTY_FIELD_TYPE_SELECT_IMPL(cpp_type, enum_element) \
-template <> \
-struct ShortyFieldTypeSelectType<ShortyFieldType::enum_element> { \
- using type = cpp_type; \
-}; \
-\
-template <> \
-struct ShortyFieldTypeSelectEnum<cpp_type> { \
- static constexpr const auto value = ShortyFieldType::enum_element; \
-}; \
-
-SHORTY_FIELD_TYPE_SELECT_IMPL(bool, kBoolean);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int8_t, kByte);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int16_t, kShort);
-SHORTY_FIELD_TYPE_SELECT_IMPL(uint16_t, kChar);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int32_t, kInt);
-SHORTY_FIELD_TYPE_SELECT_IMPL(float, kFloat);
-SHORTY_FIELD_TYPE_SELECT_IMPL(int64_t, kLong);
-SHORTY_FIELD_TYPE_SELECT_IMPL(double, kDouble);
-SHORTY_FIELD_TYPE_SELECT_IMPL(mirror::Object*, kObject);
-SHORTY_FIELD_TYPE_SELECT_IMPL(Closure*, kLambda);
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_SHORTY_FIELD_TYPE_H_
diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc
deleted file mode 100644
index 32bade9..0000000
--- a/runtime/lambda/shorty_field_type_test.cc
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/shorty_field_type.h"
-#include "mirror/object_reference.h"
-
-#include "utils.h"
-#include <numeric>
-#include <stdint.h>
-#include "gtest/gtest.h"
-
-#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(nullptr));
-
-namespace art {
-namespace lambda {
-
-class ShortyFieldTypeTest : public ::testing::Test {
- public:
- ShortyFieldTypeTest() = default;
- ~ShortyFieldTypeTest() = default;
-
- protected:
- static void SetUpTestCase() {
- }
-
- virtual void SetUp() {
- }
-
- static ::testing::AssertionResult IsResultSuccessful(bool result) {
- if (result) {
- return ::testing::AssertionSuccess();
- } else {
- return ::testing::AssertionFailure();
- }
- }
-
- template <typename T>
- static std::string ListToString(const T& list) {
- std::stringstream stream;
-
- stream << "[";
- for (auto&& val : list) {
- stream << val << ", ";
- }
- stream << "]";
-
- return stream.str();
- }
-
- // Compare two vector-like types for equality.
- template <typename T>
- static ::testing::AssertionResult AreListsEqual(const T& expected, const T& actual) {
- bool success = true;
- std::stringstream stream;
-
- if (expected.size() != actual.size()) {
- success = false;
- stream << "Expected list size: " << expected.size()
- << ", but got list size: " << actual.size();
- stream << std::endl;
- }
-
- for (size_t j = 0; j < std::min(expected.size(), actual.size()); ++j) {
- if (expected[j] != actual[j]) {
- success = false;
- stream << "Expected element '" << j << "' to be '" << expected[j] << "', but got actual: '"
- << actual[j] << "'.";
- stream << std::endl;
- }
- }
-
- if (success) {
- return ::testing::AssertionSuccess();
- }
-
- stream << "Expected list was: " << ListToString(expected)
- << ", actual list was: " << ListToString(actual);
-
- return ::testing::AssertionFailure() << stream.str();
- }
-
- static std::vector<ShortyFieldType> ParseLongTypeDescriptorsToList(const char* type_descriptor) {
- std::vector<ShortyFieldType> lst;
-
- ShortyFieldType shorty;
-
- const char* parsed = type_descriptor;
- while ((parsed = ShortyFieldType::ParseFromFieldTypeDescriptor(parsed, &shorty)) != nullptr) {
- lst.push_back(shorty);
- }
-
- return lst;
- }
-
- protected:
- // Shorthands for the ShortyFieldType constants.
- // The letters are the same as JNI letters, with kS_ being a lambda since \ is not available.
- static constexpr ShortyFieldType kSZ = ShortyFieldType::kBoolean;
- static constexpr ShortyFieldType kSB = ShortyFieldType::kByte;
- static constexpr ShortyFieldType kSC = ShortyFieldType::kChar;
- static constexpr ShortyFieldType kSS = ShortyFieldType::kShort;
- static constexpr ShortyFieldType kSI = ShortyFieldType::kInt;
- static constexpr ShortyFieldType kSF = ShortyFieldType::kFloat;
- static constexpr ShortyFieldType kSJ = ShortyFieldType::kLong;
- static constexpr ShortyFieldType kSD = ShortyFieldType::kDouble;
- static constexpr ShortyFieldType kSL = ShortyFieldType::kObject;
- static constexpr ShortyFieldType kS_ = ShortyFieldType::kLambda;
-};
-
-TEST_F(ShortyFieldTypeTest, TestMaybeCreate) {
- ShortyFieldType shorty;
-
- std::vector<char> shorties = {'Z', 'B', 'C', 'S', 'I', 'F', 'J', 'D', 'L', '\\'};
-
- // All valid 'shorty' characters are created successfully.
- for (const char c : shorties) {
- EXPECT_TRUE(ShortyFieldType::MaybeCreate(c, &shorty)) << c;
- EXPECT_EQ(c, static_cast<char>(c));
- }
-
- // All other characters can never be created.
- for (unsigned char c = 0; c < std::numeric_limits<unsigned char>::max(); ++c) {
- // Skip the valid characters.
- if (std::find(shorties.begin(), shorties.end(), c) != shorties.end()) { continue; }
- // All invalid characters should fail.
- EXPECT_FALSE(ShortyFieldType::MaybeCreate(static_cast<char>(c), &shorty)) << c;
- }
-} // TEST_F
-
-TEST_F(ShortyFieldTypeTest, TestCreateFromFieldTypeDescriptor) {
- // Sample input.
- std::vector<const char*> lengthies = {
- "Z", "B", "C", "S", "I", "F", "J", "D", "LObject;", "\\Closure;",
- "[Z", "[[B", "[[LObject;"
- };
-
- // Expected output.
- std::vector<ShortyFieldType> expected = {
- ShortyFieldType::kBoolean,
- ShortyFieldType::kByte,
- ShortyFieldType::kChar,
- ShortyFieldType::kShort,
- ShortyFieldType::kInt,
- ShortyFieldType::kFloat,
- ShortyFieldType::kLong,
- ShortyFieldType::kDouble,
- ShortyFieldType::kObject,
- ShortyFieldType::kLambda,
- // Arrays are always treated as objects.
- ShortyFieldType::kObject,
- ShortyFieldType::kObject,
- ShortyFieldType::kObject,
- };
-
- // All valid lengthy types are correctly turned into the expected shorty type.
- for (size_t i = 0; i < lengthies.size(); ++i) {
- EXPECT_EQ(expected[i], ShortyFieldType::CreateFromFieldTypeDescriptor(lengthies[i]));
- }
-} // TEST_F
-
-TEST_F(ShortyFieldTypeTest, TestParseFromFieldTypeDescriptor) {
- // Sample input.
- std::vector<const char*> lengthies = {
- // Empty list
- "",
- // Primitives
- "Z", "B", "C", "S", "I", "F", "J", "D",
- // Non-primitives
- "LObject;", "\\Closure;",
- // Arrays. The biggest PITA.
- "[Z", "[[B", "[[LObject;", "[[[[\\Closure;",
- // Multiple things at once:
- "ZBCSIFJD",
- "LObject;LObject;SSI",
- "[[ZDDZ",
- "[[LObject;[[Z[F\\Closure;LObject;",
- };
-
- // Expected output.
- std::vector<std::vector<ShortyFieldType>> expected = {
- // Empty list
- {},
- // Primitives
- {kSZ}, {kSB}, {kSC}, {kSS}, {kSI}, {kSF}, {kSJ}, {kSD},
- // Non-primitives.
- { ShortyFieldType::kObject }, { ShortyFieldType::kLambda },
- // Arrays are always treated as objects.
- { kSL }, { kSL }, { kSL }, { kSL },
- // Multiple things at once:
- { kSZ, kSB, kSC, kSS, kSI, kSF, kSJ, kSD },
- { kSL, kSL, kSS, kSS, kSI },
- { kSL, kSD, kSD, kSZ },
- { kSL, kSL, kSL, kS_, kSL },
- };
-
- // Sanity check that the expected/actual lists are the same size.. when adding new entries.
- ASSERT_EQ(expected.size(), lengthies.size());
-
- // All valid lengthy types are correctly turned into the expected shorty type.
- for (size_t i = 0; i < expected.size(); ++i) {
- const std::vector<ShortyFieldType>& expected_list = expected[i];
- std::vector<ShortyFieldType> actual_list = ParseLongTypeDescriptorsToList(lengthies[i]);
- EXPECT_TRUE(AreListsEqual(expected_list, actual_list));
- }
-} // TEST_F
-
-// Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests.
-template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum>
-struct ShortyTypeCharacteristics {
- bool is_primitive_ = false;
- bool is_primitive_narrow_ = false;
- bool is_primitive_wide_ = false;
- bool is_object_ = false;
- bool is_lambda_ = false;
- size_t size_ = sizeof(T);
- bool is_dynamic_sized_ = false;
-
- void CheckExpects() {
- ShortyFieldType shorty = kShortyEnum;
-
- // Test the main non-parsing-related ShortyFieldType characteristics.
- EXPECT_EQ(is_primitive_, shorty.IsPrimitive());
- EXPECT_EQ(is_primitive_narrow_, shorty.IsPrimitiveNarrow());
- EXPECT_EQ(is_primitive_wide_, shorty.IsPrimitiveWide());
- EXPECT_EQ(is_object_, shorty.IsObject());
- EXPECT_EQ(is_lambda_, shorty.IsLambda());
- EXPECT_EQ(size_, shorty.GetStaticSize());
- EXPECT_EQ(is_dynamic_sized_, !shorty.IsStaticSize());
-
- // Test compile-time ShortyFieldTypeTraits.
- EXPECT_TRUE(ShortyFieldTypeTraits::IsType<T>());
- EXPECT_EQ(is_primitive_, ShortyFieldTypeTraits::IsPrimitiveType<T>());
- EXPECT_EQ(is_primitive_narrow_, ShortyFieldTypeTraits::IsPrimitiveNarrowType<T>());
- EXPECT_EQ(is_primitive_wide_, ShortyFieldTypeTraits::IsPrimitiveWideType<T>());
- EXPECT_EQ(is_object_, ShortyFieldTypeTraits::IsObjectType<T>());
- EXPECT_EQ(is_lambda_, ShortyFieldTypeTraits::IsLambdaType<T>());
-
- // Test compile-time ShortyFieldType selectors
- static_assert(std::is_same<T, typename ShortyFieldTypeSelectType<kShortyEnum>::type>::value,
- "ShortyFieldType Enum->Type incorrect mapping");
- auto kActualEnum = ShortyFieldTypeSelectEnum<T>::value; // Do not ODR-use, avoid linker error.
- EXPECT_EQ(kShortyEnum, kActualEnum);
- }
-};
-
-TEST_F(ShortyFieldTypeTest, TestCharacteristicsAndTraits) {
- // Boolean test
- {
- SCOPED_TRACE("boolean");
- ShortyTypeCharacteristics<bool, ShortyFieldType::kBoolean> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Byte test
- {
- SCOPED_TRACE("byte");
- ShortyTypeCharacteristics<int8_t, ShortyFieldType::kByte> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Char test
- {
- SCOPED_TRACE("char");
- ShortyTypeCharacteristics<uint16_t, ShortyFieldType::kChar> chars; // Char is unsigned.
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Short test
- {
- SCOPED_TRACE("short");
- ShortyTypeCharacteristics<int16_t, ShortyFieldType::kShort> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Int test
- {
- SCOPED_TRACE("int");
- ShortyTypeCharacteristics<int32_t, ShortyFieldType::kInt> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Long test
- {
- SCOPED_TRACE("long");
- ShortyTypeCharacteristics<int64_t, ShortyFieldType::kLong> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_wide_ = true;
- chars.CheckExpects();
- }
-
- // Float test
- {
- SCOPED_TRACE("float");
- ShortyTypeCharacteristics<float, ShortyFieldType::kFloat> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_narrow_ = true;
- chars.CheckExpects();
- }
-
- // Double test
- {
- SCOPED_TRACE("double");
- ShortyTypeCharacteristics<double, ShortyFieldType::kDouble> chars;
- chars.is_primitive_ = true;
- chars.is_primitive_wide_ = true;
- chars.CheckExpects();
- }
-
- // Object test
- {
- SCOPED_TRACE("object");
- ShortyTypeCharacteristics<mirror::Object*, ShortyFieldType::kObject> chars;
- chars.is_object_ = true;
- chars.size_ = kObjectReferenceSize;
- chars.CheckExpects();
- EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::CompressedReference<mirror::Object>));
- }
-
- // Lambda test
- {
- SCOPED_TRACE("lambda");
- ShortyTypeCharacteristics<Closure*, ShortyFieldType::kLambda> chars;
- chars.is_lambda_ = true;
- chars.is_dynamic_sized_ = true;
- chars.CheckExpects();
- }
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 595a47b..c7e4f8b 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -583,12 +583,6 @@
args.Set(M::HeapGrowthLimit, args.GetOrDefault(M::MemoryMaximumSize));
}
- if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kLambdas) {
- LOG(WARNING) << "Experimental lambdas have been enabled. All lambda opcodes have "
- << "an unstable specification and are nearly guaranteed to change over time. "
- << "Do not attempt to write shipping code against these opcodes.";
- }
-
*runtime_options = std::move(args);
return true;
}
@@ -709,8 +703,6 @@
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
UsageMessage(stream, " -Xno-dex-file-fallback "
"(Don't fall back to dex files without oat files)\n");
- UsageMessage(stream, " -Xexperimental:lambdas "
- "(Enable new and experimental dalvik opcodes and semantics)\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9f0ef7c..ca270a6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -78,7 +78,6 @@
#include "jit/jit.h"
#include "jni_internal.h"
#include "linear_alloc.h"
-#include "lambda/box_table.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -442,7 +441,6 @@
GetMonitorList()->SweepMonitorList(visitor);
GetJavaVM()->SweepJniWeakGlobals(visitor);
GetHeap()->SweepAllocationRecords(visitor);
- GetLambdaBoxTable()->SweepWeakBoxedLambdas(visitor);
}
bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
@@ -1015,9 +1013,6 @@
jit_options_->SetSaveProfilingInfo(false);
}
- // Allocate a global table of boxed lambda objects <-> closures.
- lambda_box_table_ = MakeUnique<lambda::BoxTable>();
-
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
const bool use_malloc = IsAotCompiler();
@@ -1638,7 +1633,6 @@
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
heap_->DisallowNewAllocationRecords();
- lambda_box_table_->DisallowNewWeakBoxedLambdas();
}
void Runtime::AllowNewSystemWeaks() {
@@ -1647,7 +1641,6 @@
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
- lambda_box_table_->AllowNewWeakBoxedLambdas();
}
void Runtime::BroadcastForNewSystemWeaks() {
@@ -1658,7 +1651,6 @@
intern_table_->BroadcastForNewInterns();
java_vm_->BroadcastForNewWeakGlobals();
heap_->BroadcastForNewAllocationRecords();
- lambda_box_table_->BroadcastForNewWeakBoxedLambdas();
}
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index afa8e48..9f64e48 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -54,10 +54,6 @@
class JitOptions;
} // namespace jit
-namespace lambda {
- class BoxTable;
-} // namespace lambda
-
namespace mirror {
class ClassLoader;
class Array;
@@ -552,10 +548,6 @@
return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
}
- lambda::BoxTable* GetLambdaBoxTable() const {
- return lambda_box_table_.get();
- }
-
// Create the JIT and instrumentation and code cache.
void CreateJit();
@@ -739,8 +731,6 @@
std::unique_ptr<jit::Jit> jit_;
std::unique_ptr<jit::JitOptions> jit_options_;
- std::unique_ptr<lambda::BoxTable> lambda_box_table_;
-
// Fault message, printed when we get a SIGSEGV.
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 31206b5..b95dfad 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -117,7 +117,7 @@
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
-RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none, lambdas}
+RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none}
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 03de399..daf5ec4 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1157,9 +1157,6 @@
case Instruction::kVerifyRegCWide:
result = result && CheckWideRegisterIndex(inst->VRegC());
break;
- case Instruction::kVerifyRegCString:
- result = result && CheckStringIndex(inst->VRegC());
- break;
}
switch (inst->GetVerifyExtraFlags()) {
case Instruction::kVerifyArrayData:
@@ -3331,67 +3328,10 @@
}
break;
}
- case Instruction::INVOKE_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement invoke-lambda verification
- break;
- }
- case Instruction::CAPTURE_VARIABLE: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement capture-variable verification
- break;
- }
- case Instruction::CREATE_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement create-lambda verification
- break;
- }
- case Instruction::LIBERATE_VARIABLE: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement liberate-variable verification
- break;
- }
-
- case Instruction::UNUSED_F4: {
- DCHECK(false); // TODO(iam): Implement opcodes for lambdas
- // Conservatively fail verification on release builds.
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Unexpected opcode " << inst->DumpString(dex_file_);
- break;
- }
-
- case Instruction::BOX_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement box-lambda verification
-
- // Partial verification. Sets the resulting type to always be an object, which
- // is good enough for some other verification to occur without hard-failing.
- const uint32_t vreg_target_object = inst->VRegA_22x(); // box-lambda vA, vB
- const RegType& reg_type = reg_types_.JavaLangObject(need_precise_constants_);
- work_line_->SetRegisterType<LockOp::kClear>(this, vreg_target_object, reg_type);
- break;
- }
-
- case Instruction::UNBOX_LAMBDA: {
- // Don't bother verifying, instead the interpreter will take the slow path with access checks.
- // If the code would've normally hard-failed, then the interpreter will throw the
- // appropriate verification errors at runtime.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement unbox-lambda verification
- break;
- }
/* These should never appear during verification. */
case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
- case Instruction::UNUSED_FA ... Instruction::UNUSED_FF:
+ case Instruction::UNUSED_F3 ... Instruction::UNUSED_FF:
case Instruction::UNUSED_79:
case Instruction::UNUSED_7A:
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Unexpected opcode " << inst->DumpString(dex_file_);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 2592a21..5fe95c2 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -86,13 +86,10 @@
VERIFY_ERROR_ACCESS_METHOD = 128, // IllegalAccessError.
VERIFY_ERROR_CLASS_CHANGE = 256, // IncompatibleClassChangeError.
VERIFY_ERROR_INSTANTIATION = 512, // InstantiationError.
- // For opcodes that don't have complete verifier support (such as lambda opcodes),
- // we need a way to continue execution at runtime without attempting to re-verify
- // (since we know it will fail no matter what). Instead, run as the interpreter
- // in a special "do access checks" mode which will perform verifier-like checking
- // on the fly.
- //
- // TODO: Once all new opcodes have implemented full verifier support, this can be removed.
+ // For opcodes that don't have complete verifier support, we need a way to continue
+ // execution at runtime without attempting to re-verify (since we know it will fail no
+ // matter what). Instead, run as the interpreter in a special "do access checks" mode
+ // which will perform verifier-like checking on the fly.
VERIFY_ERROR_FORCE_INTERPRETER = 1024, // Skip the verification phase at runtime;
// force the interpreter to do access checks.
// (sets a soft fail at compile time).