resolved conflicts for merge of f466553d to dalvik-dev
Change-Id: I1205b5c5da3a02e410fff760a8b0dda41cb7249f
diff --git a/Android.mk b/Android.mk
index 46a7c1e..0b4b231 100644
--- a/Android.mk
+++ b/Android.mk
@@ -85,6 +85,7 @@
include $(art_path)/runtime/Android.mk
include $(art_path)/compiler/Android.mk
include $(art_path)/dex2oat/Android.mk
+include $(art_path)/disassembler/Android.mk
include $(art_path)/oatdump/Android.mk
include $(art_path)/dalvikvm/Android.mk
include $(art_path)/jdwpspy/Android.mk
diff --git a/build/Android.common.mk b/build/Android.common.mk
index dd0ba4d..0871884 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -105,6 +105,7 @@
ART_C_INCLUDES := \
external/gtest/include \
external/valgrind/main/include \
+ external/valgrind/main \
external/zlib \
frameworks/compile/mclinker/include
diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc
index 36393e7..2da8064 100644
--- a/compiler/dex/arena_allocator.cc
+++ b/compiler/dex/arena_allocator.cc
@@ -19,12 +19,15 @@
#include "arena_allocator.h"
#include "base/logging.h"
#include "base/mutex.h"
+#include "thread-inl.h"
+#include <memcheck/memcheck.h>
namespace art {
// Memmap is a bit slower than malloc according to my measurements.
static constexpr bool kUseMemMap = false;
static constexpr bool kUseMemSet = true && kUseMemMap;
+static constexpr size_t kValgrindRedZoneBytes = 8;
static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
"Misc ",
@@ -107,6 +110,9 @@
void ArenaPool::FreeArena(Arena* arena) {
Thread* self = Thread::Current();
+ if (UNLIKELY(RUNNING_ON_VALGRIND)) {
+ VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+ }
{
MutexLock lock(self, lock_);
arena->next_ = free_arenas_;
@@ -128,7 +134,8 @@
end_(nullptr),
ptr_(nullptr),
arena_head_(nullptr),
- num_allocations_(0) {
+ num_allocations_(0),
+ running_on_valgrind_(RUNNING_ON_VALGRIND) {
memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
}
@@ -140,6 +147,29 @@
}
}
+void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
+ size_t rounded_bytes = (bytes + 3 + kValgrindRedZoneBytes) & ~3;
+ if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
+ // Obtain a new block.
+ ObtainNewArenaForAllocation(rounded_bytes);
+ if (UNLIKELY(ptr_ == nullptr)) {
+ return nullptr;
+ }
+ }
+ if (kCountAllocations) {
+ alloc_stats_[kind] += rounded_bytes;
+ ++num_allocations_;
+ }
+ uint8_t* ret = ptr_;
+ ptr_ += rounded_bytes;
+ // Check that the memory is already zeroed out.
+ for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
+ CHECK_EQ(*ptr, 0U);
+ }
+ VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes);
+ return ret;
+}
+
ArenaAllocator::~ArenaAllocator() {
// Reclaim all the arenas by giving them back to the thread pool.
UpdateBytesAllocated();
diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h
index dda52a2..d11d67c 100644
--- a/compiler/dex/arena_allocator.h
+++ b/compiler/dex/arena_allocator.h
@@ -103,6 +103,9 @@
// Returns zeroed memory.
void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
+ if (UNLIKELY(running_on_valgrind_)) {
+ return AllocValgrind(bytes, kind);
+ }
bytes = (bytes + 3) & ~3;
if (UNLIKELY(ptr_ + bytes > end_)) {
// Obtain a new block.
@@ -120,6 +123,7 @@
return ret;
}
+ void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
void ObtainNewArenaForAllocation(size_t allocation_size);
size_t BytesAllocated() const;
void DumpMemStats(std::ostream& os) const;
@@ -132,10 +136,9 @@
uint8_t* end_;
uint8_t* ptr_;
Arena* arena_head_;
-
- // Statistics.
size_t num_allocations_;
- size_t alloc_stats_[kNumAllocKinds]; // Bytes used by various allocation kinds.
+ size_t alloc_stats_[kNumAllocKinds]; // Bytes used by various allocation kinds.
+ bool running_on_valgrind_;
DISALLOW_COPY_AND_ASSIGN(ArenaAllocator);
}; // ArenaAllocator
diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc
index 3fa9295..b921f61 100644
--- a/compiler/dex/arena_bit_vector.cc
+++ b/compiler/dex/arena_bit_vector.cc
@@ -87,12 +87,6 @@
storage_[num >> 5] &= ~check_masks[num & 0x1f];
}
-// Copy a whole vector to the other. Sizes must match.
-void ArenaBitVector::Copy(ArenaBitVector* src) {
- DCHECK_EQ(storage_size_, src->GetStorageSize());
- memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
-}
-
// Intersect with another bit vector. Sizes and expandability must be the same.
void ArenaBitVector::Intersect(const ArenaBitVector* src) {
DCHECK_EQ(storage_size_, src->GetStorageSize());
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 8bcd628..24a7ce9 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -44,7 +44,7 @@
DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
- if (bit_index_ >= bit_size_) return -1;
+ if (UNLIKELY(bit_index_ >= bit_size_)) return -1;
uint32_t word_index = bit_index_ / 32;
uint32_t word = bit_storage_[word_index];
@@ -54,7 +54,7 @@
bit_index_ &= ~0x1f;
do {
word_index++;
- if ((word_index * 32) >= bit_size_) {
+ if (UNLIKELY((word_index * 32) >= bit_size_)) {
bit_index_ = bit_size_;
return -1;
}
@@ -95,7 +95,9 @@
bool IsBitSet(unsigned int num);
void ClearAllBits();
void SetInitialBits(unsigned int num_bits);
- void Copy(ArenaBitVector* src);
+ void Copy(ArenaBitVector* src) {
+ memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
+ }
void Intersect(const ArenaBitVector* src2);
void Union(const ArenaBitVector* src);
// Are we equal to another bit vector? Note: expandability attributes must also match.
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 06cc505..236c6f4 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -21,42 +21,63 @@
namespace art {
-inline BasicBlock* DataflowIterator::NextBody(bool had_change) {
- changed_ |= had_change;
+// Single forward pass over the nodes.
+inline BasicBlock* DataflowIterator::ForwardSingleNext() {
BasicBlock* res = NULL;
- if (reverse_) {
- if (is_iterative_ && changed_ && (idx_ < 0)) {
- idx_ = start_idx_;
- changed_ = false;
- }
- if (idx_ >= 0) {
- int bb_id = block_id_list_->Get(idx_--);
- res = mir_graph_->GetBasicBlock(bb_id);
- }
- } else {
- if (is_iterative_ && changed_ && (idx_ >= end_idx_)) {
- idx_ = start_idx_;
- changed_ = false;
- }
- if (idx_ < end_idx_) {
- int bb_id = block_id_list_->Get(idx_++);
- res = mir_graph_->GetBasicBlock(bb_id);
- }
+ if (idx_ < end_idx_) {
+ int bb_id = block_id_list_->Get(idx_++);
+ res = mir_graph_->GetBasicBlock(bb_id);
}
return res;
}
-// AllNodes uses the existing GrowableArray iterator, so use different NextBody().
-inline BasicBlock* AllNodesIterator::NextBody(bool had_change) {
+// Repeat full forward passes over all nodes until no change occurs during a complete pass.
+inline BasicBlock* DataflowIterator::ForwardRepeatNext(bool had_change) {
changed_ |= had_change;
BasicBlock* res = NULL;
+ if ((idx_ >= end_idx_) && changed_) {
+ idx_ = start_idx_;
+ changed_ = false;
+ }
+ if (idx_ < end_idx_) {
+ int bb_id = block_id_list_->Get(idx_++);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ return res;
+}
+
+// Single reverse pass over the nodes.
+inline BasicBlock* DataflowIterator::ReverseSingleNext() {
+ BasicBlock* res = NULL;
+ if (idx_ >= 0) {
+ int bb_id = block_id_list_->Get(idx_--);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ return res;
+}
+
+// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
+inline BasicBlock* DataflowIterator::ReverseRepeatNext(bool had_change) {
+ changed_ |= had_change;
+ BasicBlock* res = NULL;
+ if ((idx_ < 0) && changed_) {
+ idx_ = start_idx_;
+ changed_ = false;
+ }
+ if (idx_ >= 0) {
+ int bb_id = block_id_list_->Get(idx_--);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ return res;
+}
+
+// AllNodes uses the existing GrowableArray iterator, and should be considered unordered.
+inline BasicBlock* AllNodesIterator::Next() {
+ BasicBlock* res = NULL;
bool keep_looking = true;
while (keep_looking) {
res = all_nodes_iterator_->Next();
- if (is_iterative_ && changed_ && (res == NULL)) {
- all_nodes_iterator_->Reset();
- changed_ = false;
- } else if ((res == NULL) || (!res->hidden)) {
+ if ((res == NULL) || (!res->hidden)) {
keep_looking = false;
}
}
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index da44ffd..1dab54e 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -27,124 +27,130 @@
* interesting orders. Note that for efficiency, the visit orders have been pre-computed.
* The order itself will not change during the iteration. However, for some uses,
* auxiliary data associated with the basic blocks may be changed during the iteration,
- * necessitating another pass over the list.
- *
- * To support this usage, we have is_iterative_. If false, the iteration is a one-shot
- * pass through the pre-computed list using Next(). If true, the caller must tell the
- * iterator whether a change has been made that necessitates another pass. Use
- * Next(had_change) for this. The general idea is that the iterative_ use case means
- * that the iterator will keep repeating the full basic block list until a complete pass
- * is made through it with no changes. Note that calling Next(true) does not affect
- * the iteration order or short-curcuit the current pass - it simply tells the iterator
- * that once it has finished walking through the block list it should reset and do another
- * full pass through the list.
+ * necessitating another pass over the list. If this behavior is required, use the
+ * "Repeating" variant. For the repeating variant, the caller must tell the iterator
+ * whether a change has been made that necessitates another pass. Note that calling Next(true)
+ * does not affect the iteration order or short-circuit the current pass - it simply tells
+ * the iterator that once it has finished walking through the block list it should reset and
+ * do another full pass through the list.
*/
class DataflowIterator {
public:
virtual ~DataflowIterator() {}
- // Return the next BasicBlock* to visit.
- BasicBlock* Next() {
- DCHECK(!is_iterative_);
- return NextBody(false);
- }
-
- /*
- * Return the next BasicBlock* to visit, and tell the iterator whether any change
- * has occurred that requires another full pass over the block list.
- */
- BasicBlock* Next(bool had_change) {
- DCHECK(is_iterative_);
- return NextBody(had_change);
- }
-
protected:
- DataflowIterator(MIRGraph* mir_graph, bool is_iterative, int start_idx, int end_idx,
- bool reverse)
+ DataflowIterator(MIRGraph* mir_graph, int start_idx, int end_idx)
: mir_graph_(mir_graph),
- is_iterative_(is_iterative),
start_idx_(start_idx),
end_idx_(end_idx),
- reverse_(reverse),
block_id_list_(NULL),
idx_(0),
changed_(false) {}
- virtual BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+ virtual BasicBlock* ForwardSingleNext() ALWAYS_INLINE;
+ virtual BasicBlock* ReverseSingleNext() ALWAYS_INLINE;
+ virtual BasicBlock* ForwardRepeatNext(bool had_change) ALWAYS_INLINE;
+ virtual BasicBlock* ReverseRepeatNext(bool had_change) ALWAYS_INLINE;
MIRGraph* const mir_graph_;
- const bool is_iterative_;
const int start_idx_;
const int end_idx_;
- const bool reverse_;
GrowableArray<int>* block_id_list_;
int idx_;
bool changed_;
}; // DataflowIterator
- class ReachableNodesIterator : public DataflowIterator {
- public:
- ReachableNodesIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
- idx_ = start_idx_;
- block_id_list_ = mir_graph->GetDfsOrder();
- }
- };
-
class PreOrderDfsIterator : public DataflowIterator {
public:
- PreOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
+ explicit PreOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDfsOrder();
}
+
+ BasicBlock* Next() {
+ return ForwardSingleNext();
+ }
};
- class PostOrderDfsIterator : public DataflowIterator {
+ class RepeatingPreOrderDfsIterator : public DataflowIterator {
public:
- PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
+ explicit RepeatingPreOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
+ idx_ = start_idx_;
+ block_id_list_ = mir_graph->GetDfsOrder();
+ }
+
+ BasicBlock* Next(bool had_change) {
+ return ForwardRepeatNext(had_change);
+ }
+ };
+
+ class RepeatingPostOrderDfsIterator : public DataflowIterator {
+ public:
+ explicit RepeatingPostOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDfsPostOrder();
}
+
+ BasicBlock* Next(bool had_change) {
+ return ForwardRepeatNext(had_change);
+ }
};
class ReversePostOrderDfsIterator : public DataflowIterator {
public:
- ReversePostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative,
- mir_graph->GetNumReachableBlocks() -1, 0, true) {
+ explicit ReversePostOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDfsPostOrder();
}
+
+ BasicBlock* Next() {
+ return ReverseSingleNext();
+ }
+ };
+
+ class RepeatingReversePostOrderDfsIterator : public DataflowIterator {
+ public:
+ explicit RepeatingReversePostOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
+ idx_ = start_idx_;
+ block_id_list_ = mir_graph->GetDfsPostOrder();
+ }
+
+ BasicBlock* Next(bool had_change) {
+ return ReverseRepeatNext(had_change);
+ }
};
class PostOrderDOMIterator : public DataflowIterator {
public:
- PostOrderDOMIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
+ explicit PostOrderDOMIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDomPostOrder();
}
+
+ BasicBlock* Next() {
+ return ForwardSingleNext();
+ }
};
class AllNodesIterator : public DataflowIterator {
public:
- AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
- all_nodes_iterator_ =
- new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
+ explicit AllNodesIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, 0) {
+ all_nodes_iterator_ = new
+ (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
}
void Reset() {
all_nodes_iterator_->Reset();
}
- BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+ BasicBlock* Next() ALWAYS_INLINE;
private:
GrowableArray<BasicBlock*>::Iterator* all_nodes_iterator_;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index a392f82..ffd7905 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -24,6 +24,7 @@
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
+#include "thread-inl.h"
namespace art {
namespace optimizer {
@@ -216,8 +217,8 @@
uint32_t field_idx = inst->VRegC_22c();
int field_offset;
bool is_volatile;
- bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, field_offset,
- is_volatile, is_put);
+ bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
+ &field_offset, &is_volatile);
if (fast_path && !is_volatile && IsUint(16, field_offset)) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
@@ -246,11 +247,13 @@
int vtable_idx;
uintptr_t direct_code;
uintptr_t direct_method;
- bool fast_path = driver_.ComputeInvokeInfo(&unit_, dex_pc, invoke_type,
- target_method, vtable_idx,
- direct_code, direct_method,
- false);
// TODO: support devirtualization.
+ const bool kEnableDevirtualization = false;
+ bool fast_path = driver_.ComputeInvokeInfo(&unit_, dex_pc,
+ false, kEnableDevirtualization,
+ &invoke_type,
+ &target_method, &vtable_idx,
+ &direct_code, &direct_method);
if (fast_path && original_invoke_type == invoke_type) {
if (vtable_idx >= 0 && IsUint(16, vtable_idx)) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h
index 8e2abfb..639120a 100644
--- a/compiler/dex/growable_array.h
+++ b/compiler/dex/growable_array.h
@@ -131,6 +131,11 @@
elem_list_[index]++;
}
+ /*
+ * Remove an existing element from list. If there is more than one copy
+ * of the element, only the first one encountered will be deleted.
+ */
+ // TODO: consider renaming this.
void Delete(T element) {
bool found = false;
for (size_t i = 0; i < num_used_ - 1; i++) {
@@ -150,6 +155,11 @@
size_t Size() const { return num_used_; }
+ void SetSize(size_t new_size) {
+ Resize(new_size);
+ num_used_ = new_size;
+ }
+
T* GetRawStorage() const { return elem_list_; }
static void* operator new(size_t size, ArenaAllocator* arena) {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index d7a4136..8472a3c 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1061,7 +1061,7 @@
memset(&stats, 0, sizeof(stats));
ClearAllVisitedFlags();
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
AnalyzeBlock(bb, &stats);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 3a73717..3d29908 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1221,10 +1221,10 @@
uint32_t current_offset = static_cast<uint32_t>(current_offset_);
bool fast_path =
cu_->compiler_driver->ComputeInvokeInfo(&m_unit, current_offset,
- type, target_method,
- vtable_idx,
- direct_code, direct_method,
- false) &&
+ false, true,
+ &type, &target_method,
+ &vtable_idx,
+ &direct_code, &direct_method) &&
!(cu_->enable_debug & (1 << kDebugSlowInvokePath));
return (((type == kDirect) || (type == kStatic)) &&
fast_path && ((direct_code == 0) || (direct_method == 0)));
@@ -1287,7 +1287,7 @@
if (cu_->disable_opt & (1 << kPromoteRegs)) {
return;
}
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CountUses(bb);
}
@@ -1331,7 +1331,7 @@
void MIRGraph::VerifyDataflow() {
/* Verify if all blocks are connected as claimed */
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
VerifyPredInfo(bb);
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 81702e3..c72283e 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -99,6 +99,7 @@
cur_block_(NULL),
num_blocks_(0),
current_code_item_(NULL),
+ block_map_(arena, 0, kGrowableArrayMisc),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
@@ -210,18 +211,18 @@
BasicBlock** immed_pred_block_p) {
BasicBlock* bb;
unsigned int i;
- SafeMap<unsigned int, BasicBlock*>::iterator it;
- it = block_map_.find(code_offset);
- if (it != block_map_.end()) {
- return it->second;
- } else if (!create) {
+ if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
return NULL;
}
+ bb = block_map_.Get(code_offset);
+ if ((bb != NULL) || !create) {
+ return bb;
+ }
if (split) {
- for (i = 0; i < block_list_.Size(); i++) {
- bb = block_list_.Get(i);
+ for (i = block_list_.Size(); i > 0; i--) {
+ bb = block_list_.Get(i - 1);
if (bb->block_type != kDalvikByteCode) continue;
/* Check if a branch jumps into the middle of an existing block */
if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
@@ -518,6 +519,8 @@
// TODO: need to rework expansion of block list & try_block_addr when inlining activated.
block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
+ block_map_.SetSize(block_map_.Size() + current_code_item_->insns_size_in_code_units_);
+
// TODO: replace with explicit resize routine. Using automatic extension side effect for now.
try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 28ab283..0244dae 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -580,11 +580,34 @@
void SSATransformation();
void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
void NullCheckElimination();
+ /*
+ * Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
+ * we have to do some work to figure out the sreg type. For some operations it is
+ * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
+ * may never know the "real" type.
+ *
+ * We perform the type inference operation by using an iterative walk over
+ * the graph, propagating types "defined" by typed opcodes to uses and defs in
+ * non-typed opcodes (such as MOVE). The Setxx(index) helpers are used to set defined
+ * types on typed opcodes (such as ADD_INT). The Setxx(index, is_xx) form is used to
+ * propagate types through non-typed opcodes such as PHI and MOVE. The is_xx flag
+ * tells whether our guess of the type is based on a previously typed definition.
+ * If so, the defined type takes precedence. Note that it's possible to have the same sreg
+ * show multiple defined types because dx treats constants as untyped bit patterns.
+ * The return value of the Setxx() helpers says whether or not the Setxx() action changed
+ * the current guess, and is used to know when to terminate the iterative walk.
+ */
bool SetFp(int index, bool is_fp);
+ bool SetFp(int index);
bool SetCore(int index, bool is_core);
+ bool SetCore(int index);
bool SetRef(int index, bool is_ref);
+ bool SetRef(int index);
bool SetWide(int index, bool is_wide);
+ bool SetWide(int index);
bool SetHigh(int index, bool is_high);
+ bool SetHigh(int index);
+
void AppendMIR(BasicBlock* bb, MIR* mir);
void PrependMIR(BasicBlock* bb, MIR* mir);
void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
@@ -705,7 +728,7 @@
BasicBlock* cur_block_;
int num_blocks_;
const DexFile::CodeItem* current_code_item_;
- SafeMap<unsigned int, BasicBlock*> block_map_; // FindBlock lookup cache.
+ GrowableArray<BasicBlock*> block_map_; // FindBlock lookup cache.
std::vector<DexCompilationUnit*> m_units_; // List of methods included in this graph
typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset)
std::vector<MIRLocation> method_stack_; // Include stack
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index b7611f8..05e428e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -96,7 +96,7 @@
is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(),
ArenaAllocator::kAllocDFInfo));
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
DoConstantPropogation(bb);
}
@@ -762,11 +762,11 @@
void MIRGraph::NullCheckElimination() {
if (!(cu_->disable_opt & (1 << kNullCheckElimination))) {
DCHECK(temp_ssa_register_v_ != NULL);
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
NullCheckEliminationInit(bb);
}
- PreOrderDfsIterator iter2(this, true /* iterative */);
+ RepeatingPreOrderDfsIterator iter2(this);
bool change = false;
for (BasicBlock* bb = iter2.Next(change); bb != NULL; bb = iter2.Next(change)) {
change = EliminateNullChecks(bb);
@@ -778,7 +778,7 @@
}
void MIRGraph::BasicBlockCombine() {
- PreOrderDfsIterator iter(this, false /* not iterative */);
+ PreOrderDfsIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CombineBlocks(bb);
}
@@ -791,7 +791,7 @@
if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
VerifyDataflow();
}
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
LayoutBlocks(bb);
}
@@ -804,7 +804,7 @@
Checkstats* stats =
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), ArenaAllocator::kAllocDFInfo));
checkstats_ = stats;
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CountChecks(bb);
}
@@ -858,7 +858,7 @@
if (!(cu_->disable_opt & (1 << kBBOpt))) {
DCHECK_EQ(cu_->num_compiler_temps, 0);
ClearAllVisitedFlags();
- PreOrderDfsIterator iter2(this, false /* not iterative */);
+ PreOrderDfsIterator iter2(this);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
BuildExtendedBBList(bb);
}
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 90cec75..df10f7e 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -30,10 +30,10 @@
#include "dex/compiler_internals.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/frontend.h"
-#include "mir_to_gbc.h"
-
#include "llvm/llvm_compilation_unit.h"
#include "llvm/utils_llvm.h"
+#include "mir_to_gbc.h"
+#include "thread-inl.h"
const char* kLabelFormat = "%c0x%x_%d";
const char kInvalidBlock = 0xff;
@@ -1877,7 +1877,7 @@
CreateFunction();
// Create an LLVM basic block for each MIR block in dfs preorder
- PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
+ PreOrderDfsIterator iter(mir_graph_);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CreateLLVMBasicBlock(bb);
}
@@ -1909,7 +1909,7 @@
}
}
- PreOrderDfsIterator iter2(mir_graph_, false /* not iterative */);
+ PreOrderDfsIterator iter2(mir_graph_);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
BlockBitcodeConversion(bb);
}
@@ -1972,7 +1972,7 @@
::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
new ::llvm::tool_output_file(fname.c_str(), errmsg,
- ::llvm::sys::fs::F_Binary));
+ ::llvm::raw_fd_ostream::F_Binary));
if (!errmsg.empty()) {
LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 0649c9f..2d69d93 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1122,6 +1122,12 @@
lir->operands[1] = 0;
lir->target = 0;
SetupResourceMasks(lir);
+ /*
+ * Because we just added this new instruction after the current one,
+ * advance lir so that this new instruction won't be checked for displacement
+ * overflow until the next pass (when its base offset will be properly established).
+ */
+ lir = new_inst;
res = kRetryAll;
} else {
lir->operands[1] = delta >> 1;
@@ -1170,7 +1176,7 @@
lir->operands[0] = delta >> 1;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == 0) { // Useless branch
- lir->flags.is_nop = true;
+ NopLIR(lir);
res = kRetryAll;
}
} else if (lir->opcode == kThumbBUncond) {
@@ -1188,7 +1194,7 @@
lir->operands[0] = delta >> 1;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == -1) { // Useless branch
- lir->flags.is_nop = true;
+ NopLIR(lir);
res = kRetryAll;
}
}
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 2dbe5f5..bba2ec5 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -120,9 +120,10 @@
// TODO: move to common code
void ArmMir2Lir::GenPrintLabel(MIR* mir) {
/* Mark the beginning of a Dalvik instruction for line tracking */
- char* inst_str = cu_->verbose ?
- mir_graph_->GetDalvikDisassembly(mir) : NULL;
- MarkBoundary(mir->offset, inst_str);
+ if (cu_->verbose) {
+ char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
+ MarkBoundary(mir->offset, inst_str);
+ }
}
MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
@@ -130,7 +131,7 @@
int field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+ bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
@@ -155,7 +156,7 @@
int field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+ bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 291319f..1954fba 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -51,7 +51,6 @@
int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
- RegisterInfo* GetRegInfo(int reg);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 6fbdd2f..07782d9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -234,11 +234,17 @@
rl_false = LoadValue(rl_false, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegImm(kOpCmp, rl_src.low_reg, 0);
- OpIT(kCondEq, "E");
- LIR* l1 = OpRegCopy(rl_result.low_reg, rl_true.low_reg);
- l1->flags.is_nop = false; // Make sure this instruction isn't optimized away
- LIR* l2 = OpRegCopy(rl_result.low_reg, rl_false.low_reg);
- l2->flags.is_nop = false; // Make sure this instruction isn't optimized away
+ if (rl_result.low_reg == rl_true.low_reg) { // Is the "true" case already in place?
+ OpIT(kCondNe, "");
+ OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+ } else if (rl_result.low_reg == rl_false.low_reg) { // False case in place?
+ OpIT(kCondEq, "");
+ OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ } else { // Normal - select between the two.
+ OpIT(kCondEq, "E");
+ OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+ }
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
StoreValue(rl_dest, rl_result);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 6cc3052..203a8cc 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -691,11 +691,6 @@
return res;
}
-ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg) {
- return ARM_FPREG(reg) ? ®_pool_->FPRegs[reg & ARM_FP_REG_MASK]
- : ®_pool_->core_regs[reg];
-}
-
/* To be used when explicitly managing register use */
void ArmMir2Lir::LockCallTemps() {
LockTemp(r0);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index e081c16..f13ab2d 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -50,14 +50,37 @@
DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
}
-bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put) {
+bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
return cu_->compiler_driver->ComputeInstanceFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
+ field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
+}
+
+/* Remove a LIR from the list. */
+void Mir2Lir::UnlinkLIR(LIR* lir) {
+ if (UNLIKELY(lir == first_lir_insn_)) {
+ first_lir_insn_ = lir->next;
+ if (lir->next != NULL) {
+ lir->next->prev = NULL;
+ } else {
+ DCHECK(lir->next == NULL);
+ DCHECK(lir == last_lir_insn_);
+ last_lir_insn_ = NULL;
+ }
+ } else if (lir == last_lir_insn_) {
+ last_lir_insn_ = lir->prev;
+ lir->prev->next = NULL;
+ } else if ((lir->prev != NULL) && (lir->next != NULL)) {
+ lir->prev->next = lir->next;
+ lir->next->prev = lir->prev;
+ }
}
/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
lir->flags.is_nop = true;
+ if (!cu_->verbose) {
+ UnlinkLIR(lir);
+ }
}
void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
@@ -696,11 +719,11 @@
for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
lir->offset = offset;
- if (lir->opcode >= 0) {
+ if (LIKELY(lir->opcode >= 0)) {
if (!lir->flags.is_nop) {
offset += lir->flags.size;
}
- } else if (lir->opcode == kPseudoPseudoAlign4) {
+ } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
if (offset & 0x2) {
offset += 2;
lir->operands[0] = 1;
@@ -710,7 +733,6 @@
}
/* Pseudo opcodes don't consume space */
}
-
return offset;
}
@@ -783,21 +805,17 @@
/*
* Insert a kPseudoCaseLabel at the beginning of the Dalvik
* offset vaddr. This label will be used to fix up the case
- * branch table during the assembly phase. Be sure to set
- * all resource flags on this to prevent code motion across
- * target boundaries. KeyVal is just there for debugging.
+ * branch table during the assembly phase. All resource flags
+ * are set to prevent code motion. KeyVal is just there for debugging.
*/
LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
- SafeMap<unsigned int, LIR*>::iterator it;
- it = boundary_map_.find(vaddr);
- if (it == boundary_map_.end()) {
- LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
- }
+ LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
new_label->dalvik_offset = vaddr;
new_label->opcode = kPseudoCaseLabel;
new_label->operands[0] = keyVal;
- InsertLIRAfter(it->second, new_label);
+ new_label->def_mask = ENCODE_ALL;
+ InsertLIRAfter(boundary_lir, new_label);
return new_label;
}
@@ -881,18 +899,9 @@
}
}
-/*
- * Set up special LIR to mark a Dalvik byte-code instruction start and
- * record it in the boundary_map. NOTE: in cases such as kMirOpCheck in
- * which we split a single Dalvik instruction, only the first MIR op
- * associated with a Dalvik PC should be entered into the map.
- */
-LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
- LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
- if (boundary_map_.find(offset) == boundary_map_.end()) {
- boundary_map_.Put(offset, res);
- }
- return res;
+/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
+void Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
+ NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
}
bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
@@ -947,6 +956,8 @@
throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
+ tempreg_info_(arena, 20, kGrowableArrayMisc),
+ reginfo_map_(arena, 64, kGrowableArrayMisc),
data_offset_(0),
total_size_(0),
block_label_list_(NULL),
@@ -1089,5 +1100,4 @@
new_lir->next->prev = new_lir;
}
-
} // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index f018c61..4dd55d7 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -127,13 +127,11 @@
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
// OK - convert this to a compare immediate and branch
OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
- OpUnconditionalBranch(fall_through);
return;
}
}
rl_src2 = LoadValue(rl_src2, kCoreReg);
OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
- OpUnconditionalBranch(fall_through);
}
void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
@@ -164,7 +162,6 @@
LOG(FATAL) << "Unexpected opcode " << opcode;
}
OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
- OpUnconditionalBranch(fall_through);
}
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -337,8 +334,8 @@
bool is_volatile;
bool is_referrers_class;
bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
- is_referrers_class, is_volatile, true);
+ field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
DCHECK_GE(field_offset, 0);
int rBase;
@@ -423,8 +420,8 @@
bool is_volatile;
bool is_referrers_class;
bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
- is_referrers_class, is_volatile, false);
+ field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
DCHECK_GE(field_offset, 0);
int rBase;
@@ -626,7 +623,7 @@
int field_offset;
bool is_volatile;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+ bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
RegLocation rl_result;
@@ -687,8 +684,7 @@
int field_offset;
bool is_volatile;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile,
- true);
+ bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
RegisterClass reg_class = oat_reg_class_by_size(size);
DCHECK_GE(field_offset, 0);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 2a0a23c..72ae91e 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1373,10 +1373,10 @@
bool fast_path =
cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
current_dalvik_offset_,
- info->type, target_method,
- vtable_idx,
- direct_code, direct_method,
- true) && !SLOW_INVOKE_PATH;
+ true, true,
+ &info->type, &target_method,
+ &vtable_idx,
+ &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
if (info->type == kInterface) {
if (fast_path) {
p_null_ck = &null_ck;
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 630e990..cb7694d 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -99,12 +99,11 @@
int native_reg_id;
if (cu_->instruction_set == kX86) {
// If x86, location differs depending on whether memory/reg operation.
- native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
- : this_lir->operands[0];
+ native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
} else {
native_reg_id = this_lir->operands[0];
}
- bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+ bool is_this_lir_load = target_flags & IS_LOAD;
LIR* check_lir;
/* Use the mem mask to determine the rough memory location */
uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
@@ -169,7 +168,7 @@
if (check_lir->operands[0] != native_reg_id) {
ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
}
- check_lir->flags.is_nop = true;
+ NopLIR(check_lir);
}
} else if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias */
@@ -188,7 +187,7 @@
native_reg_id) {
ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
}
- check_lir->flags.is_nop = true;
+ NopLIR(check_lir);
} else {
/*
* Destinaions are of different types -
@@ -202,7 +201,7 @@
stop_here = true;
} else if (!is_this_lir_load && !is_check_lir_load) {
/* WAW - nuke the earlier store */
- this_lir->flags.is_nop = true;
+ NopLIR(this_lir);
stop_here = true;
}
/* Partial overlap */
@@ -257,7 +256,7 @@
* top-down order.
*/
InsertLIRBefore(check_lir, new_store_lir);
- this_lir->flags.is_nop = true;
+ NopLIR(this_lir);
}
break;
} else if (!check_lir->flags.is_nop) {
@@ -453,7 +452,7 @@
* is never the first LIR on the list
*/
InsertLIRBefore(cur_lir, new_load_lir);
- this_lir->flags.is_nop = true;
+ NopLIR(this_lir);
}
}
}
@@ -468,41 +467,4 @@
}
}
-/*
- * Nop any unconditional branches that go to the next instruction.
- * Note: new redundant branches may be inserted later, and we'll
- * use a check in final instruction assembly to nop those out.
- */
-void Mir2Lir::RemoveRedundantBranches() {
- LIR* this_lir;
-
- for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {
- /* Branch to the next instruction */
- if (IsUnconditionalBranch(this_lir)) {
- LIR* next_lir = this_lir;
-
- while (true) {
- next_lir = NEXT_LIR(next_lir);
-
- /*
- * Is the branch target the next instruction?
- */
- if (next_lir == this_lir->target) {
- this_lir->flags.is_nop = true;
- break;
- }
-
- /*
- * Found real useful stuff between the branch and the target.
- * Need to explicitly check the last_lir_insn_ here because it
- * might be the last real instruction.
- */
- if (!is_pseudo_opcode(next_lir->opcode) ||
- (next_lir == last_lir_insn_))
- break;
- }
- }
- }
-}
-
} // namespace art
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index cd25232..dbd668b 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -503,7 +503,7 @@
if (!unconditional) {
InsertLIRBefore(lir, hop_target);
}
- lir->flags.is_nop = true;
+ NopLIR(lir);
}
/*
@@ -561,7 +561,7 @@
RawLIR(lir->dalvik_offset, kMipsAddu,
lir->operands[0], lir->operands[0], r_RA);
InsertLIRBefore(lir, new_addu);
- lir->flags.is_nop = true;
+ NopLIR(lir);
res = kRetryAll;
}
} else if (lir->opcode == kMipsDeltaLo) {
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index b9cb720..8d0b347 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -52,7 +52,6 @@
int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
- RegisterInfo* GetRegInfo(int reg);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4ee5b23..8e768dc 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -399,11 +399,6 @@
return res;
}
-MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg) {
- return MIPS_FPREG(reg) ? ®_pool_->FPRegs[reg & MIPS_FP_REG_MASK]
- : ®_pool_->core_regs[reg];
-}
-
/* To be used when explicitly managing register use */
void MipsMir2Lir::LockCallTemps() {
LockTemp(rMIPS_ARG0);
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 440df2a..0ca8d8d 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -33,7 +33,12 @@
p->def_end = NULL;
if (p->pair) {
p->pair = false;
- Clobber(p->partner);
+ p = GetRegInfo(p->partner);
+ p->pair = false;
+ p->live = false;
+ p->s_reg = INVALID_SREG;
+ p->def_start = NULL;
+ p->def_end = NULL;
}
}
}
@@ -196,6 +201,11 @@
SetupTargetResourceMasks(lir);
}
+inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(int reg) {
+ DCHECK(reginfo_map_.Get(reg) != NULL);
+ return reginfo_map_.Get(reg);
+}
+
} // namespace art
#endif // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c41feb1..6f39869 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -18,6 +18,7 @@
#include "dex/dataflow_iterator-inl.h"
#include "mir_to_lir-inl.h"
#include "object_utils.h"
+#include "thread-inl.h"
namespace art {
@@ -706,16 +707,15 @@
}
// Free temp registers and reset redundant store tracking.
- ResetRegPool();
- ResetDefTracking();
-
ClobberAllRegs();
if (bb->block_type == kEntryBlock) {
+ ResetRegPool();
int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
} else if (bb->block_type == kExitBlock) {
+ ResetRegPool();
GenExitSequence();
}
@@ -736,16 +736,16 @@
current_dalvik_offset_ = mir->offset;
int opcode = mir->dalvikInsn.opcode;
- LIR* boundary_lir;
// Mark the beginning of a Dalvik instruction for line tracking.
- char* inst_str = cu_->verbose ?
- mir_graph_->GetDalvikDisassembly(mir) : NULL;
- boundary_lir = MarkBoundary(mir->offset, inst_str);
+ if (cu_->verbose) {
+ char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
+ MarkBoundary(mir->offset, inst_str);
+ }
// Remember the first LIR for this block.
if (head_lir == NULL) {
- head_lir = boundary_lir;
- // Set the first boundary_lir as a scheduling barrier.
+ head_lir = &block_label_list_[bb->id];
+ // Set the first label as a scheduling barrier.
head_lir->def_mask = ENCODE_ALL;
}
@@ -771,11 +771,6 @@
if (head_lir) {
// Eliminate redundant loads/stores and delay stores into later slots.
ApplyLocalOptimizations(head_lir, last_lir_insn_);
-
- // Generate an unconditional branch to the fallthrough block.
- if (bb->fall_through) {
- OpUnconditionalBranch(&block_label_list_[bb->fall_through->id]);
- }
}
return false;
}
@@ -815,9 +810,19 @@
static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
ArenaAllocator::kAllocLIR));
- PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
- MethodBlockCodeGen(bb);
+ PreOrderDfsIterator iter(mir_graph_);
+ BasicBlock* curr_bb = iter.Next();
+ BasicBlock* next_bb = iter.Next();
+ while (curr_bb != NULL) {
+ MethodBlockCodeGen(curr_bb);
+ // If the fall_through block is no longer laid out consecutively, drop in a branch.
+ if ((curr_bb->fall_through != NULL) && (curr_bb->fall_through != next_bb)) {
+ OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through->id]);
+ }
+ curr_bb = next_bb;
+ do {
+ next_bb = iter.Next();
+ } while ((next_bb != NULL) && (next_bb->block_type == kDead));
}
HandleSuspendLaunchPads();
@@ -825,10 +830,6 @@
HandleThrowLaunchPads();
HandleIntrinsicLaunchPads();
-
- if (!(cu_->disable_opt & (1 << kSafeOptimizations))) {
- RemoveRedundantBranches();
- }
}
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index a37ebd1..401e3d5 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -158,6 +158,10 @@
#define ENCODE_ALL (~0ULL)
#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
+
+// Mask to denote sreg as the start of a double. Must not interfere with low 16 bits.
+#define STARTING_DOUBLE_SREG 0x10000
+
// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
@@ -187,7 +191,6 @@
struct RefCounts {
int count;
int s_reg;
- bool double_start; // Starting v_reg for a double
};
/*
@@ -250,7 +253,7 @@
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
void MarkSafepointPC(LIR* inst);
- bool FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
+ bool FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile);
void SetupResourceMasks(LIR* lir);
void AssembleLIR();
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
@@ -274,8 +277,9 @@
void ProcessSwitchTables();
void DumpSparseSwitchTable(const uint16_t* table);
void DumpPackedSwitchTable(const uint16_t* table);
- LIR* MarkBoundary(int offset, const char* inst_str);
+ void MarkBoundary(int offset, const char* inst_str);
void NopLIR(LIR* lir);
+ void UnlinkLIR(LIR* lir);
bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
bool IsInexpensiveConstant(RegLocation rl_src);
ConditionCode FlipComparisonOrder(ConditionCode before);
@@ -302,7 +306,6 @@
void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
- void RemoveRedundantBranches();
// Shared by all targets - implemented in ralloc_util.cc
int GetSRegHi(int lowSreg);
@@ -324,11 +327,9 @@
void RecordCorePromotion(int reg, int s_reg);
int AllocPreservedCoreReg(int s_reg);
void RecordFpPromotion(int reg, int s_reg);
- int AllocPreservedSingle(int s_reg, bool even);
+ int AllocPreservedSingle(int s_reg);
int AllocPreservedDouble(int s_reg);
- int AllocPreservedFPReg(int s_reg, bool double_start);
- int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
- bool required);
+ int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, bool required);
int AllocTempDouble();
int AllocFreeTemp();
int AllocTemp();
@@ -367,13 +368,14 @@
RegLocation UpdateRawLoc(RegLocation loc);
RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
- void CountRefs(RefCounts* core_counts, RefCounts* fp_counts);
+ void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
void DumpCounts(const RefCounts* arr, int size, const char* msg);
void DoPromotion();
int VRegOffset(int v_reg);
int SRegOffset(int s_reg);
RegLocation GetReturnWide(bool is_double);
RegLocation GetReturn(bool is_float);
+ RegisterInfo* GetRegInfo(int reg);
// Shared by all targets - implemented in gen_common.cc.
bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
@@ -550,7 +552,6 @@
virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
virtual int S2d(int low_reg, int high_reg) = 0;
virtual int TargetReg(SpecialTargetRegister reg) = 0;
- virtual RegisterInfo* GetRegInfo(int reg) = 0;
virtual RegLocation GetReturnAlt() = 0;
virtual RegLocation GetReturnWideAlt() = 0;
virtual RegLocation LocCReturn() = 0;
@@ -727,7 +728,8 @@
GrowableArray<LIR*> throw_launchpads_;
GrowableArray<LIR*> suspend_launchpads_;
GrowableArray<LIR*> intrinsic_launchpads_;
- SafeMap<unsigned int, LIR*> boundary_map_; // boundary lookup cache.
+ GrowableArray<RegisterInfo*> tempreg_info_;
+ GrowableArray<RegisterInfo*> reginfo_map_;
/*
* Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
* Native PC is on the return address of the safepointed operation. Dex PC is for
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 71b74a4..7927ff9 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -28,13 +28,9 @@
* live until it is either explicitly killed or reallocated.
*/
void Mir2Lir::ResetRegPool() {
- for (int i = 0; i < reg_pool_->num_core_regs; i++) {
- if (reg_pool_->core_regs[i].is_temp)
- reg_pool_->core_regs[i].in_use = false;
- }
- for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
- if (reg_pool_->FPRegs[i].is_temp)
- reg_pool_->FPRegs[i].in_use = false;
+ GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
+ for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ info->in_use = false;
}
// Reset temp tracking sanity check.
if (kIsDebugBuild) {
@@ -48,13 +44,21 @@
*/
void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) {
for (int i = 0; i < num; i++) {
- regs[i].reg = reg_nums[i];
+ uint32_t reg_number = reg_nums[i];
+ regs[i].reg = reg_number;
regs[i].in_use = false;
regs[i].is_temp = false;
regs[i].pair = false;
regs[i].live = false;
regs[i].dirty = false;
regs[i].s_reg = INVALID_SREG;
+ size_t map_size = reginfo_map_.Size();
+ if (reg_number >= map_size) {
+ for (uint32_t i = 0; i < ((reg_number - map_size) + 1); i++) {
+ reginfo_map_.Insert(NULL);
+ }
+ }
+ reginfo_map_.Put(reg_number, ®s[i]);
}
}
@@ -170,17 +174,12 @@
promotion_map_[p_map_idx].FpReg = reg;
}
-/*
- * Reserve a callee-save fp single register. Try to fullfill request for
- * even/odd allocation, but go ahead and allocate anything if not
- * available. If nothing's available, return -1.
- */
-int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) {
- int res = -1;
+// Reserve a callee-save fp single register.
+int Mir2Lir::AllocPreservedSingle(int s_reg) {
+ int res = -1; // Return code if none available.
RegisterInfo* FPRegs = reg_pool_->FPRegs;
for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
- if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
- ((FPRegs[i].reg & 0x1) == 0) == even) {
+ if (!FPRegs[i].is_temp && !FPRegs[i].in_use) {
res = FPRegs[i].reg;
RecordFpPromotion(res, s_reg);
break;
@@ -246,26 +245,6 @@
return res;
}
-
-/*
- * Reserve a callee-save fp register. If this register can be used
- * as the first of a double, attempt to allocate an even pair of fp
- * single regs (but if can't still attempt to allocate a single, preferring
- * first to allocate an odd register.
- */
-int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) {
- int res = -1;
- if (double_start) {
- res = AllocPreservedDouble(s_reg);
- }
- if (res == -1) {
- res = AllocPreservedSingle(s_reg, false /* try odd # */);
- }
- if (res == -1)
- res = AllocPreservedSingle(s_reg, true /* try even # */);
- return res;
-}
-
int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
bool required) {
int next = *next_temp;
@@ -379,7 +358,7 @@
if (s_reg == -1)
return NULL;
for (int i = 0; i < num_regs; i++) {
- if (p[i].live && (p[i].s_reg == s_reg)) {
+ if ((p[i].s_reg == s_reg) && p[i].live) {
if (p[i].is_temp)
p[i].in_use = true;
return &p[i];
@@ -412,47 +391,16 @@
}
void Mir2Lir::FreeTemp(int reg) {
- RegisterInfo* p = reg_pool_->core_regs;
- int num_regs = reg_pool_->num_core_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- if (p[i].is_temp) {
- p[i].in_use = false;
- }
- p[i].pair = false;
- return;
- }
+ RegisterInfo* p = GetRegInfo(reg);
+ if (p->is_temp) {
+ p->in_use = false;
}
- p = reg_pool_->FPRegs;
- num_regs = reg_pool_->num_fp_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- if (p[i].is_temp) {
- p[i].in_use = false;
- }
- p[i].pair = false;
- return;
- }
- }
- LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
+ p->pair = false;
}
Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) {
- RegisterInfo* p = reg_pool_->core_regs;
- int num_regs = reg_pool_->num_core_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- return p[i].live ? &p[i] : NULL;
- }
- }
- p = reg_pool_->FPRegs;
- num_regs = reg_pool_->num_fp_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- return p[i].live ? &p[i] : NULL;
- }
- }
- return NULL;
+ RegisterInfo* p = GetRegInfo(reg);
+ return p->live ? p : NULL;
}
Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) {
@@ -476,27 +424,10 @@
* allocated. Use with caution.
*/
void Mir2Lir::LockTemp(int reg) {
- RegisterInfo* p = reg_pool_->core_regs;
- int num_regs = reg_pool_->num_core_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- DCHECK(p[i].is_temp);
- p[i].in_use = true;
- p[i].live = false;
- return;
- }
- }
- p = reg_pool_->FPRegs;
- num_regs = reg_pool_->num_fp_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- DCHECK(p[i].is_temp);
- p[i].in_use = true;
- p[i].live = false;
- return;
- }
- }
- LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
+ RegisterInfo* p = GetRegInfo(reg);
+ DCHECK(p->is_temp);
+ p->in_use = true;
+ p->live = false;
}
void Mir2Lir::ResetDef(int reg) {
@@ -599,11 +530,13 @@
}
void Mir2Lir::ClobberAllRegs() {
- for (int i = 0; i< reg_pool_->num_core_regs; i++) {
- ClobberBody(®_pool_->core_regs[i]);
- }
- for (int i = 0; i< reg_pool_->num_fp_regs; i++) {
- ClobberBody(®_pool_->FPRegs[i]);
+ GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
+ for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ info->live = false;
+ info->s_reg = INVALID_SREG;
+ info->def_start = NULL;
+ info->def_end = NULL;
+ info->pair = false;
}
}
@@ -659,11 +592,13 @@
void Mir2Lir::MarkTemp(int reg) {
RegisterInfo* info = GetRegInfo(reg);
+ tempreg_info_.Insert(info);
info->is_temp = true;
}
void Mir2Lir::UnmarkTemp(int reg) {
RegisterInfo* info = GetRegInfo(reg);
+ tempreg_info_.Delete(info);
info->is_temp = false;
}
@@ -912,18 +847,22 @@
}
/* USE SSA names to count references of base Dalvik v_regs. */
-void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts) {
+void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
RegLocation loc = mir_graph_->reg_location_[i];
RefCounts* counts = loc.fp ? fp_counts : core_counts;
int p_map_idx = SRegToPMap(loc.s_reg_low);
- // Don't count easily regenerated immediates
- if (loc.fp || !IsInexpensiveConstant(loc)) {
+ if (loc.fp) {
+ if (loc.wide) {
+ // Treat doubles as a unit, using upper half of fp_counts array.
+ counts[p_map_idx + num_regs].count += mir_graph_->GetUseCount(i);
+ i++;
+ } else {
+ counts[p_map_idx].count += mir_graph_->GetUseCount(i);
+ }
+ } else if (!IsInexpensiveConstant(loc)) {
counts[p_map_idx].count += mir_graph_->GetUseCount(i);
}
- if (loc.wide && loc.fp && !loc.high_word) {
- counts[p_map_idx].double_start = true;
- }
}
}
@@ -942,7 +881,11 @@
void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) {
LOG(INFO) << msg;
for (int i = 0; i < size; i++) {
- LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count;
+ if ((arr[i].s_reg & STARTING_DOUBLE_SREG) != 0) {
+ LOG(INFO) << "s_reg[D" << (arr[i].s_reg & ~STARTING_DOUBLE_SREG) << "]: " << arr[i].count;
+ } else {
+ LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count;
+ }
}
}
@@ -965,7 +908,7 @@
* count based on original Dalvik register name. Count refs
* separately based on type in order to give allocation
* preference to fp doubles - which must be allocated sequential
- * physical single fp registers started with an even-numbered
+ * physical single fp registers starting with an even-numbered
* reg.
* TUNING: replace with linear scan once we have the ability
* to describe register live ranges for GC.
@@ -974,7 +917,7 @@
static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * num_regs,
ArenaAllocator::kAllocRegAlloc));
RefCounts *FpRegs =
- static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * num_regs,
+ static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * num_regs * 2,
ArenaAllocator::kAllocRegAlloc));
// Set ssa names for original Dalvik registers
for (int i = 0; i < dalvik_regs; i++) {
@@ -982,46 +925,49 @@
}
// Set ssa name for Method*
core_regs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();
- FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg(); // For consistecy
+ FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg(); // For consistency.
+ FpRegs[dalvik_regs + num_regs].s_reg = mir_graph_->GetMethodSReg(); // for consistency.
// Set ssa names for compiler_temps
for (int i = 1; i <= cu_->num_compiler_temps; i++) {
CompilerTemp* ct = mir_graph_->compiler_temps_.Get(i);
core_regs[dalvik_regs + i].s_reg = ct->s_reg;
FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
+ FpRegs[num_regs + dalvik_regs + i].s_reg = ct->s_reg;
+ }
+
+ // Duplicate in upper half to represent possible fp double starting sregs.
+ for (int i = 0; i < num_regs; i++) {
+ FpRegs[num_regs + i].s_reg = FpRegs[i].s_reg | STARTING_DOUBLE_SREG;
}
// Sum use counts of SSA regs by original Dalvik vreg.
- CountRefs(core_regs, FpRegs);
+ CountRefs(core_regs, FpRegs, num_regs);
- /*
- * Ideally, we'd allocate doubles starting with an even-numbered
- * register. Bias the counts to try to allocate any vreg that's
- * used as the start of a pair first.
- */
- for (int i = 0; i < num_regs; i++) {
- if (FpRegs[i].double_start) {
- FpRegs[i].count *= 2;
- }
- }
// Sort the count arrays
qsort(core_regs, num_regs, sizeof(RefCounts), SortCounts);
- qsort(FpRegs, num_regs, sizeof(RefCounts), SortCounts);
+ qsort(FpRegs, num_regs * 2, sizeof(RefCounts), SortCounts);
if (cu_->verbose) {
DumpCounts(core_regs, num_regs, "Core regs after sort");
- DumpCounts(FpRegs, num_regs, "Fp regs after sort");
+ DumpCounts(FpRegs, num_regs * 2, "Fp regs after sort");
}
if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
// Promote FpRegs
- for (int i = 0; (i < num_regs) && (FpRegs[i].count >= promotion_threshold); i++) {
- int p_map_idx = SRegToPMap(FpRegs[i].s_reg);
- if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
- int reg = AllocPreservedFPReg(FpRegs[i].s_reg,
- FpRegs[i].double_start);
+ for (int i = 0; (i < (num_regs * 2)) && (FpRegs[i].count >= promotion_threshold); i++) {
+ int p_map_idx = SRegToPMap(FpRegs[i].s_reg & ~STARTING_DOUBLE_SREG);
+ if ((FpRegs[i].s_reg & STARTING_DOUBLE_SREG) != 0) {
+ if ((promotion_map_[p_map_idx].fp_location != kLocPhysReg) &&
+ (promotion_map_[p_map_idx + 1].fp_location != kLocPhysReg)) {
+ int low_sreg = FpRegs[i].s_reg & ~STARTING_DOUBLE_SREG;
+ // Ignore result - if can't alloc double may still be able to alloc singles.
+ AllocPreservedDouble(low_sreg);
+ }
+ } else if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
+ int reg = AllocPreservedSingle(FpRegs[i].s_reg);
if (reg < 0) {
- break; // No more left
+ break; // No more left.
}
}
}
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index e883432..3e76883 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1237,7 +1237,7 @@
delta = target - pc;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
// Useless branch
- lir->flags.is_nop = true;
+ NopLIR(lir);
if (kVerbosePcFixup) {
LOG(INFO) << "Retry for useless branch at " << lir->offset;
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 478654d..0f28110 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -52,7 +52,6 @@
int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
- RegisterInfo* GetRegInfo(int reg);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 26accab..94dd759 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -375,11 +375,6 @@
return res;
}
-X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg) {
- return X86_FPREG(reg) ? ®_pool_->FPRegs[reg & X86_FP_REG_MASK]
- : ®_pool_->core_regs[reg];
-}
-
/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
LockTemp(rX86_ARG0);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index cd1602f..366d7f2 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -22,7 +22,7 @@
namespace art {
void MIRGraph::ClearAllVisitedFlags() {
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
bb->visited = false;
}
@@ -145,11 +145,11 @@
def_block_matrix_[i] =
new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
}
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
FindLocalLiveIn(bb);
}
- AllNodesIterator iter2(this, false /* not iterative */);
+ AllNodesIterator iter2(this);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
FillDefBlockMatrix(bb);
}
@@ -377,7 +377,7 @@
int num_total_blocks = GetBasicBlockListCount();
/* Initialize domination-related data structures */
- ReachableNodesIterator iter(this, false /* not iterative */);
+ PreOrderDfsIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
InitializeDominationInfo(bb);
}
@@ -396,7 +396,7 @@
i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;
/* Compute the immediate dominators */
- ReversePostOrderDfsIterator iter2(this, true /* iterative */);
+ RepeatingReversePostOrderDfsIterator iter2(this);
bool change = false;
for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
change = ComputeblockIDom(bb);
@@ -414,19 +414,19 @@
}
GetEntryBlock()->i_dom = NULL;
- ReachableNodesIterator iter3(this, false /* not iterative */);
+ PreOrderDfsIterator iter3(this);
for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
SetDominators(bb);
}
- ReversePostOrderDfsIterator iter4(this, false /* not iterative */);
+ ReversePostOrderDfsIterator iter4(this);
for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
ComputeBlockDominators(bb);
}
// Compute the dominance frontier for each block.
ComputeDomPostOrderTraversal(GetEntryBlock());
- PostOrderDOMIterator iter5(this, false /* not iterative */);
+ PostOrderDOMIterator iter5(this);
for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
@@ -503,7 +503,7 @@
temp_dalvik_register_v_ =
new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
- PostOrderDfsIterator iter(this, true /* iterative */);
+ RepeatingPostOrderDfsIterator iter(this);
bool change = false;
for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
change = ComputeBlockLiveIns(bb);
@@ -700,7 +700,7 @@
new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
/* Insert phi-operands with latest SSA names from predecessor blocks */
- ReachableNodesIterator iter2(this, false /* not iterative */);
+ PreOrderDfsIterator iter2(this);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
InsertPhiNodeOperands(bb);
}
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 07f37bb..32fac0b 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -29,6 +29,16 @@
return change;
}
+bool MIRGraph::SetFp(int index) {
+ bool change = false;
+ if (!reg_location_[index].fp) {
+ reg_location_[index].fp = true;
+ reg_location_[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetCore(int index, bool is_core) {
bool change = false;
if (is_core && !reg_location_[index].defined) {
@@ -39,6 +49,16 @@
return change;
}
+bool MIRGraph::SetCore(int index) {
+ bool change = false;
+ if (!reg_location_[index].defined) {
+ reg_location_[index].core = true;
+ reg_location_[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetRef(int index, bool is_ref) {
bool change = false;
if (is_ref && !reg_location_[index].defined) {
@@ -49,6 +69,16 @@
return change;
}
+bool MIRGraph::SetRef(int index) {
+ bool change = false;
+ if (!reg_location_[index].defined) {
+ reg_location_[index].ref = true;
+ reg_location_[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetWide(int index, bool is_wide) {
bool change = false;
if (is_wide && !reg_location_[index].wide) {
@@ -58,6 +88,15 @@
return change;
}
+bool MIRGraph::SetWide(int index) {
+ bool change = false;
+ if (!reg_location_[index].wide) {
+ reg_location_[index].wide = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetHigh(int index, bool is_high) {
bool change = false;
if (is_high && !reg_location_[index].high_word) {
@@ -67,6 +106,16 @@
return change;
}
+bool MIRGraph::SetHigh(int index) {
+ bool change = false;
+ if (!reg_location_[index].high_word) {
+ reg_location_[index].high_word = true;
+ change = true;
+ }
+ return change;
+}
+
+
/*
* Infer types and sizes. We don't need to track change on sizes,
* as it doesn't propagate. We're guaranteed at least one pass through
@@ -84,21 +133,23 @@
SSARepresentation *ssa_rep = mir->ssa_rep;
if (ssa_rep) {
int attrs = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+ const int* uses = ssa_rep->uses;
+ const int* defs = ssa_rep->defs;
// Handle defs
if (attrs & DF_DA) {
if (attrs & DF_CORE_A) {
- changed |= SetCore(ssa_rep->defs[0], true);
+ changed |= SetCore(defs[0]);
}
if (attrs & DF_REF_A) {
- changed |= SetRef(ssa_rep->defs[0], true);
+ changed |= SetRef(defs[0]);
}
if (attrs & DF_A_WIDE) {
- reg_location_[ssa_rep->defs[0]].wide = true;
- reg_location_[ssa_rep->defs[1]].wide = true;
- reg_location_[ssa_rep->defs[1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->defs[0])+1,
- SRegToVReg(ssa_rep->defs[1]));
+ reg_location_[defs[0]].wide = true;
+ reg_location_[defs[1]].wide = true;
+ reg_location_[defs[1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(defs[0])+1,
+ SRegToVReg(defs[1]));
}
}
@@ -106,17 +157,17 @@
int next = 0;
if (attrs & DF_UA) {
if (attrs & DF_CORE_A) {
- changed |= SetCore(ssa_rep->uses[next], true);
+ changed |= SetCore(uses[next]);
}
if (attrs & DF_REF_A) {
- changed |= SetRef(ssa_rep->uses[next], true);
+ changed |= SetRef(uses[next]);
}
if (attrs & DF_A_WIDE) {
- reg_location_[ssa_rep->uses[next]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
- SRegToVReg(ssa_rep->uses[next + 1]));
+ reg_location_[uses[next]].wide = true;
+ reg_location_[uses[next + 1]].wide = true;
+ reg_location_[uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[next])+1,
+ SRegToVReg(uses[next + 1]));
next += 2;
} else {
next++;
@@ -124,17 +175,17 @@
}
if (attrs & DF_UB) {
if (attrs & DF_CORE_B) {
- changed |= SetCore(ssa_rep->uses[next], true);
+ changed |= SetCore(uses[next]);
}
if (attrs & DF_REF_B) {
- changed |= SetRef(ssa_rep->uses[next], true);
+ changed |= SetRef(uses[next]);
}
if (attrs & DF_B_WIDE) {
- reg_location_[ssa_rep->uses[next]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
- SRegToVReg(ssa_rep->uses[next + 1]));
+ reg_location_[uses[next]].wide = true;
+ reg_location_[uses[next + 1]].wide = true;
+ reg_location_[uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[next])+1,
+ SRegToVReg(uses[next + 1]));
next += 2;
} else {
next++;
@@ -142,17 +193,17 @@
}
if (attrs & DF_UC) {
if (attrs & DF_CORE_C) {
- changed |= SetCore(ssa_rep->uses[next], true);
+ changed |= SetCore(uses[next]);
}
if (attrs & DF_REF_C) {
- changed |= SetRef(ssa_rep->uses[next], true);
+ changed |= SetRef(uses[next]);
}
if (attrs & DF_C_WIDE) {
- reg_location_[ssa_rep->uses[next]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
- SRegToVReg(ssa_rep->uses[next + 1]));
+ reg_location_[uses[next]].wide = true;
+ reg_location_[uses[next + 1]].wide = true;
+ reg_location_[uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[next])+1,
+ SRegToVReg(uses[next + 1]));
}
}
@@ -162,27 +213,27 @@
(mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
switch (cu_->shorty[0]) {
case 'I':
- changed |= SetCore(ssa_rep->uses[0], true);
+ changed |= SetCore(uses[0]);
break;
case 'J':
- changed |= SetCore(ssa_rep->uses[0], true);
- changed |= SetCore(ssa_rep->uses[1], true);
- reg_location_[ssa_rep->uses[0]].wide = true;
- reg_location_[ssa_rep->uses[1]].wide = true;
- reg_location_[ssa_rep->uses[1]].high_word = true;
+ changed |= SetCore(uses[0]);
+ changed |= SetCore(uses[1]);
+ reg_location_[uses[0]].wide = true;
+ reg_location_[uses[1]].wide = true;
+ reg_location_[uses[1]].high_word = true;
break;
case 'F':
- changed |= SetFp(ssa_rep->uses[0], true);
+ changed |= SetFp(uses[0]);
break;
case 'D':
- changed |= SetFp(ssa_rep->uses[0], true);
- changed |= SetFp(ssa_rep->uses[1], true);
- reg_location_[ssa_rep->uses[0]].wide = true;
- reg_location_[ssa_rep->uses[1]].wide = true;
- reg_location_[ssa_rep->uses[1]].high_word = true;
+ changed |= SetFp(uses[0]);
+ changed |= SetFp(uses[1]);
+ reg_location_[uses[0]].wide = true;
+ reg_location_[uses[1]].wide = true;
+ reg_location_[uses[1]].high_word = true;
break;
case 'L':
- changed |= SetRef(ssa_rep->uses[0], true);
+ changed |= SetRef(uses[0]);
break;
default: break;
}
@@ -206,10 +257,10 @@
SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
DCHECK(tgt_rep != NULL);
tgt_rep->fp_def[0] = true;
- changed |= SetFp(tgt_rep->defs[0], true);
+ changed |= SetFp(tgt_rep->defs[0]);
if (shorty[0] == 'D') {
tgt_rep->fp_def[1] = true;
- changed |= SetFp(tgt_rep->defs[1], true);
+ changed |= SetFp(tgt_rep->defs[1]);
}
}
}
@@ -217,8 +268,8 @@
// If this is a non-static invoke, mark implicit "this"
if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
(mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
- reg_location_[ssa_rep->uses[next]].defined = true;
- reg_location_[ssa_rep->uses[next]].ref = true;
+ reg_location_[uses[next]].defined = true;
+ reg_location_[uses[next]].ref = true;
next++;
}
uint32_t cpos = 1;
@@ -229,28 +280,28 @@
case 'D':
ssa_rep->fp_use[i] = true;
ssa_rep->fp_use[i+1] = true;
- reg_location_[ssa_rep->uses[i]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
+ reg_location_[uses[i]].wide = true;
+ reg_location_[uses[i+1]].wide = true;
+ reg_location_[uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
i++;
break;
case 'J':
- reg_location_[ssa_rep->uses[i]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
- changed |= SetCore(ssa_rep->uses[i], true);
+ reg_location_[uses[i]].wide = true;
+ reg_location_[uses[i+1]].wide = true;
+ reg_location_[uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
+ changed |= SetCore(uses[i]);
i++;
break;
case 'F':
ssa_rep->fp_use[i] = true;
break;
case 'L':
- changed |= SetRef(ssa_rep->uses[i], true);
+ changed |= SetRef(uses[i]);
break;
default:
- changed |= SetCore(ssa_rep->uses[i], true);
+ changed |= SetCore(uses[i]);
break;
}
i++;
@@ -260,11 +311,11 @@
for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
if (ssa_rep->fp_use[i])
- changed |= SetFp(ssa_rep->uses[i], true);
+ changed |= SetFp(uses[i]);
}
for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
if (ssa_rep->fp_def[i])
- changed |= SetFp(ssa_rep->defs[i], true);
+ changed |= SetFp(defs[i]);
}
// Special-case handling for moves & Phi
if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
@@ -276,14 +327,14 @@
*/
bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
kMirOpPhi);
- RegLocation rl_temp = reg_location_[ssa_rep->defs[0]];
+ RegLocation rl_temp = reg_location_[defs[0]];
bool defined_fp = rl_temp.defined && rl_temp.fp;
bool defined_core = rl_temp.defined && rl_temp.core;
bool defined_ref = rl_temp.defined && rl_temp.ref;
bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
for (int i = 0; i < ssa_rep->num_uses; i++) {
- rl_temp = reg_location_[ssa_rep->uses[i]];
+ rl_temp = reg_location_[uses[i]];
defined_fp |= rl_temp.defined && rl_temp.fp;
defined_core |= rl_temp.defined && rl_temp.core;
defined_ref |= rl_temp.defined && rl_temp.ref;
@@ -303,26 +354,26 @@
<< " has both fp and core/ref uses for same def.";
cu_->disable_opt |= (1 << kPromoteRegs);
}
- changed |= SetFp(ssa_rep->defs[0], defined_fp);
- changed |= SetCore(ssa_rep->defs[0], defined_core);
- changed |= SetRef(ssa_rep->defs[0], defined_ref);
- changed |= SetWide(ssa_rep->defs[0], is_wide);
- changed |= SetHigh(ssa_rep->defs[0], is_high);
+ changed |= SetFp(defs[0], defined_fp);
+ changed |= SetCore(defs[0], defined_core);
+ changed |= SetRef(defs[0], defined_ref);
+ changed |= SetWide(defs[0], is_wide);
+ changed |= SetHigh(defs[0], is_high);
if (attrs & DF_A_WIDE) {
- changed |= SetWide(ssa_rep->defs[1], true);
- changed |= SetHigh(ssa_rep->defs[1], true);
+ changed |= SetWide(defs[1]);
+ changed |= SetHigh(defs[1]);
}
for (int i = 0; i < ssa_rep->num_uses; i++) {
- changed |= SetFp(ssa_rep->uses[i], defined_fp);
- changed |= SetCore(ssa_rep->uses[i], defined_core);
- changed |= SetRef(ssa_rep->uses[i], defined_ref);
- changed |= SetWide(ssa_rep->uses[i], is_wide);
- changed |= SetHigh(ssa_rep->uses[i], is_high);
+ changed |= SetFp(uses[i], defined_fp);
+ changed |= SetCore(uses[i], defined_core);
+ changed |= SetRef(uses[i], defined_ref);
+ changed |= SetWide(uses[i], is_wide);
+ changed |= SetHigh(uses[i], is_high);
}
if (attrs & DF_A_WIDE) {
DCHECK_EQ(ssa_rep->num_uses, 2);
- changed |= SetWide(ssa_rep->uses[1], true);
- changed |= SetHigh(ssa_rep->uses[1], true);
+ changed |= SetWide(uses[1]);
+ changed |= SetHigh(uses[1]);
}
}
}
@@ -444,7 +495,7 @@
}
/* Do type & size inference pass */
- PreOrderDfsIterator iter(this, true /* iterative */);
+ RepeatingPreOrderDfsIterator iter(this);
bool change = false;
for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
change = InferTypeAndSize(bb);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index cbd9020..8d521de 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -355,7 +355,11 @@
jni_compiler_(NULL),
compiler_enable_auto_elf_loading_(NULL),
compiler_get_method_code_addr_(NULL),
- support_boot_image_fixup_(true) {
+ support_boot_image_fixup_(true),
+ dedupe_code_("dedupe code"),
+ dedupe_mapping_table_("dedupe mapping table"),
+ dedupe_vmap_table_("dedupe vmap table"),
+ dedupe_gc_map_("dedupe gc map") {
CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key");
@@ -912,9 +916,9 @@
}
static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit,
- uint32_t method_idx,
- InvokeType type)
+ const DexCompilationUnit* mUnit,
+ uint32_t method_idx,
+ InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
@@ -923,11 +927,11 @@
}
bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, bool& is_volatile, bool is_put) {
+ bool is_put, int* field_offset, bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
// Conservative defaults.
- field_offset = -1;
- is_volatile = true;
+ *field_offset = -1;
+ *is_volatile = true;
// Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
mirror::ArtField* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && !resolved_field->IsStatic()) {
@@ -954,8 +958,8 @@
bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() &&
fields_class != referrer_class;
if (access_ok && !is_write_to_final_from_wrong_class) {
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedInstanceField();
return true; // Fast path.
}
@@ -970,15 +974,14 @@
}
bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, int& ssb_index,
- bool& is_referrers_class, bool& is_volatile,
- bool is_put) {
+ bool is_put, int* field_offset, int* ssb_index,
+ bool* is_referrers_class, bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
// Conservative defaults.
- field_offset = -1;
- ssb_index = -1;
- is_referrers_class = false;
- is_volatile = true;
+ *field_offset = -1;
+ *ssb_index = -1;
+ *is_referrers_class = false;
+ *is_volatile = true;
// Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
mirror::ArtField* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && resolved_field->IsStatic()) {
@@ -988,9 +991,9 @@
if (referrer_class != NULL) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
if (fields_class == referrer_class) {
- is_referrers_class = true; // implies no worrying about class initialization
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *is_referrers_class = true; // implies no worrying about class initialization
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedLocalStaticField();
return true; // fast path
} else {
@@ -1021,9 +1024,9 @@
if (fields_class->GetDexCache() == dex_cache) {
// common case where the dex cache of both the referrer and the field are the same,
// no need to search the dex file
- ssb_index = fields_class->GetDexTypeIndex();
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *ssb_index = fields_class->GetDexTypeIndex();
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedStaticField();
return true;
}
@@ -1036,9 +1039,9 @@
mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
if (type_id != NULL) {
// medium path, needs check of static storage base being initialized
- ssb_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *ssb_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedStaticField();
return true;
}
@@ -1058,15 +1061,15 @@
void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
- uintptr_t& direct_code,
- uintptr_t& direct_method,
- bool update_stats) {
+ bool update_stats,
+ uintptr_t* direct_code,
+ uintptr_t* direct_method) {
// For direct and static methods compute possible direct_code and direct_method values, ie
// an address for the Method* being invoked and an address of the code for that Method*.
// For interface calls compute a value for direct_method that is the interface method being
// invoked, so this can be passed to the out-of-line runtime support code.
- direct_code = 0;
- direct_method = 0;
+ *direct_code = 0;
+ *direct_method = 0;
if (compiler_backend_ == kPortable) {
if (sharp_type != kStatic && sharp_type != kDirect) {
return;
@@ -1098,38 +1101,37 @@
if (IsImageClass(mh.GetDeclaringClassDescriptor())) {
// We can only branch directly to Methods that are resolved in the DexCache.
// Otherwise we won't invoke the resolution trampoline.
- direct_method = -1;
- direct_code = -1;
+ *direct_method = -1;
+ *direct_code = -1;
}
}
} else {
if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace()) {
- direct_method = reinterpret_cast<uintptr_t>(method);
+ *direct_method = reinterpret_cast<uintptr_t>(method);
}
- direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+ *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
}
}
bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
- InvokeType& invoke_type,
- MethodReference& target_method,
- int& vtable_idx,
- uintptr_t& direct_code, uintptr_t& direct_method,
- bool update_stats) {
+ bool update_stats, bool enable_devirtualization,
+ InvokeType* invoke_type, MethodReference* target_method,
+ int* vtable_idx, uintptr_t* direct_code,
+ uintptr_t* direct_method) {
ScopedObjectAccess soa(Thread::Current());
- vtable_idx = -1;
- direct_code = 0;
- direct_method = 0;
+ *vtable_idx = -1;
+ *direct_code = 0;
+ *direct_method = 0;
mirror::ArtMethod* resolved_method =
- ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method.dex_method_index,
- invoke_type);
+ ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method->dex_method_index,
+ *invoke_type);
if (resolved_method != NULL) {
// Don't try to fast-path if we don't understand the caller's class or this appears to be an
// Incompatible Class Change Error.
mirror::Class* referrer_class =
ComputeCompilingMethodsClass(soa, resolved_method->GetDeclaringClass()->GetDexCache(),
mUnit);
- bool icce = resolved_method->CheckIncompatibleClassChange(invoke_type);
+ bool icce = resolved_method->CheckIncompatibleClassChange(*invoke_type);
if (referrer_class != NULL && !icce) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
if (!referrer_class->CanAccess(methods_class) ||
@@ -1140,42 +1142,42 @@
// method public. Resort to the dex file to determine the correct class for the access
// check.
uint16_t class_idx =
- target_method.dex_file->GetMethodId(target_method.dex_method_index).class_idx_;
- methods_class = mUnit->GetClassLinker()->ResolveType(*target_method.dex_file,
+ target_method->dex_file->GetMethodId(target_method->dex_method_index).class_idx_;
+ methods_class = mUnit->GetClassLinker()->ResolveType(*target_method->dex_file,
class_idx, referrer_class);
}
if (referrer_class->CanAccess(methods_class) &&
referrer_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags())) {
- const bool kEnableFinalBasedSharpening = true;
+ const bool enableFinalBasedSharpening = enable_devirtualization;
// Sharpen a virtual call into a direct call when the target is known not to have been
// overridden (ie is final).
bool can_sharpen_virtual_based_on_type =
- (invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
+ (*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
- bool can_sharpen_super_based_on_type = (invoke_type == kSuper) &&
+ bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() &&
(methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method);
- if (kEnableFinalBasedSharpening && (can_sharpen_virtual_based_on_type ||
+ if (enableFinalBasedSharpening && (can_sharpen_virtual_based_on_type ||
can_sharpen_super_based_on_type)) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
- CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method.dex_method_index) ==
+ CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
resolved_method) << PrettyMethod(resolved_method);
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
- stats_->VirtualMadeDirect(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
+ stats_->VirtualMadeDirect(*invoke_type);
}
- GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, resolved_method,
- direct_code, direct_method, update_stats);
- invoke_type = kDirect;
+ GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, resolved_method,
+ update_stats, direct_code, direct_method);
+ *invoke_type = kDirect;
return true;
}
- const bool kEnableVerifierBasedSharpening = true;
- if (kEnableVerifierBasedSharpening && (invoke_type == kVirtual ||
- invoke_type == kInterface)) {
+ const bool enableVerifierBasedSharpening = enable_devirtualization;
+ if (enableVerifierBasedSharpening && (*invoke_type == kVirtual ||
+ *invoke_type == kInterface)) {
// Did the verifier record a more precise invoke target based on its type information?
const MethodReference caller_method(mUnit->GetDexFile(), mUnit->GetDexMethodIndex());
const MethodReference* devirt_map_target =
@@ -1192,14 +1194,14 @@
kVirtual);
CHECK(called_method != NULL);
CHECK(!called_method->IsAbstract());
- GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, called_method,
- direct_code, direct_method, update_stats);
+ GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, called_method,
+ update_stats, direct_code, direct_method);
bool compiler_needs_dex_cache =
(GetCompilerBackend() == kPortable) ||
(GetCompilerBackend() == kQuick && instruction_set_ != kThumb2) ||
- (direct_code == 0) || (direct_code == static_cast<unsigned int>(-1)) ||
- (direct_method == 0) || (direct_method == static_cast<unsigned int>(-1));
- if ((devirt_map_target->dex_file != target_method.dex_file) &&
+ (*direct_code == 0) || (*direct_code == static_cast<unsigned int>(-1)) ||
+ (*direct_method == 0) || (*direct_method == static_cast<unsigned int>(-1));
+ if ((devirt_map_target->dex_file != target_method->dex_file) &&
compiler_needs_dex_cache) {
// We need to use the dex cache to find either the method or code, and the dex file
// containing the method isn't the one expected for the target method. Try to find
@@ -1209,7 +1211,7 @@
// TODO: quick only supports direct pointers with Thumb2.
// TODO: the following should be factored into a common helper routine to find
// one dex file's method within another.
- const DexFile* dexfile = target_method.dex_file;
+ const DexFile* dexfile = target_method->dex_file;
const DexFile* cm_dexfile =
called_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
const DexFile::MethodId& cm_method_id =
@@ -1235,12 +1237,13 @@
*name, *sig);
if (method_id != NULL) {
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
- stats_->VirtualMadeDirect(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
+ stats_->VirtualMadeDirect(*invoke_type);
stats_->PreciseTypeDevirtualization();
}
- target_method.dex_method_index = dexfile->GetIndexForMethodId(*method_id);
- invoke_type = kDirect;
+ target_method->dex_method_index =
+ dexfile->GetIndexForMethodId(*method_id);
+ *invoke_type = kDirect;
return true;
}
}
@@ -1252,28 +1255,28 @@
// method in the referring method's dex cache/file.
} else {
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
- stats_->VirtualMadeDirect(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
+ stats_->VirtualMadeDirect(*invoke_type);
stats_->PreciseTypeDevirtualization();
}
- target_method = *devirt_map_target;
- invoke_type = kDirect;
+ *target_method = *devirt_map_target;
+ *invoke_type = kDirect;
return true;
}
}
}
- if (invoke_type == kSuper) {
+ if (*invoke_type == kSuper) {
// Unsharpened super calls are suspicious so go slow-path.
} else {
// Sharpening failed so generate a regular resolved method dispatch.
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
}
- if (invoke_type == kVirtual || invoke_type == kSuper) {
- vtable_idx = resolved_method->GetMethodIndex();
+ if (*invoke_type == kVirtual || *invoke_type == kSuper) {
+ *vtable_idx = resolved_method->GetMethodIndex();
}
- GetCodeAndMethodForDirectCall(invoke_type, invoke_type, referrer_class, resolved_method,
- direct_code, direct_method, update_stats);
+ GetCodeAndMethodForDirectCall(*invoke_type, *invoke_type, referrer_class, resolved_method,
+ update_stats, direct_code, direct_method);
return true;
}
}
@@ -1284,7 +1287,7 @@
soa.Self()->ClearException();
}
if (update_stats) {
- stats_->UnresolvedMethod(invoke_type);
+ stats_->UnresolvedMethod(*invoke_type);
}
return false; // Incomplete knowledge needs slow path.
}
@@ -1585,13 +1588,11 @@
if (IsImage()) {
// For images we resolve all types, such as array, whereas for applications just those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " Types").c_str()));
+ timings.NewSplit("Resolve Types");
context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
}
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " MethodsAndFields").c_str()));
+ timings.NewSplit("Resolve MethodsAndFields");
context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
}
@@ -1652,8 +1653,7 @@
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
ThreadPool& thread_pool, base::TimingLogger& timings) {
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Verify " + dex_file.GetLocation()).c_str()));
+ timings.NewSplit("Verify Dex File");
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
@@ -2150,8 +2150,7 @@
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
ThreadPool& thread_pool, base::TimingLogger& timings) {
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("InitializeNoClinit " + dex_file.GetLocation()).c_str()));
+ timings.NewSplit("InitializeNoClinit");
#ifndef NDEBUG
// Sanity check blacklist descriptors.
if (IsImage()) {
@@ -2258,8 +2257,7 @@
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
ThreadPool& thread_pool, base::TimingLogger& timings) {
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Compile " + dex_file.GetLocation()).c_str()));
+ timings.NewSplit("Compile Dex File");
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
&dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index cd6b5fa..b4ec0c1 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -169,22 +169,23 @@
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
- bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, bool& is_volatile, bool is_put)
+ bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
+ int* field_offset, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fastpath static field access? Computes field's offset, volatility and whether the
// field is within the referrer (which can avoid checking class initialization).
- bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, int& ssb_index,
- bool& is_referrers_class, bool& is_volatile, bool is_put)
+ bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
+ int* field_offset, int* ssb_index,
+ bool* is_referrers_class, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fastpath a interface, super class or virtual method call? Computes method's vtable
// index.
bool ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
- InvokeType& type, MethodReference& target_method, int& vtable_idx,
- uintptr_t& direct_code, uintptr_t& direct_method, bool update_stats)
+ bool update_stats, bool enable_devirtualization,
+ InvokeType* type, MethodReference* target_method, int* vtable_idx,
+ uintptr_t* direct_code, uintptr_t* direct_method)
LOCKS_EXCLUDED(Locks::mutator_lock_);
bool IsSafeCast(const MethodReference& mr, uint32_t dex_pc);
@@ -314,8 +315,8 @@
void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
- uintptr_t& direct_code, uintptr_t& direct_method,
- bool update_stats)
+ bool update_stats,
+ uintptr_t* direct_code, uintptr_t* direct_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
@@ -449,27 +450,40 @@
class DedupeHashFunc {
public:
size_t operator()(const std::vector<uint8_t>& array) const {
- // Take a random sample of bytes.
+ // For small arrays compute a hash using every byte.
static const size_t kSmallArrayThreshold = 16;
- static const size_t kRandomHashCount = 16;
- size_t hash = 0;
- if (array.size() < kSmallArrayThreshold) {
- for (auto c : array) {
- hash = hash * 54 + c;
+ size_t hash = 0x811c9dc5;
+ if (array.size() <= kSmallArrayThreshold) {
+ for (uint8_t b : array) {
+ hash = (hash * 16777619) ^ b;
}
} else {
- for (size_t i = 0; i < kRandomHashCount; ++i) {
+ // For larger arrays use the 2 bytes at 6 bytes (the location of a push registers
+ // instruction field for quick generated code on ARM) and then select a number of other
+ // values at random.
+ static const size_t kRandomHashCount = 16;
+ for (size_t i = 0; i < 2; ++i) {
+ uint8_t b = array[i + 6];
+ hash = (hash * 16777619) ^ b;
+ }
+ for (size_t i = 2; i < kRandomHashCount; ++i) {
size_t r = i * 1103515245 + 12345;
- hash = hash * 54 + array[r % array.size()];
+ uint8_t b = array[r % array.size()];
+ hash = (hash * 16777619) ^ b;
}
}
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
return hash;
}
};
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_code_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_mapping_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_vmap_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_gc_map_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_code_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_mapping_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_vmap_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_gc_map_;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 43408a7..0c14346 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -50,9 +50,9 @@
using ::art::llvm::runtime_support::RuntimeId;
JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
- CompilerDriver& driver,
+ CompilerDriver* driver,
const DexCompilationUnit* dex_compilation_unit)
- : cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
+ : cunit_(cunit), driver_(driver), module_(cunit_->GetModule()),
context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
dex_compilation_unit_(dex_compilation_unit),
func_(NULL), elf_func_idx_(0) {
diff --git a/compiler/jni/portable/jni_compiler.h b/compiler/jni/portable/jni_compiler.h
index d20c63b..ffabfe6 100644
--- a/compiler/jni/portable/jni_compiler.h
+++ b/compiler/jni/portable/jni_compiler.h
@@ -54,7 +54,7 @@
class JniCompiler {
public:
JniCompiler(LlvmCompilationUnit* cunit,
- CompilerDriver& driver,
+ CompilerDriver* driver,
const DexCompilationUnit* dex_compilation_unit);
CompiledMethod* Compile();
@@ -67,7 +67,7 @@
private:
LlvmCompilationUnit* cunit_;
- CompilerDriver* driver_;
+ CompilerDriver* const driver_;
::llvm::Module* module_;
::llvm::LLVMContext* context_;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 1417fb9..b6b15f9 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -24,7 +24,6 @@
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
-#include "disassembler.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_internal.h"
#include "utils/assembler.h"
@@ -85,7 +84,6 @@
// Assembler that holds generated instructions
UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
- bool should_disassemble = false;
// Offsets into data structures
// TODO: if cross compiling these offsets are for the host not the target
@@ -366,10 +364,6 @@
std::vector<uint8_t> managed_code(cs);
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
- if (should_disassemble) {
- UniquePtr<Disassembler> disassembler(Disassembler::Create(instruction_set));
- disassembler->Dump(LOG(INFO), &managed_code[0], &managed_code[managed_code.size()]);
- }
return new CompiledMethod(compiler,
instruction_set,
managed_code,
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
index fd440d5..0df3c47 100644
--- a/compiler/llvm/compiler_llvm.cc
+++ b/compiler/llvm/compiler_llvm.cc
@@ -26,6 +26,7 @@
#include "ir_builder.h"
#include "jni/portable/jni_compiler.h"
#include "llvm_compilation_unit.h"
+#include "thread-inl.h"
#include "utils_llvm.h"
#include "verifier/method_verifier.h"
@@ -164,7 +165,7 @@
UniquePtr<LlvmCompilationUnit> cunit(AllocateCompilationUnit());
UniquePtr<JniCompiler> jni_compiler(
- new JniCompiler(cunit.get(), *compiler_driver_, dex_compilation_unit));
+ new JniCompiler(cunit.get(), compiler_driver_, dex_compilation_unit));
return jni_compiler->Compile();
}
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index 4f6fa0a..b206a25 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -846,10 +846,10 @@
uintptr_t direct_code = 0;
uintptr_t direct_method = 0;
bool is_fast_path = driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc,
- invoke_type, target_method,
- vtable_idx,
- direct_code, direct_method,
- true);
+ true, true,
+ &invoke_type, &target_method,
+ &vtable_idx,
+ &direct_code, &direct_method);
// Load the method object
llvm::Value* callee_method_object_addr = NULL;
@@ -1630,7 +1630,7 @@
int field_offset;
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, is_volatile, false);
+ field_idx, dex_compilation_unit_, false, &field_offset, &is_volatile);
if (!is_fast_path) {
llvm::Function* runtime_func;
@@ -1692,7 +1692,7 @@
int field_offset;
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, is_volatile, true);
+ field_idx, dex_compilation_unit_, true, &field_offset, &is_volatile);
if (!is_fast_path) {
llvm::Function* runtime_func;
@@ -1897,8 +1897,8 @@
bool is_volatile;
bool is_fast_path = driver_->ComputeStaticFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, ssb_index,
- is_referrers_class, is_volatile, false);
+ field_idx, dex_compilation_unit_, false,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
llvm::Value* static_field_value;
@@ -1981,8 +1981,8 @@
bool is_volatile;
bool is_fast_path = driver_->ComputeStaticFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, ssb_index,
- is_referrers_class, is_volatile, true);
+ field_idx, dex_compilation_unit_, true,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
if (!is_fast_path) {
llvm::Function* runtime_func;
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index 139100b..aa439cc 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -214,6 +214,7 @@
::llvm::TargetOptions target_options;
target_options.FloatABIType = ::llvm::FloatABI::Soft;
target_options.NoFramePointerElim = true;
+ target_options.NoFramePointerElimNonLeaf = true;
target_options.UseSoftFloat = false;
target_options.EnableFastISel = false;
@@ -257,7 +258,7 @@
::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
new ::llvm::tool_output_file(bitcode_filename_.c_str(), errmsg,
- ::llvm::sys::fs::F_Binary));
+ ::llvm::raw_fd_ostream::F_Binary));
if (!errmsg.empty()) {
@@ -277,6 +278,7 @@
// pm_builder.Inliner = ::llvm::createAlwaysInlinerPass();
// pm_builder.Inliner = ::llvm::createPartialInliningPass();
pm_builder.OptLevel = 3;
+ pm_builder.DisableSimplifyLibCalls = 1;
pm_builder.DisableUnitAtATime = 1;
pm_builder.populateFunctionPassManager(fpm);
pm_builder.populateModulePassManager(pm);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 74b5da9..9ed2642 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -104,7 +104,7 @@
ASSERT_TRUE(oat_file.get() != NULL);
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
- ASSERT_EQ(2U, oat_header.GetDexFileCount()); // core and conscrypt
+ ASSERT_EQ(1U, oat_header.GetDexFileCount()); // core
ASSERT_EQ(42U, oat_header.GetImageFileLocationOatChecksum());
ASSERT_EQ(4096U, oat_header.GetImageFileLocationOatDataBegin());
ASSERT_EQ("lue.art", oat_header.GetImageFileLocation());
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index f3d35d7..53c1afa 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -18,62 +18,65 @@
#define ART_COMPILER_UTILS_DEDUPE_SET_H_
#include <set>
+#include <string>
#include "base/mutex.h"
#include "base/stl_util.h"
namespace art {
-// A simple data structure to handle hashed deduplication. Add is thread safe.
-template <typename Key, typename HashType, typename HashFunc>
+// A set of Keys that support a HashFunc returning HashType. Used to find duplicates of Key in the
+// Add method. The data-structure is thread-safe through the use of internal locks, it also
+// supports the lock being sharded.
+template <typename Key, typename HashType, typename HashFunc, HashType kShard = 1>
class DedupeSet {
typedef std::pair<HashType, Key*> HashedKey;
class Comparator {
public:
bool operator()(const HashedKey& a, const HashedKey& b) const {
- if (a.first < b.first) return true;
- if (a.first > b.first) return true;
- return *a.second < *b.second;
+ if (a.first != b.first) {
+ return a.first < b.first;
+ } else {
+ return *a.second < *b.second;
+ }
}
};
- typedef std::set<HashedKey, Comparator> Keys;
-
public:
- typedef typename Keys::iterator iterator;
- typedef typename Keys::const_iterator const_iterator;
- typedef typename Keys::size_type size_type;
- typedef typename Keys::value_type value_type;
-
- iterator begin() { return keys_.begin(); }
- const_iterator begin() const { return keys_.begin(); }
- iterator end() { return keys_.end(); }
- const_iterator end() const { return keys_.end(); }
-
Key* Add(Thread* self, const Key& key) {
- HashType hash = HashFunc()(key);
- HashedKey hashed_key(hash, const_cast<Key*>(&key));
- MutexLock lock(self, lock_);
- auto it = keys_.find(hashed_key);
- if (it != keys_.end()) {
+ HashType raw_hash = HashFunc()(key);
+ HashType shard_hash = raw_hash / kShard;
+ HashType shard_bin = raw_hash % kShard;
+ HashedKey hashed_key(shard_hash, const_cast<Key*>(&key));
+ MutexLock lock(self, *lock_[shard_bin]);
+ auto it = keys_[shard_bin].find(hashed_key);
+ if (it != keys_[shard_bin].end()) {
return it->second;
}
hashed_key.second = new Key(key);
- keys_.insert(hashed_key);
+ keys_[shard_bin].insert(hashed_key);
return hashed_key.second;
}
- DedupeSet() : lock_("dedupe lock") {
+ explicit DedupeSet(const char* set_name) {
+ for (HashType i = 0; i < kShard; ++i) {
+ lock_name_[i] = StringPrintf("%s lock %d", set_name, i);
+ lock_[i].reset(new Mutex(lock_name_[i].c_str()));
+ }
}
~DedupeSet() {
- STLDeleteValues(&keys_);
+ for (HashType i = 0; i < kShard; ++i) {
+ STLDeleteValues(&keys_[i]);
+ }
}
private:
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- Keys keys_;
+ std::string lock_name_[kShard];
+ UniquePtr<Mutex> lock_[kShard];
+ std::set<HashedKey, Comparator> keys_[kShard];
+
DISALLOW_COPY_AND_ASSIGN(DedupeSet);
};
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 9f5e292..03d8b96 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -38,7 +38,7 @@
TEST_F(DedupeSetTest, Test) {
Thread* self = Thread::Current();
typedef std::vector<uint8_t> ByteArray;
- DedupeSet<ByteArray, size_t, DedupeHashFunc> deduplicator;
+ DedupeSet<ByteArray, size_t, DedupeHashFunc> deduplicator("test");
ByteArray* array1;
{
ByteArray test1;
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
new file mode 100644
index 0000000..f8001a4
--- /dev/null
+++ b/disassembler/Android.mk
@@ -0,0 +1,120 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include art/build/Android.common.mk
+
+LIBART_DISASSEMBLER_SRC_FILES := \
+ disassembler.cc \
+ disassembler_arm.cc \
+ disassembler_mips.cc \
+ disassembler_x86.cc
+
+# $(1): target or host
+# $(2): ndebug or debug
+define build-libart-disassembler
+ ifneq ($(1),target)
+ ifneq ($(1),host)
+ $$(error expected target or host for argument 1, received $(1))
+ endif
+ endif
+ ifneq ($(2),ndebug)
+ ifneq ($(2),debug)
+ $$(error expected ndebug or debug for argument 2, received $(2))
+ endif
+ endif
+
+ art_target_or_host := $(1)
+ art_ndebug_or_debug := $(2)
+
+ include $(CLEAR_VARS)
+ ifeq ($$(art_target_or_host),target)
+ include external/stlport/libstlport.mk
+ else
+ LOCAL_IS_HOST_MODULE := true
+ endif
+ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+ ifeq ($$(art_ndebug_or_debug),ndebug)
+ LOCAL_MODULE := libart-disassembler
+ else # debug
+ LOCAL_MODULE := libartd-disassembler
+ endif
+
+ LOCAL_MODULE_TAGS := optional
+ LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+
+ LOCAL_SRC_FILES := $$(LIBART_DISASSEMBLER_SRC_FILES)
+
+ GENERATED_SRC_DIR := $$(call intermediates-dir-for,$$(LOCAL_MODULE_CLASS),$$(LOCAL_MODULE),$$(LOCAL_IS_HOST_MODULE),)
+
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
+ LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+ else # host
+ LOCAL_CLANG := $(ART_HOST_CLANG)
+ LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+ endif
+
+ LOCAL_SHARED_LIBRARIES += liblog
+ ifeq ($$(art_ndebug_or_debug),debug)
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS += $(ART_TARGET_DEBUG_CFLAGS)
+ else # host
+ LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
+ endif
+ LOCAL_SHARED_LIBRARIES += libartd
+ else
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS += $(ART_TARGET_NON_DEBUG_CFLAGS)
+ else # host
+ LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
+ endif
+ LOCAL_SHARED_LIBRARIES += libart
+ endif
+
+ LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
+
+ LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
+ LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_SHARED_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_DEVICE_BUILD_MK)
+ include $(BUILD_SHARED_LIBRARY)
+ else # host
+ LOCAL_STATIC_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_HOST_BUILD_MK)
+ include $(BUILD_HOST_SHARED_LIBRARY)
+ endif
+endef
+
+ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
+ $(eval $(call build-libart-disassembler,target,ndebug))
+endif
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+ $(eval $(call build-libart-disassembler,target,debug))
+endif
+ifeq ($(WITH_HOST_DALVIK),true)
+ # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
+ ifeq ($(ART_BUILD_NDEBUG),true)
+ $(eval $(call build-libart-disassembler,host,ndebug))
+ endif
+ ifeq ($(ART_BUILD_DEBUG),true)
+ $(eval $(call build-libart-disassembler,host,debug))
+ endif
+endif
diff --git a/runtime/disassembler.cc b/disassembler/disassembler.cc
similarity index 100%
rename from runtime/disassembler.cc
rename to disassembler/disassembler.cc
diff --git a/runtime/disassembler.h b/disassembler/disassembler.h
similarity index 90%
rename from runtime/disassembler.h
rename to disassembler/disassembler.h
index 805ff4d..7547ab7 100644
--- a/runtime/disassembler.h
+++ b/disassembler/disassembler.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_H_
-#define ART_RUNTIME_DISASSEMBLER_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_H_
#include <stdint.h>
@@ -45,4 +45,4 @@
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_H_
diff --git a/runtime/disassembler_arm.cc b/disassembler/disassembler_arm.cc
similarity index 100%
rename from runtime/disassembler_arm.cc
rename to disassembler/disassembler_arm.cc
diff --git a/runtime/disassembler_arm.h b/disassembler/disassembler_arm.h
similarity index 90%
rename from runtime/disassembler_arm.h
rename to disassembler/disassembler_arm.h
index cab9150..2e699ff 100644
--- a/runtime/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_ARM_H_
-#define ART_RUNTIME_DISASSEMBLER_ARM_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_ARM_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_ARM_H_
#include <vector>
@@ -48,4 +48,4 @@
} // namespace arm
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_ARM_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_ARM_H_
diff --git a/runtime/disassembler_mips.cc b/disassembler/disassembler_mips.cc
similarity index 100%
rename from runtime/disassembler_mips.cc
rename to disassembler/disassembler_mips.cc
diff --git a/runtime/disassembler_mips.h b/disassembler/disassembler_mips.h
similarity index 87%
rename from runtime/disassembler_mips.h
rename to disassembler/disassembler_mips.h
index e248503..d386267 100644
--- a/runtime/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_MIPS_H_
-#define ART_RUNTIME_DISASSEMBLER_MIPS_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
#include <vector>
@@ -37,4 +37,4 @@
} // namespace mips
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_MIPS_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
diff --git a/runtime/disassembler_x86.cc b/disassembler/disassembler_x86.cc
similarity index 100%
rename from runtime/disassembler_x86.cc
rename to disassembler/disassembler_x86.cc
diff --git a/runtime/disassembler_x86.h b/disassembler/disassembler_x86.h
similarity index 87%
rename from runtime/disassembler_x86.h
rename to disassembler/disassembler_x86.h
index ff4322c..9adaff7 100644
--- a/runtime/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_X86_H_
-#define ART_RUNTIME_DISASSEMBLER_X86_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_X86_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_X86_H_
#include "disassembler.h"
@@ -35,4 +35,4 @@
} // namespace x86
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_X86_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_X86_H_
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index a63b229..7cee00e 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -22,17 +22,17 @@
include art/build/Android.executable.mk
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils,,target,ndebug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libart-disassembler,art/disassembler,target,ndebug))
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils,,target,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler,art/disassembler,target,debug))
endif
ifeq ($(WITH_HOST_DALVIK),true)
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),,,host,ndebug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler,art/disassembler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),,,host,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler,art/disassembler,host,debug))
endif
endif
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a8d505e..e324060 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -38,10 +38,6 @@
dex_file.cc \
dex_file_verifier.cc \
dex_instruction.cc \
- disassembler.cc \
- disassembler_arm.cc \
- disassembler_mips.cc \
- disassembler_x86.cc \
elf_file.cc \
gc/allocator/dlmalloc.cc \
gc/accounting/card_table.cc \
@@ -64,6 +60,9 @@
instrumentation.cc \
intern_table.cc \
interpreter/interpreter.cc \
+ interpreter/interpreter_common.cc \
+ interpreter/interpreter_goto_table_impl.cc \
+ interpreter/interpreter_switch_impl.cc \
jdwp/jdwp_event.cc \
jdwp/jdwp_expand_buf.cc \
jdwp/jdwp_handler.cc \
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 7e8365e..c0cfee2 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -41,6 +41,54 @@
}
#endif // ART_USE_FUTEXES
+#if defined(__APPLE__)
+
+// This works on Mac OS 10.6 but hasn't been tested on older releases.
+struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
+ long padding0; // NOLINT(runtime/int) exact match to darwin type
+ int padding1;
+ uint32_t padding2;
+ int16_t padding3;
+ int16_t padding4;
+ uint32_t padding5;
+ pthread_t darwin_pthread_mutex_owner;
+ // ...other stuff we don't care about.
+};
+
+struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
+ long padding0; // NOLINT(runtime/int) exact match to darwin type
+ pthread_mutex_t padding1;
+ int padding2;
+ pthread_cond_t padding3;
+ pthread_cond_t padding4;
+ int padding5;
+ int padding6;
+ pthread_t darwin_pthread_rwlock_owner;
+ // ...other stuff we don't care about.
+};
+
+#endif // __APPLE__
+
+#if defined(__GLIBC__)
+
+struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
+ int32_t padding0[2];
+ int owner;
+ // ...other stuff we don't care about.
+};
+
+struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
+#ifdef __LP64__
+ int32_t padding0[6];
+#else
+ int32_t padding0[7];
+#endif
+ int writer;
+ // ...other stuff we don't care about.
+};
+
+#endif // __GLIBC__
+
class ScopedContentionRecorder {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
@@ -185,6 +233,84 @@
#endif
}
+inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
+ DCHECK(self == NULL || self == Thread::Current());
+ bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
+ if (kDebugLocking) {
+ // Sanity debug check that if we think it is locked we have it in our held mutexes.
+ if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+ CHECK_EQ(self->GetHeldMutex(level_), this);
+ }
+ }
+ return result;
+}
+
+inline uint64_t Mutex::GetExclusiveOwnerTid() const {
+#if ART_USE_FUTEXES
+ return exclusive_owner_;
+#elif defined(__BIONIC__)
+ return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
+#elif defined(__GLIBC__)
+ return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
+#elif defined(__APPLE__)
+ const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
+ pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
+ // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
+ // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
+ if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
+ return 0;
+ }
+ uint64_t tid;
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
+ return tid;
+#else
+#error unsupported C library
+#endif
+}
+
+inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
+ DCHECK(self == NULL || self == Thread::Current());
+ bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
+ if (kDebugLocking) {
+ // Sanity that if the pthread thinks we own the lock the Thread agrees.
+ if (self != NULL && result) {
+ CHECK_EQ(self->GetHeldMutex(level_), this);
+ }
+ }
+ return result;
+}
+
+inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
+#if ART_USE_FUTEXES
+ int32_t state = state_;
+ if (state == 0) {
+ return 0; // No owner.
+ } else if (state > 0) {
+ return -1; // Shared.
+ } else {
+ return exclusive_owner_;
+ }
+#else
+#if defined(__BIONIC__)
+ return rwlock_.writerThreadId;
+#elif defined(__GLIBC__)
+ return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
+#elif defined(__APPLE__)
+ const darwin_pthread_rwlock_t*
+ dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
+ pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
+ if (owner == (pthread_t)0) {
+ return 0;
+ }
+ uint64_t tid;
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
+ return tid;
+#else
+#error unsupported C library
+#endif
+#endif
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_MUTEX_INL_H_
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b99e7c9..b048bbb 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -31,54 +31,6 @@
namespace art {
-#if defined(__APPLE__)
-
-// This works on Mac OS 10.6 but hasn't been tested on older releases.
-struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- int padding1;
- uint32_t padding2;
- int16_t padding3;
- int16_t padding4;
- uint32_t padding5;
- pthread_t darwin_pthread_mutex_owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- pthread_mutex_t padding1;
- int padding2;
- pthread_cond_t padding3;
- pthread_cond_t padding4;
- int padding5;
- int padding6;
- pthread_t darwin_pthread_rwlock_owner;
- // ...other stuff we don't care about.
-};
-
-#endif // __APPLE__
-
-#if defined(__GLIBC__)
-
-struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
- int32_t padding0[2];
- int owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
-#ifdef __LP64__
- int32_t padding0[6];
-#else
- int32_t padding0[7];
-#endif
- int writer;
- // ...other stuff we don't care about.
-};
-
-#endif // __GLIBC__
-
#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds.
@@ -346,7 +298,7 @@
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 0) {
+ if (LIKELY(cur_state == 0)) {
// Change state from 0 to 1.
done = android_atomic_acquire_cas(0, 1, &state_) == 0;
} else {
@@ -432,14 +384,14 @@
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 1) {
+ if (LIKELY(cur_state == 1)) {
// We're no longer the owner.
exclusive_owner_ = 0;
// Change state to 0.
done = android_atomic_release_cas(cur_state, 0, &state_) == 0;
- if (done) { // Spurious fail?
+ if (LIKELY(done)) { // Spurious fail?
// Wake a contender
- if (num_contenders_ > 0) {
+ if (UNLIKELY(num_contenders_ > 0)) {
futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
}
}
@@ -461,41 +413,6 @@
}
}
-bool Mutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
- bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
- if (kDebugLocking) {
- // Sanity debug check that if we think it is locked we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
- CHECK_EQ(self->GetHeldMutex(level_), this);
- }
- }
- return result;
-}
-
-uint64_t Mutex::GetExclusiveOwnerTid() const {
-#if ART_USE_FUTEXES
- return exclusive_owner_;
-#elif defined(__BIONIC__)
- return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
-#elif defined(__APPLE__)
- const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
- pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
- // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
- // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
- if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
-}
-
void Mutex::Dump(std::ostream& os) const {
os << (recursive_ ? "recursive " : "non-recursive ")
<< name_
@@ -549,7 +466,7 @@
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 0) {
+ if (LIKELY(cur_state == 0)) {
// Change state from 0 to -1.
done = android_atomic_acquire_cas(0, -1, &state_) == 0;
} else {
@@ -583,14 +500,14 @@
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == -1) {
+ if (LIKELY(cur_state == -1)) {
// We're no longer the owner.
exclusive_owner_ = 0;
// Change state from -1 to 0.
done = android_atomic_release_cas(-1, 0, &state_) == 0;
- if (done) { // cmpxchg may fail due to noise?
+ if (LIKELY(done)) { // cmpxchg may fail due to noise?
// Wake any waiters.
- if (num_pending_readers_ > 0 || num_pending_writers_ > 0) {
+ if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_ > 0)) {
futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
}
}
@@ -687,18 +604,6 @@
return true;
}
-bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
- bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
- if (kDebugLocking) {
- // Sanity that if the pthread thinks we own the lock the Thread agrees.
- if (self != NULL && result) {
- CHECK_EQ(self->GetHeldMutex(level_), this);
- }
- }
- return result;
-}
-
bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
DCHECK(self == NULL || self == Thread::Current());
bool result;
@@ -710,37 +615,6 @@
return result;
}
-uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
-#if ART_USE_FUTEXES
- int32_t state = state_;
- if (state == 0) {
- return 0; // No owner.
- } else if (state > 0) {
- return -1; // Shared.
- } else {
- return exclusive_owner_;
- }
-#else
-#if defined(__BIONIC__)
- return rwlock_.writerThreadId;
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
-#elif defined(__APPLE__)
- const darwin_pthread_rwlock_t*
- dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
- pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
- if (owner == (pthread_t)0) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
-#endif
-}
-
void ReaderWriterMutex::Dump(std::ostream& os) const {
os << name_
<< " level=" << static_cast<int>(level_)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0773a8d..c19f872 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1090,13 +1090,14 @@
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty) {
- visitor(class_roots_, arg);
+ class_roots_ = down_cast<mirror::ObjectArray<mirror::Class>*>(visitor(class_roots_, arg));
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
if (!only_dirty || dex_caches_dirty_) {
- for (mirror::DexCache* dex_cache : dex_caches_) {
- visitor(dex_cache, arg);
+ for (mirror::DexCache*& dex_cache : dex_caches_) {
+ dex_cache = down_cast<mirror::DexCache*>(visitor(dex_cache, arg));
+ DCHECK(dex_cache != nullptr);
}
if (clean_dirty) {
dex_caches_dirty_ = false;
@@ -1107,8 +1108,9 @@
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
if (!only_dirty || class_table_dirty_) {
- for (const std::pair<size_t, mirror::Class*>& it : class_table_) {
- visitor(it.second, arg);
+ for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
+ it.second = down_cast<mirror::Class*>(visitor(it.second, arg));
+ DCHECK(it.second != nullptr);
}
if (clean_dirty) {
class_table_dirty_ = false;
@@ -1119,7 +1121,8 @@
// handle image roots by using the MS/CMS rescanning of dirty cards.
}
- visitor(array_iftable_, arg);
+ array_iftable_ = reinterpret_cast<mirror::IfTable*>(visitor(array_iftable_, arg));
+ DCHECK(array_iftable_ != nullptr);
}
void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) {
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 995434c..0fa0ffb 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -340,8 +340,9 @@
}
}
- static void TestRootVisitor(const mirror::Object* root, void*) {
+ static mirror::Object* TestRootVisitor(mirror::Object* root, void*) {
EXPECT_TRUE(root != NULL);
+ return root;
}
};
diff --git a/runtime/common_test.h b/runtime/common_test.h
index dc1f592..fe54d03 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -286,12 +286,7 @@
if (java_lang_dex_file_ == NULL) {
LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "'\n";
}
- conscrypt_file_ = DexFile::Open(GetConscryptFileName(), GetConscryptFileName());
- if (conscrypt_file_ == NULL) {
- LOG(FATAL) << "Could not open .dex file '" << GetConscryptFileName() << "'\n";
- }
boot_class_path_.push_back(java_lang_dex_file_);
- boot_class_path_.push_back(conscrypt_file_);
std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
@@ -398,10 +393,6 @@
return GetDexFileName("core-libart");
}
- std::string GetConscryptFileName() {
- return GetDexFileName("conscrypt");
- }
-
std::string GetDexFileName(const std::string& jar_prefix) {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
@@ -520,7 +511,6 @@
std::string android_data_;
std::string dalvik_cache_;
const DexFile* java_lang_dex_file_; // owned by runtime_
- const DexFile* conscrypt_file_; // owned by runtime_
std::vector<const DexFile*> boot_class_path_;
UniquePtr<Runtime> runtime_;
// Owned by the runtime
diff --git a/runtime/dex_instruction-inl.h b/runtime/dex_instruction-inl.h
index 6e21273..4d39024 100644
--- a/runtime/dex_instruction-inl.h
+++ b/runtime/dex_instruction-inl.h
@@ -24,29 +24,29 @@
//------------------------------------------------------------------------------
// VRegA
//------------------------------------------------------------------------------
-inline int8_t Instruction::VRegA_10t() const {
+inline int8_t Instruction::VRegA_10t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k10t);
- return static_cast<int8_t>(InstAA());
+ return static_cast<int8_t>(InstAA(inst_data));
}
-inline uint8_t Instruction::VRegA_10x() const {
+inline uint8_t Instruction::VRegA_10x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k10x);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint4_t Instruction::VRegA_11n() const {
+inline uint4_t Instruction::VRegA_11n(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k11n);
- return InstA();
+ return InstA(inst_data);
}
-inline uint8_t Instruction::VRegA_11x() const {
+inline uint8_t Instruction::VRegA_11x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k11x);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint4_t Instruction::VRegA_12x() const {
+inline uint4_t Instruction::VRegA_12x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k12x);
- return InstA();
+ return InstA(inst_data);
}
inline int16_t Instruction::VRegA_20t() const {
@@ -54,54 +54,54 @@
return static_cast<int16_t>(Fetch16(1));
}
-inline uint8_t Instruction::VRegA_21c() const {
+inline uint8_t Instruction::VRegA_21c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21c);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_21h() const {
+inline uint8_t Instruction::VRegA_21h(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21h);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_21s() const {
+inline uint8_t Instruction::VRegA_21s(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21s);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_21t() const {
+inline uint8_t Instruction::VRegA_21t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21t);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_22b() const {
+inline uint8_t Instruction::VRegA_22b(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22b);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint4_t Instruction::VRegA_22c() const {
+inline uint4_t Instruction::VRegA_22c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22c);
- return InstA();
+ return InstA(inst_data);
}
-inline uint4_t Instruction::VRegA_22s() const {
+inline uint4_t Instruction::VRegA_22s(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22s);
- return InstA();
+ return InstA(inst_data);
}
-inline uint4_t Instruction::VRegA_22t() const {
+inline uint4_t Instruction::VRegA_22t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22t);
- return InstA();
+ return InstA(inst_data);
}
-inline uint8_t Instruction::VRegA_22x() const {
+inline uint8_t Instruction::VRegA_22x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22x);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_23x() const {
+inline uint8_t Instruction::VRegA_23x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k23x);
- return InstAA();
+ return InstAA(inst_data);
}
inline int32_t Instruction::VRegA_30t() const {
@@ -109,19 +109,19 @@
return static_cast<int32_t>(Fetch32(1));
}
-inline uint8_t Instruction::VRegA_31c() const {
+inline uint8_t Instruction::VRegA_31c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k31c);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_31i() const {
+inline uint8_t Instruction::VRegA_31i(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k31i);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_31t() const {
+inline uint8_t Instruction::VRegA_31t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k31t);
- return InstAA();
+ return InstAA(inst_data);
}
inline uint16_t Instruction::VRegA_32x() const {
@@ -129,32 +129,32 @@
return Fetch16(1);
}
-inline uint4_t Instruction::VRegA_35c() const {
+inline uint4_t Instruction::VRegA_35c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k35c);
- return InstB(); // This is labeled A in the spec.
+ return InstB(inst_data); // This is labeled A in the spec.
}
-inline uint8_t Instruction::VRegA_3rc() const {
+inline uint8_t Instruction::VRegA_3rc(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k3rc);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_51l() const {
+inline uint8_t Instruction::VRegA_51l(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k51l);
- return InstAA();
+ return InstAA(inst_data);
}
//------------------------------------------------------------------------------
// VRegB
//------------------------------------------------------------------------------
-inline int4_t Instruction::VRegB_11n() const {
+inline int4_t Instruction::VRegB_11n(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k11n);
- return static_cast<int4_t>((InstB() << 28) >> 28);
+ return static_cast<int4_t>((InstB(inst_data) << 28) >> 28);
}
-inline uint4_t Instruction::VRegB_12x() const {
+inline uint4_t Instruction::VRegB_12x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k12x);
- return InstB();
+ return InstB(inst_data);
}
inline uint16_t Instruction::VRegB_21c() const {
@@ -182,19 +182,19 @@
return static_cast<uint8_t>(Fetch16(1) & 0xff);
}
-inline uint4_t Instruction::VRegB_22c() const {
+inline uint4_t Instruction::VRegB_22c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22c);
- return InstB();
+ return InstB(inst_data);
}
-inline uint4_t Instruction::VRegB_22s() const {
+inline uint4_t Instruction::VRegB_22s(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22s);
- return InstB();
+ return InstB(inst_data);
}
-inline uint4_t Instruction::VRegB_22t() const {
+inline uint4_t Instruction::VRegB_22t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22t);
- return InstB();
+ return InstB(inst_data);
}
inline uint16_t Instruction::VRegB_22x() const {
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 13b0f1c..e8db3bc 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -217,44 +217,122 @@
// VRegA
bool HasVRegA() const;
int32_t VRegA() const;
- int8_t VRegA_10t() const;
- uint8_t VRegA_10x() const;
- uint4_t VRegA_11n() const;
- uint8_t VRegA_11x() const;
- uint4_t VRegA_12x() const;
+
+ int8_t VRegA_10t() const {
+ return VRegA_10t(Fetch16(0));
+ }
+ uint8_t VRegA_10x() const {
+ return VRegA_10x(Fetch16(0));
+ }
+ uint4_t VRegA_11n() const {
+ return VRegA_11n(Fetch16(0));
+ }
+ uint8_t VRegA_11x() const {
+ return VRegA_11x(Fetch16(0));
+ }
+ uint4_t VRegA_12x() const {
+ return VRegA_12x(Fetch16(0));
+ }
int16_t VRegA_20t() const;
- uint8_t VRegA_21c() const;
- uint8_t VRegA_21h() const;
- uint8_t VRegA_21s() const;
- uint8_t VRegA_21t() const;
- uint8_t VRegA_22b() const;
- uint4_t VRegA_22c() const;
- uint4_t VRegA_22s() const;
- uint4_t VRegA_22t() const;
- uint8_t VRegA_22x() const;
- uint8_t VRegA_23x() const;
+ uint8_t VRegA_21c() const {
+ return VRegA_21c(Fetch16(0));
+ }
+ uint8_t VRegA_21h() const {
+ return VRegA_21h(Fetch16(0));
+ }
+ uint8_t VRegA_21s() const {
+ return VRegA_21s(Fetch16(0));
+ }
+ uint8_t VRegA_21t() const {
+ return VRegA_21t(Fetch16(0));
+ }
+ uint8_t VRegA_22b() const {
+ return VRegA_22b(Fetch16(0));
+ }
+ uint4_t VRegA_22c() const {
+ return VRegA_22c(Fetch16(0));
+ }
+ uint4_t VRegA_22s() const {
+ return VRegA_22s(Fetch16(0));
+ }
+ uint4_t VRegA_22t() const {
+ return VRegA_22t(Fetch16(0));
+ }
+ uint8_t VRegA_22x() const {
+ return VRegA_22x(Fetch16(0));
+ }
+ uint8_t VRegA_23x() const {
+ return VRegA_23x(Fetch16(0));
+ }
int32_t VRegA_30t() const;
- uint8_t VRegA_31c() const;
- uint8_t VRegA_31i() const;
- uint8_t VRegA_31t() const;
+ uint8_t VRegA_31c() const {
+ return VRegA_31c(Fetch16(0));
+ }
+ uint8_t VRegA_31i() const {
+ return VRegA_31i(Fetch16(0));
+ }
+ uint8_t VRegA_31t() const {
+ return VRegA_31t(Fetch16(0));
+ }
uint16_t VRegA_32x() const;
- uint4_t VRegA_35c() const;
- uint8_t VRegA_3rc() const;
- uint8_t VRegA_51l() const;
+ uint4_t VRegA_35c() const {
+ return VRegA_35c(Fetch16(0));
+ }
+ uint8_t VRegA_3rc() const {
+ return VRegA_3rc(Fetch16(0));
+ }
+ uint8_t VRegA_51l() const {
+ return VRegA_51l(Fetch16(0));
+ }
+
+ // The following methods return the vA operand for various instruction formats. The "inst_data"
+ // parameter holds the first 16 bits of instruction which the returned value is decoded from.
+ int8_t VRegA_10t(uint16_t inst_data) const;
+ uint8_t VRegA_10x(uint16_t inst_data) const;
+ uint4_t VRegA_11n(uint16_t inst_data) const;
+ uint8_t VRegA_11x(uint16_t inst_data) const;
+ uint4_t VRegA_12x(uint16_t inst_data) const;
+ uint8_t VRegA_21c(uint16_t inst_data) const;
+ uint8_t VRegA_21h(uint16_t inst_data) const;
+ uint8_t VRegA_21s(uint16_t inst_data) const;
+ uint8_t VRegA_21t(uint16_t inst_data) const;
+ uint8_t VRegA_22b(uint16_t inst_data) const;
+ uint4_t VRegA_22c(uint16_t inst_data) const;
+ uint4_t VRegA_22s(uint16_t inst_data) const;
+ uint4_t VRegA_22t(uint16_t inst_data) const;
+ uint8_t VRegA_22x(uint16_t inst_data) const;
+ uint8_t VRegA_23x(uint16_t inst_data) const;
+ uint8_t VRegA_31c(uint16_t inst_data) const;
+ uint8_t VRegA_31i(uint16_t inst_data) const;
+ uint8_t VRegA_31t(uint16_t inst_data) const;
+ uint4_t VRegA_35c(uint16_t inst_data) const;
+ uint8_t VRegA_3rc(uint16_t inst_data) const;
+ uint8_t VRegA_51l(uint16_t inst_data) const;
// VRegB
bool HasVRegB() const;
int32_t VRegB() const;
- int4_t VRegB_11n() const;
- uint4_t VRegB_12x() const;
+
+ int4_t VRegB_11n() const {
+ return VRegB_11n(Fetch16(0));
+ }
+ uint4_t VRegB_12x() const {
+ return VRegB_12x(Fetch16(0));
+ }
uint16_t VRegB_21c() const;
uint16_t VRegB_21h() const;
int16_t VRegB_21s() const;
int16_t VRegB_21t() const;
uint8_t VRegB_22b() const;
- uint4_t VRegB_22c() const;
- uint4_t VRegB_22s() const;
- uint4_t VRegB_22t() const;
+ uint4_t VRegB_22c() const {
+ return VRegB_22c(Fetch16(0));
+ }
+ uint4_t VRegB_22s() const {
+ return VRegB_22s(Fetch16(0));
+ }
+ uint4_t VRegB_22t() const {
+ return VRegB_22t(Fetch16(0));
+ }
uint16_t VRegB_22x() const;
uint8_t VRegB_23x() const;
uint32_t VRegB_31c() const;
@@ -265,9 +343,19 @@
uint16_t VRegB_3rc() const;
uint64_t VRegB_51l() const; // vB_wide
+ // The following methods return the vB operand for all instruction formats where it is encoded in
+ // the first 16 bits of instruction. The "inst_data" parameter holds these 16 bits. The returned
+ // value is decoded from it.
+ int4_t VRegB_11n(uint16_t inst_data) const;
+ uint4_t VRegB_12x(uint16_t inst_data) const;
+ uint4_t VRegB_22c(uint16_t inst_data) const;
+ uint4_t VRegB_22s(uint16_t inst_data) const;
+ uint4_t VRegB_22t(uint16_t inst_data) const;
+
// VRegC
bool HasVRegC() const;
int32_t VRegC() const;
+
int8_t VRegC_22b() const;
uint16_t VRegC_22c() const;
int16_t VRegC_22s() const;
@@ -279,9 +367,16 @@
// Fills the given array with the 'arg' array of the instruction.
void GetArgs(uint32_t args[5]) const;
- // Returns the opcode field of the instruction.
+ // Returns the opcode field of the instruction. The given "inst_data" parameter must be the first
+ // 16 bits of instruction.
+ Code Opcode(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<Code>(inst_data & 0xFF);
+ }
+
+ // Returns the opcode field of the instruction from the first 16 bits of instruction.
Code Opcode() const {
- return static_cast<Code>(Fetch16(0) & 0xFF);
+ return Opcode(Fetch16(0));
}
void SetOpcode(Code opcode) {
@@ -395,28 +490,43 @@
// Dump code_units worth of this instruction, padding to code_units for shorter instructions
std::string DumpHex(size_t code_units) const;
- private:
- size_t SizeInCodeUnitsComplexOpcode() const;
-
uint16_t Fetch16(size_t offset) const {
const uint16_t* insns = reinterpret_cast<const uint16_t*>(this);
return insns[offset];
}
+ private:
+ size_t SizeInCodeUnitsComplexOpcode() const;
+
uint32_t Fetch32(size_t offset) const {
return (Fetch16(offset) | ((uint32_t) Fetch16(offset + 1) << 16));
}
uint4_t InstA() const {
- return static_cast<uint4_t>((Fetch16(0) >> 8) & 0x0f);
+ return InstA(Fetch16(0));
}
uint4_t InstB() const {
- return static_cast<uint4_t>(Fetch16(0) >> 12);
+ return InstB(Fetch16(0));
}
uint8_t InstAA() const {
- return static_cast<uint8_t>(Fetch16(0) >> 8);
+ return InstAA(Fetch16(0));
+ }
+
+ uint4_t InstA(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<uint4_t>((inst_data >> 8) & 0x0f);
+ }
+
+ uint4_t InstB(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<uint4_t>(inst_data >> 12);
+ }
+
+ uint8_t InstAA(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<uint8_t>(inst_data >> 8);
}
static const char* const kInstructionNames[];
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index fa1e4e8..a5d2f21 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -503,24 +503,18 @@
}
}
-void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
+Object* MarkSweep::MarkRootParallelCallback(Object* root, void* arg) {
DCHECK(root != NULL);
DCHECK(arg != NULL);
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
+ return root;
}
-void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
- DCHECK(root != NULL);
- DCHECK(arg != NULL);
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->MarkObjectNonNull(root);
-}
-
-void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
- DCHECK(root != NULL);
- DCHECK(arg != NULL);
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->MarkObjectNonNull(root);
+Object* MarkSweep::MarkRootCallback(Object* root, void* arg) {
+ DCHECK(root != nullptr);
+ DCHECK(arg != nullptr);
+ reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(root);
+ return root;
}
void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
@@ -548,20 +542,20 @@
// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
timings_.StartSplit("MarkRoots");
- Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
+ Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
timings_.EndSplit();
}
void MarkSweep::MarkNonThreadRoots() {
timings_.StartSplit("MarkNonThreadRoots");
- Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
+ Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
timings_.EndSplit();
}
void MarkSweep::MarkConcurrentRoots() {
timings_.StartSplit("MarkConcurrentRoots");
// Visit all runtime roots and clear dirty flags.
- Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
+ Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
timings_.EndSplit();
}
@@ -956,8 +950,11 @@
ProcessMarkStack(false);
}
-bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
- return reinterpret_cast<MarkSweep*>(arg)->IsMarked(object);
+mirror::Object* MarkSweep::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+ if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
+ return object;
+ }
+ return nullptr;
}
void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
@@ -967,33 +964,26 @@
void MarkSweep::ReMarkRoots() {
timings_.StartSplit("ReMarkRoots");
- Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
+ Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
timings_.EndSplit();
}
-void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- WriterMutexLock mu(Thread::Current(), vm->weak_globals_lock);
- for (const Object** entry : vm->weak_globals) {
- if (!is_marked(*entry, arg)) {
- *entry = kClearedJniWeakGlobal;
- }
- }
-}
-
struct ArrayMarkedCheck {
accounting::ObjectStack* live_stack;
MarkSweep* mark_sweep;
};
// Either marked or not live.
-bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
+mirror::Object* MarkSweep::SystemWeakIsMarkedArrayCallback(Object* object, void* arg) {
ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
if (array_check->mark_sweep->IsMarked(object)) {
- return true;
+ return object;
}
accounting::ObjectStack* live_stack = array_check->live_stack;
- return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
+ if (std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End()) {
+ return object;
+ }
+ return nullptr;
}
void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
@@ -1003,14 +993,11 @@
// !IsMarked && IsLive
// So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
// Or for swapped (IsLive || !IsMarked).
-
timings_.StartSplit("SweepSystemWeaksArray");
ArrayMarkedCheck visitor;
visitor.live_stack = allocations;
visitor.mark_sweep = this;
- runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
- runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
- SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
+ runtime->SweepSystemWeaks(SystemWeakIsMarkedArrayCallback, &visitor);
timings_.EndSplit();
}
@@ -1022,16 +1009,14 @@
// So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
// Or for swapped (IsLive || !IsMarked).
timings_.StartSplit("SweepSystemWeaks");
- runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
- runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
- SweepJniWeakGlobals(IsMarkedCallback, this);
+ runtime->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
timings_.EndSplit();
}
-bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
+mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
// We don't actually want to sweep the object, so lets return "marked"
- return true;
+ return obj;
}
void MarkSweep::VerifyIsLive(const Object* obj) {
@@ -1050,16 +1035,8 @@
}
void MarkSweep::VerifySystemWeaks() {
- Runtime* runtime = Runtime::Current();
- // Verify system weaks, uses a special IsMarked callback which always returns true.
- runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
- runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
-
- JavaVMExt* vm = runtime->GetJavaVM();
- ReaderMutexLock mu(Thread::Current(), vm->weak_globals_lock);
- for (const Object** entry : vm->weak_globals) {
- VerifyIsLive(*entry);
- }
+ // Verify system weaks, uses a special object visitor which returns the input object.
+ Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}
struct SweepCallbackContext {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index fdd0c86..a857dab 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -208,7 +208,7 @@
void SweepSystemWeaksArray(accounting::ObjectStack* allocations)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
+ static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void VerifySystemWeaks()
@@ -223,11 +223,11 @@
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
- static void MarkObjectCallback(const mirror::Object* root, void* arg)
+ static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void MarkRootParallelCallback(const mirror::Object* root, void* arg);
+ static mirror::Object* MarkRootParallelCallback(mirror::Object* root, void* arg);
// Marks an object.
void MarkObject(const mirror::Object* obj)
@@ -246,16 +246,12 @@
// Returns true if the object has its bit set in the mark bitmap.
bool IsMarked(const mirror::Object* object) const;
- static bool IsMarkedCallback(const mirror::Object* object, void* arg)
+ static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool IsMarkedArrayCallback(const mirror::Object* object, void* arg)
+ static mirror::Object* SystemWeakIsMarkedArrayCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void ReMarkObjectVisitor(const mirror::Object* root, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
@@ -394,9 +390,6 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
// Whether or not we count how many of each type of object were scanned.
static const bool kCountScannedTypes = false;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 63f0405..f4d10c6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1322,11 +1322,12 @@
image_mod_union_table_->MarkReferences(mark_sweep);
}
-static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
+static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
if (root == obj) {
LOG(INFO) << "Object " << obj << " is a root";
}
+ return root;
}
class ScanVisitor {
@@ -1414,9 +1415,10 @@
return heap_->IsLiveObjectLocked(obj);
}
- static void VerifyRoots(const mirror::Object* root, void* arg) {
+ static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
- (*visitor)(NULL, root, MemberOffset(0), true);
+ (*visitor)(nullptr, root, MemberOffset(0), true);
+ return root;
}
private:
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 8b99e96..0de676e 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -22,7 +22,7 @@
#include "utils.h"
#include <valgrind.h>
-#include <../memcheck/memcheck.h>
+#include <memcheck/memcheck.h>
namespace art {
namespace gc {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a174c0a..c6d028e 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -17,6 +17,7 @@
#include "large_object_space.h"
#include "base/logging.h"
+#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 0b2e741..67620a0 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -484,11 +484,11 @@
}
private:
- static void RootVisitor(const mirror::Object* obj, void* arg)
+ static mirror::Object* RootVisitor(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(arg != NULL);
- Hprof* hprof = reinterpret_cast<Hprof*>(arg);
- hprof->VisitRoot(obj);
+ DCHECK(arg != NULL);
+ reinterpret_cast<Hprof*>(arg)->VisitRoot(obj);
+ return obj;
}
static void HeapBitmapCallback(mirror::Object* obj, void* arg)
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 8af4d7e..2bd8353 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -40,7 +40,7 @@
CHECK_LE(initialCount, maxCount);
CHECK_NE(desiredKind, kSirtOrInvalid);
- table_ = reinterpret_cast<const mirror::Object**>(malloc(initialCount * sizeof(const mirror::Object*)));
+ table_ = reinterpret_cast<mirror::Object**>(malloc(initialCount * sizeof(const mirror::Object*)));
CHECK(table_ != NULL);
memset(table_, 0xd1, initialCount * sizeof(const mirror::Object*));
@@ -75,7 +75,7 @@
return true;
}
-IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const mirror::Object* obj) {
+IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
IRTSegmentState prevState;
prevState.all = cookie;
size_t topIndex = segment_state_.parts.topIndex;
@@ -101,7 +101,7 @@
}
DCHECK_GT(newSize, alloc_entries_);
- table_ = reinterpret_cast<const mirror::Object**>(realloc(table_, newSize * sizeof(const mirror::Object*)));
+ table_ = reinterpret_cast<mirror::Object**>(realloc(table_, newSize * sizeof(mirror::Object*)));
slot_data_ = reinterpret_cast<IndirectRefSlot*>(realloc(slot_data_,
newSize * sizeof(IndirectRefSlot)));
if (table_ == NULL || slot_data_ == NULL) {
@@ -126,7 +126,7 @@
if (numHoles > 0) {
DCHECK_GT(topIndex, 1U);
// Find the first hole; likely to be near the end of the list.
- const mirror::Object** pScan = &table_[topIndex - 1];
+ mirror::Object** pScan = &table_[topIndex - 1];
DCHECK(*pScan != NULL);
while (*--pScan != NULL) {
DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
@@ -194,7 +194,8 @@
return true;
}
-static int Find(mirror::Object* direct_pointer, int bottomIndex, int topIndex, const mirror::Object** table) {
+static int Find(mirror::Object* direct_pointer, int bottomIndex, int topIndex,
+ mirror::Object** table) {
for (int i = bottomIndex; i < topIndex; ++i) {
if (table[i] == direct_pointer) {
return i;
@@ -310,13 +311,14 @@
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
for (auto ref : *this) {
- visitor(*ref, arg);
+ *ref = visitor(const_cast<mirror::Object*>(*ref), arg);
+ DCHECK(*ref != nullptr);
}
}
void IndirectReferenceTable::Dump(std::ostream& os) const {
os << kind_ << " table dump:\n";
- std::vector<const mirror::Object*> entries(table_, table_ + Capacity());
+ ReferenceTable::Table entries(table_, table_ + Capacity());
// Remove NULLs.
for (int i = entries.size() - 1; i >= 0; --i) {
if (entries[i] == NULL) {
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 26f53db..51b238c 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -206,7 +206,7 @@
class IrtIterator {
public:
- explicit IrtIterator(const mirror::Object** table, size_t i, size_t capacity)
+ explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
: table_(table), i_(i), capacity_(capacity) {
SkipNullsAndTombstones();
}
@@ -217,7 +217,7 @@
return *this;
}
- const mirror::Object** operator*() {
+ mirror::Object** operator*() {
return &table_[i_];
}
@@ -233,7 +233,7 @@
}
}
- const mirror::Object** table_;
+ mirror::Object** table_;
size_t i_;
size_t capacity_;
};
@@ -258,7 +258,7 @@
* Returns NULL if the table is full (max entries reached, or alloc
* failed during expansion).
*/
- IndirectRef Add(uint32_t cookie, const mirror::Object* obj)
+ IndirectRef Add(uint32_t cookie, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -266,7 +266,7 @@
*
* Returns kInvalidIndirectRefObject if iref is invalid.
*/
- const mirror::Object* Get(IndirectRef iref) const {
+ mirror::Object* Get(IndirectRef iref) const {
if (!GetChecked(iref)) {
return kInvalidIndirectRefObject;
}
@@ -363,7 +363,7 @@
IRTSegmentState segment_state_;
/* bottom of the stack */
- const mirror::Object** table_;
+ mirror::Object** table_;
/* bit mask, ORed into all irefs */
IndirectRefKind kind_;
/* extended debugging info */
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index bd2890c..b6c6cb4 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -17,6 +17,7 @@
#include "common_test.h"
#include "indirect_reference_table.h"
+#include "mirror/object-inl.h"
namespace art {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 6caad01..481cbad 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -41,6 +41,11 @@
namespace art {
namespace instrumentation {
+// Do we want to deoptimize for method entry and exit listeners or just try to intercept
+// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
+// application's performance.
+static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = false;
+
static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
@@ -264,12 +269,14 @@
bool require_interpreter = false;
if ((events & kMethodEntered) != 0) {
method_entry_listeners_.push_back(listener);
- require_entry_exit_stubs = true;
+ require_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_entry_exit_stubs = !kDeoptimizeForAccurateMethodEntryExitListeners;
have_method_entry_listeners_ = true;
}
if ((events & kMethodExited) != 0) {
method_exit_listeners_.push_back(listener);
- require_entry_exit_stubs = true;
+ require_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_entry_exit_stubs = !kDeoptimizeForAccurateMethodEntryExitListeners;
have_method_exit_listeners_ = true;
}
if ((events & kMethodUnwind) != 0) {
@@ -300,7 +307,10 @@
method_entry_listeners_.remove(listener);
}
have_method_entry_listeners_ = method_entry_listeners_.size() > 0;
- require_entry_exit_stubs |= have_method_entry_listeners_;
+ require_entry_exit_stubs |= have_method_entry_listeners_ &&
+ !kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_interpreter = have_method_entry_listeners_ &&
+ kDeoptimizeForAccurateMethodEntryExitListeners;
}
if ((events & kMethodExited) != 0) {
bool contains = std::find(method_exit_listeners_.begin(), method_exit_listeners_.end(),
@@ -309,7 +319,10 @@
method_exit_listeners_.remove(listener);
}
have_method_exit_listeners_ = method_exit_listeners_.size() > 0;
- require_entry_exit_stubs |= have_method_exit_listeners_;
+ require_entry_exit_stubs |= have_method_exit_listeners_ &&
+ !kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_interpreter = have_method_exit_listeners_ &&
+ kDeoptimizeForAccurateMethodEntryExitListeners;
}
if ((events & kMethodUnwind) != 0) {
method_unwind_listeners_.remove(listener);
@@ -455,7 +468,7 @@
void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method,
uint32_t catch_dex_pc,
- mirror::Throwable* exception_object) {
+ mirror::Throwable* exception_object) const {
if (have_exception_caught_listeners_) {
DCHECK_EQ(thread->GetException(NULL), exception_object);
thread->ClearException();
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 6c80b41..28f9555 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -186,7 +186,7 @@
// Inform listeners that an exception was caught.
void ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ mirror::Throwable* exception_object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 89c15f8..29d2ae9 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -45,15 +45,16 @@
bool only_dirty, bool clean_dirty) {
MutexLock mu(Thread::Current(), intern_table_lock_);
if (!only_dirty || is_dirty_) {
- for (const auto& strong_intern : strong_interns_) {
- visitor(strong_intern.second, arg);
+ for (auto& strong_intern : strong_interns_) {
+ strong_intern.second = reinterpret_cast<mirror::String*>(visitor(strong_intern.second, arg));
+ DCHECK(strong_intern.second != nullptr);
}
+
if (clean_dirty) {
is_dirty_ = false;
}
}
- // Note: we deliberately don't visit the weak_interns_ table and the immutable
- // image roots.
+ // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
mirror::String* InternTable::Lookup(Table& table, mirror::String* s,
@@ -196,14 +197,16 @@
return found == s;
}
-void InternTable::SweepInternTableWeaks(IsMarkedTester is_marked, void* arg) {
+void InternTable::SweepInternTableWeaks(RootVisitor visitor, void* arg) {
MutexLock mu(Thread::Current(), intern_table_lock_);
- // TODO: std::remove_if + lambda.
for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
mirror::Object* object = it->second;
- if (!is_marked(object, arg)) {
+ mirror::Object* new_object = visitor(object, arg);
+ if (new_object == nullptr) {
+ // TODO: use it = weak_interns_.erase(it) when we get a c++11 stl.
weak_interns_.erase(it++);
} else {
+ it->second = down_cast<mirror::String*>(new_object);
++it;
}
}
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 07615dc..9806130 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -55,8 +55,7 @@
// Interns a potentially new string in the 'weak' table. (See above.)
mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepInternTableWeaks(IsMarkedTester is_marked, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void SweepInternTableWeaks(RootVisitor visitor, void* arg);
bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index d79d2c4..aa2502d 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -81,8 +81,11 @@
mutable std::vector<const mirror::String*> expected_;
};
-bool IsMarked(const mirror::Object* object, void* arg) {
- return reinterpret_cast<TestPredicate*>(arg)->IsMarked(object);
+mirror::Object* IsMarkedSweepingVisitor(mirror::Object* object, void* arg) {
+ if (reinterpret_cast<TestPredicate*>(arg)->IsMarked(object)) {
+ return object;
+ }
+ return nullptr;
}
TEST_F(InternTableTest, SweepInternTableWeaks) {
@@ -105,7 +108,7 @@
p.Expect(s1.get());
{
ReaderMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
- t.SweepInternTableWeaks(IsMarked, &p);
+ t.SweepInternTableWeaks(IsMarkedSweepingVisitor, &p);
}
EXPECT_EQ(2U, t.Size());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 1677e80..f35cfa3 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -14,153 +14,11 @@
* limitations under the License.
*/
-#include "interpreter.h"
-
-#include <math.h>
-
-#include "base/logging.h"
-#include "class_linker-inl.h"
-#include "common_throws.h"
-#include "dex_file-inl.h"
-#include "dex_instruction-inl.h"
-#include "dex_instruction.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "gc/accounting/card_table-inl.h"
-#include "invoke_arg_array_builder.h"
-#include "nth_caller_visitor.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "object_utils.h"
-#include "ScopedLocalRef.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-#include "well_known_classes.h"
-
-using ::art::mirror::ArtField;
-using ::art::mirror::ArtMethod;
-using ::art::mirror::Array;
-using ::art::mirror::BooleanArray;
-using ::art::mirror::ByteArray;
-using ::art::mirror::CharArray;
-using ::art::mirror::Class;
-using ::art::mirror::ClassLoader;
-using ::art::mirror::IntArray;
-using ::art::mirror::LongArray;
-using ::art::mirror::Object;
-using ::art::mirror::ObjectArray;
-using ::art::mirror::ShortArray;
-using ::art::mirror::String;
-using ::art::mirror::Throwable;
+#include "interpreter_common.h"
namespace art {
-
namespace interpreter {
-static const int32_t kMaxInt = std::numeric_limits<int32_t>::max();
-static const int32_t kMinInt = std::numeric_limits<int32_t>::min();
-static const int64_t kMaxLong = std::numeric_limits<int64_t>::max();
-static const int64_t kMinLong = std::numeric_limits<int64_t>::min();
-
-static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
- JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // In a runtime that's not started we intercept certain methods to avoid complicated dependency
- // problems in core libraries.
- std::string name(PrettyMethod(shadow_frame->GetMethod()));
- if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
- std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
- ClassLoader* class_loader = NULL; // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
- Class* found = Runtime::Current()->GetClassLinker()->FindClass(descriptor.c_str(),
- class_loader);
- CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
- << PrettyDescriptor(descriptor);
- result->SetL(found);
- } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
- CHECK(c != NULL);
- SirtRef<Object> obj(self, klass->AllocObject(self));
- CHECK(obj.get() != NULL);
- EnterInterpreterFromInvoke(self, c, obj.get(), NULL, NULL);
- result->SetL(obj.get());
- } else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
- // Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
- // going the reflective Dex way.
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
- ArtField* found = NULL;
- FieldHelper fh;
- ObjectArray<ArtField>* fields = klass->GetIFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- fh.ChangeField(f);
- if (name->Equals(fh.GetName())) {
- found = f;
- }
- }
- if (found == NULL) {
- fields = klass->GetSFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- fh.ChangeField(f);
- if (name->Equals(fh.GetName())) {
- found = f;
- }
- }
- }
- CHECK(found != NULL)
- << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
- << name->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
- // TODO: getDeclaredField calls GetType once the field is found to ensure a
- // NoClassDefFoundError is thrown if the field's type cannot be resolved.
- Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
- SirtRef<Object> field(self, jlr_Field->AllocObject(self));
- CHECK(field.get() != NULL);
- ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
- uint32_t args[1];
- args[0] = reinterpret_cast<uint32_t>(found);
- EnterInterpreterFromInvoke(self, c, field.get(), args, NULL);
- result->SetL(field.get());
- } else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
- name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
- // Special case array copying without initializing System.
- Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
- jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
- jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
- jint length = shadow_frame->GetVReg(arg_offset + 4);
- if (!ctype->IsPrimitive()) {
- ObjectArray<Object>* src = shadow_frame->GetVRegReference(arg_offset)->AsObjectArray<Object>();
- ObjectArray<Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<Object>();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveChar()) {
- CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
- CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveInt()) {
- IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
- IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else {
- UNIMPLEMENTED(FATAL) << "System.arraycopy of unexpected type: " << PrettyDescriptor(ctype);
- }
- } else {
- // Not special, continue with regular interpreter execution.
- artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result);
- }
-}
-
// Hand select a number of methods to be run in a not yet started runtime without using JNI.
static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
Object* receiver, uint32_t* args, JValue* result)
@@ -406,2641 +264,12 @@
}
}
-static void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorEnter(self);
-}
+enum InterpreterImplKind {
+ kSwitchImpl, // switch-based interpreter implementation.
+ kComputedGotoImplKind // computed-goto-based interpreter implementation.
+};
-static void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorExit(self);
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<InvokeType type, bool is_range, bool do_access_check>
-static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
-
-template<InvokeType type, bool is_range, bool do_access_check>
-static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result) {
- uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- Object* receiver = (type == kStatic) ? NULL : shadow_frame.GetVRegReference(vregC);
- ArtMethod* method = FindMethodFromCode(method_idx, receiver, shadow_frame.GetMethod(), self,
- do_access_check, type);
- if (UNLIKELY(method == NULL)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- } else if (UNLIKELY(method->IsAbstract())) {
- ThrowAbstractMethodError(method);
- result->SetJ(0);
- return false;
- }
-
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- uint16_t num_regs;
- uint16_t num_ins;
- if (LIKELY(code_item != NULL)) {
- num_regs = code_item->registers_size_;
- num_ins = code_item->ins_size_;
- } else {
- DCHECK(method->IsNative() || method->IsProxyMethod());
- num_regs = num_ins = ArtMethod::NumArgRegisters(mh.GetShorty());
- if (!method->IsStatic()) {
- num_regs++;
- num_ins++;
- }
- }
-
- void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
- ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame, method, 0, memory));
- size_t cur_reg = num_regs - num_ins;
- if (receiver != NULL) {
- new_shadow_frame->SetVRegReference(cur_reg, receiver);
- ++cur_reg;
- }
-
- size_t arg_offset = (receiver == NULL) ? 0 : 1;
- const char* shorty = mh.GetShorty();
- uint32_t arg[5];
- if (!is_range) {
- inst->GetArgs(arg);
- }
- for (size_t shorty_pos = 0; cur_reg < num_regs; ++shorty_pos, cur_reg++, arg_offset++) {
- DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
- size_t arg_pos = is_range ? vregC + arg_offset : arg[arg_offset];
- switch (shorty[shorty_pos + 1]) {
- case 'L': {
- Object* o = shadow_frame.GetVRegReference(arg_pos);
- new_shadow_frame->SetVRegReference(cur_reg, o);
- break;
- }
- case 'J': case 'D': {
- uint64_t wide_value = (static_cast<uint64_t>(shadow_frame.GetVReg(arg_pos + 1)) << 32) |
- static_cast<uint32_t>(shadow_frame.GetVReg(arg_pos));
- new_shadow_frame->SetVRegLong(cur_reg, wide_value);
- cur_reg++;
- arg_offset++;
- break;
- }
- default:
- new_shadow_frame->SetVReg(cur_reg, shadow_frame.GetVReg(arg_pos));
- break;
- }
- }
-
- if (LIKELY(Runtime::Current()->IsStarted())) {
- (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
- } else {
- UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
- }
- return !self->IsExceptionPending();
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<bool is_range>
-static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result)
- NO_THREAD_SAFETY_ANALYSIS;
-
-template<bool is_range>
-static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result) {
- uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- Object* receiver = shadow_frame.GetVRegReference(vregC);
- if (UNLIKELY(receiver == NULL)) {
- // We lost the reference to the method index so we cannot get a more
- // precised exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return false;
- }
- uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- // TODO: use ObjectArray<T>::GetWithoutChecks ?
- ArtMethod* method = receiver->GetClass()->GetVTable()->Get(vtable_idx);
- if (UNLIKELY(method == NULL)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- } else if (UNLIKELY(method->IsAbstract())) {
- ThrowAbstractMethodError(method);
- result->SetJ(0);
- return false;
- }
-
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- uint16_t num_regs;
- uint16_t num_ins;
- if (code_item != NULL) {
- num_regs = code_item->registers_size_;
- num_ins = code_item->ins_size_;
- } else {
- DCHECK(method->IsNative() || method->IsProxyMethod());
- num_regs = num_ins = ArtMethod::NumArgRegisters(mh.GetShorty());
- if (!method->IsStatic()) {
- num_regs++;
- num_ins++;
- }
- }
-
- void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
- ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame,
- method, 0, memory));
- size_t cur_reg = num_regs - num_ins;
- if (receiver != NULL) {
- new_shadow_frame->SetVRegReference(cur_reg, receiver);
- ++cur_reg;
- }
-
- size_t arg_offset = (receiver == NULL) ? 0 : 1;
- const char* shorty = mh.GetShorty();
- uint32_t arg[5];
- if (!is_range) {
- inst->GetArgs(arg);
- }
- for (size_t shorty_pos = 0; cur_reg < num_regs; ++shorty_pos, cur_reg++, arg_offset++) {
- DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
- size_t arg_pos = is_range ? vregC + arg_offset : arg[arg_offset];
- switch (shorty[shorty_pos + 1]) {
- case 'L': {
- Object* o = shadow_frame.GetVRegReference(arg_pos);
- new_shadow_frame->SetVRegReference(cur_reg, o);
- break;
- }
- case 'J': case 'D': {
- uint64_t wide_value = (static_cast<uint64_t>(shadow_frame.GetVReg(arg_pos + 1)) << 32) |
- static_cast<uint32_t>(shadow_frame.GetVReg(arg_pos));
- new_shadow_frame->SetVRegLong(cur_reg, wide_value);
- cur_reg++;
- arg_offset++;
- break;
- }
- default:
- new_shadow_frame->SetVReg(cur_reg, shadow_frame.GetVReg(arg_pos));
- break;
- }
- }
-
- if (LIKELY(Runtime::Current()->IsStarted())) {
- (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
- } else {
- UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
- }
- return !self->IsExceptionPending();
-}
-
-// We use template functions to optimize compiler inlining process. Otherwise,
-// some parts of the code (like a switch statement) which depend on a constant
-// parameter would not be inlined while it should be. These constant parameters
-// are now part of the template arguments.
-// Note these template functions are static and inlined so they should not be
-// part of the final object file.
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst) {
- bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
- uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
- find_type, Primitive::FieldSize(field_type),
- do_access_check);
- if (UNLIKELY(f == NULL)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
- Object* obj;
- if (is_static) {
- obj = f->GetDeclaringClass();
- } else {
- obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
- return false;
- }
- }
- uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimBoolean:
- shadow_frame.SetVReg(vregA, f->GetBoolean(obj));
- break;
- case Primitive::kPrimByte:
- shadow_frame.SetVReg(vregA, f->GetByte(obj));
- break;
- case Primitive::kPrimChar:
- shadow_frame.SetVReg(vregA, f->GetChar(obj));
- break;
- case Primitive::kPrimShort:
- shadow_frame.SetVReg(vregA, f->GetShort(obj));
- break;
- case Primitive::kPrimInt:
- shadow_frame.SetVReg(vregA, f->GetInt(obj));
- break;
- case Primitive::kPrimLong:
- shadow_frame.SetVRegLong(vregA, f->GetLong(obj));
- break;
- case Primitive::kPrimNot:
- shadow_frame.SetVRegReference(vregA, f->GetObject(obj));
- break;
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<Primitive::Type field_type>
-static bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<Primitive::Type field_type>
-static inline bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- // We lost the reference to the field index so we cannot get a more
- // precised exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return false;
- }
- MemberOffset field_offset(inst->VRegC_22c());
- const bool is_volatile = false; // iget-x-quick only on non volatile fields.
- const uint32_t vregA = inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimInt:
- shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset, is_volatile)));
- break;
- case Primitive::kPrimLong:
- shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset, is_volatile)));
- break;
- case Primitive::kPrimNot:
- shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object*>(field_offset, is_volatile));
- break;
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
- const Instruction* inst) {
- bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
- uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
- find_type, Primitive::FieldSize(field_type),
- do_access_check);
- if (UNLIKELY(f == NULL)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
- Object* obj;
- if (is_static) {
- obj = f->GetDeclaringClass();
- } else {
- obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
- f, false);
- return false;
- }
- }
- uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimBoolean:
- f->SetBoolean(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimByte:
- f->SetByte(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimChar:
- f->SetChar(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimShort:
- f->SetShort(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimInt:
- f->SetInt(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimLong:
- f->SetLong(obj, shadow_frame.GetVRegLong(vregA));
- break;
- case Primitive::kPrimNot:
- f->SetObj(obj, shadow_frame.GetVRegReference(vregA));
- break;
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<Primitive::Type field_type>
-static bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<Primitive::Type field_type>
-static inline bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- // We lost the reference to the field index so we cannot get a more
- // precised exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return false;
- }
- MemberOffset field_offset(inst->VRegC_22c());
- const bool is_volatile = false; // iput-x-quick only on non volatile fields.
- const uint32_t vregA = inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimInt:
- obj->SetField32(field_offset, shadow_frame.GetVReg(vregA), is_volatile);
- break;
- case Primitive::kPrimLong:
- obj->SetField64(field_offset, shadow_frame.GetVRegLong(vregA), is_volatile);
- break;
- case Primitive::kPrimNot:
- obj->SetFieldObject(field_offset, shadow_frame.GetVRegReference(vregA), is_volatile);
- break;
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
-static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* java_lang_string_class = String::GetJavaLangString();
- if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (UNLIKELY(!class_linker->EnsureInitialized(java_lang_string_class,
- true, true))) {
- DCHECK(self->IsExceptionPending());
- return NULL;
- }
- }
- return mh.ResolveString(string_idx);
-}
-
-static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
- int32_t dividend, int32_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
- shadow_frame.SetVReg(result_reg, kMinInt);
- } else {
- shadow_frame.SetVReg(result_reg, dividend / divisor);
- }
- return true;
-}
-
-static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
- int32_t dividend, int32_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
- shadow_frame.SetVReg(result_reg, 0);
- } else {
- shadow_frame.SetVReg(result_reg, dividend % divisor);
- }
- return true;
-}
-
-static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
- int64_t dividend, int64_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
- shadow_frame.SetVRegLong(result_reg, kMinLong);
- } else {
- shadow_frame.SetVRegLong(result_reg, dividend / divisor);
- }
- return true;
-}
-
-static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
- int64_t dividend, int64_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
- shadow_frame.SetVRegLong(result_reg, 0);
- } else {
- shadow_frame.SetVRegLong(result_reg, dividend % divisor);
- }
- return true;
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-// Returns true on success, otherwise throws an exception and returns false.
-template <bool is_range, bool do_access_check>
-static bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
- Thread* self, JValue* result)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template <bool is_range, bool do_access_check>
-static inline bool DoFilledNewArray(const Instruction* inst,
- const ShadowFrame& shadow_frame,
- Thread* self, JValue* result) {
- DCHECK(inst->Opcode() == Instruction::FILLED_NEW_ARRAY ||
- inst->Opcode() == Instruction::FILLED_NEW_ARRAY_RANGE);
- const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
- if (!is_range) {
- // Checks FILLED_NEW_ARRAY's length does not exceed 5 arguments.
- CHECK_LE(length, 5);
- }
- if (UNLIKELY(length < 0)) {
- ThrowNegativeArraySizeException(length);
- return false;
- }
- uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(arrayClass == NULL)) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- CHECK(arrayClass->IsArrayClass());
- Class* componentClass = arrayClass->GetComponentType();
- if (UNLIKELY(componentClass->IsPrimitive() && !componentClass->IsPrimitiveInt())) {
- if (componentClass->IsPrimitiveLong() || componentClass->IsPrimitiveDouble()) {
- ThrowRuntimeException("Bad filled array request for type %s",
- PrettyDescriptor(componentClass).c_str());
- } else {
- self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
- "Ljava/lang/InternalError;",
- "Found type %s; filled-new-array not implemented for anything but \'int\'",
- PrettyDescriptor(componentClass).c_str());
- }
- return false;
- }
- Object* newArray = Array::Alloc(self, arrayClass, length);
- if (UNLIKELY(newArray == NULL)) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- if (is_range) {
- uint32_t vregC = inst->VRegC_3rc();
- const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
- for (int32_t i = 0; i < length; ++i) {
- if (is_primitive_int_component) {
- newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(vregC + i));
- } else {
- newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(vregC + i));
- }
- }
- } else {
- uint32_t arg[5];
- inst->GetArgs(arg);
- const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
- for (int32_t i = 0; i < length; ++i) {
- if (is_primitive_int_component) {
- newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(arg[i]));
- } else {
- newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(arg[i]));
- }
- }
- }
-
- result->SetL(newArray);
- return true;
-}
-
-static inline const Instruction* DoSparseSwitch(const Instruction* inst,
- const ShadowFrame& shadow_frame)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
- const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
- int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
- DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
- uint16_t size = switch_data[1];
- DCHECK_GT(size, 0);
- const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
- const int32_t* entries = keys + size;
- DCHECK(IsAligned<4>(entries));
- int lo = 0;
- int hi = size - 1;
- while (lo <= hi) {
- int mid = (lo + hi) / 2;
- int32_t foundVal = keys[mid];
- if (test_val < foundVal) {
- hi = mid - 1;
- } else if (test_val > foundVal) {
- lo = mid + 1;
- } else {
- return inst->RelativeAt(entries[mid]);
- }
- }
- return inst->Next_3xx();
-}
-
-static inline const Instruction* FindNextInstructionFollowingException(Thread* self,
- ShadowFrame& shadow_frame,
- uint32_t dex_pc,
- const uint16_t* insns,
- SirtRef<Object>& this_object_ref,
- instrumentation::Instrumentation* instrumentation)
- ALWAYS_INLINE;
-
-static inline const Instruction* FindNextInstructionFollowingException(Thread* self,
- ShadowFrame& shadow_frame,
- uint32_t dex_pc,
- const uint16_t* insns,
- SirtRef<Object>& this_object_ref,
- instrumentation::Instrumentation* instrumentation)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- self->VerifyStack();
- ThrowLocation throw_location;
- mirror::Throwable* exception = self->GetException(&throw_location);
- bool clear_exception;
- uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc,
- &clear_exception);
- if (found_dex_pc == DexFile::kDexNoIndex) {
- instrumentation->MethodUnwindEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), dex_pc);
- return NULL;
- } else {
- instrumentation->ExceptionCaughtEvent(self, throw_location,
- shadow_frame.GetMethod(),
- found_dex_pc, exception);
- if (clear_exception) {
- self->ClearException();
- }
- return Instruction::At(insns + found_dex_pc);
- }
-}
-
-#define HANDLE_PENDING_EXCEPTION() \
- CHECK(self->IsExceptionPending()); \
- inst = FindNextInstructionFollowingException(self, shadow_frame, inst->GetDexPc(insns), insns, \
- this_object_ref, instrumentation); \
- if (inst == NULL) { \
- return JValue(); /* Handled in caller. */ \
- }
-
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION(is_exception_pending, next_function) \
- if (UNLIKELY(is_exception_pending)) { \
- HANDLE_PENDING_EXCEPTION(); \
- } else { \
- inst = inst->next_function(); \
- }
-
-static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
- __attribute__((cold, noreturn, noinline));
-
-static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
- exit(0); // Unreachable, keep GCC happy.
-}
-
-// Code to run before each dex instruction.
-#define PREAMBLE()
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<bool do_access_check>
-static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register)
- NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));
-
-template<bool do_access_check>
-static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
- if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
- LOG(FATAL) << "Invalid shadow frame for interpreter use";
- return JValue();
- }
- self->VerifyStack();
- instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
-
- // As the 'this' object won't change during the execution of current code, we
- // want to cache it in local variables. Nevertheless, in order to let the
- // garbage collector access it, we store it into sirt references.
- SirtRef<Object> this_object_ref(self, shadow_frame.GetThisObject(code_item->ins_size_));
-
- uint32_t dex_pc = shadow_frame.GetDexPC();
- if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
- if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
- instrumentation->MethodEnterEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), 0);
- }
- }
- const uint16_t* const insns = code_item->insns_;
- const Instruction* inst = Instruction::At(insns + dex_pc);
- while (true) {
- dex_pc = inst->GetDexPc(insns);
- shadow_frame.SetDexPC(dex_pc);
- if (UNLIKELY(self->TestAllFlags())) {
- CheckSuspend(self);
- }
- if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), dex_pc);
- }
- const bool kTracing = false;
- if (kTracing) {
-#define TRACE_LOG std::cerr
- TRACE_LOG << PrettyMethod(shadow_frame.GetMethod())
- << StringPrintf("\n0x%x: ", dex_pc)
- << inst->DumpString(&mh.GetDexFile()) << "\n";
- for (size_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
- uint32_t raw_value = shadow_frame.GetVReg(i);
- Object* ref_value = shadow_frame.GetVRegReference(i);
- TRACE_LOG << StringPrintf(" vreg%d=0x%08X", i, raw_value);
- if (ref_value != NULL) {
- if (ref_value->GetClass()->IsStringClass() &&
- ref_value->AsString()->GetCharArray() != NULL) {
- TRACE_LOG << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
- } else {
- TRACE_LOG << "/" << PrettyTypeOf(ref_value);
- }
- }
- }
- TRACE_LOG << "\n";
-#undef TRACE_LOG
- }
- switch (inst->Opcode()) {
- case Instruction::NOP:
- PREAMBLE();
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_FROM16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22x(),
- shadow_frame.GetVReg(inst->VRegB_22x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MOVE_16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_32x(),
- shadow_frame.GetVReg(inst->VRegB_32x()));
- inst = inst->Next_3xx();
- break;
- case Instruction::MOVE_WIDE:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(),
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_WIDE_FROM16:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_22x(),
- shadow_frame.GetVRegLong(inst->VRegB_22x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MOVE_WIDE_16:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_32x(),
- shadow_frame.GetVRegLong(inst->VRegB_32x()));
- inst = inst->Next_3xx();
- break;
- case Instruction::MOVE_OBJECT:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_12x(),
- shadow_frame.GetVRegReference(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_OBJECT_FROM16:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_22x(),
- shadow_frame.GetVRegReference(inst->VRegB_22x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MOVE_OBJECT_16:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_32x(),
- shadow_frame.GetVRegReference(inst->VRegB_32x()));
- inst = inst->Next_3xx();
- break;
- case Instruction::MOVE_RESULT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_11x(), result_register.GetI());
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_RESULT_WIDE:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_11x(), result_register.GetJ());
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_RESULT_OBJECT:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_11x(), result_register.GetL());
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_EXCEPTION: {
- PREAMBLE();
- Throwable* exception = self->GetException(NULL);
- self->ClearException();
- shadow_frame.SetVRegReference(inst->VRegA_11x(), exception);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::RETURN_VOID: {
- PREAMBLE();
- JValue result;
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN_VOID_BARRIER: {
- PREAMBLE();
- ANDROID_MEMBAR_STORE();
- JValue result;
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN: {
- PREAMBLE();
- JValue result;
- result.SetJ(0);
- result.SetI(shadow_frame.GetVReg(inst->VRegA_11x()));
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN_WIDE: {
- PREAMBLE();
- JValue result;
- result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x()));
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN_OBJECT: {
- PREAMBLE();
- JValue result;
- result.SetJ(0);
- result.SetL(shadow_frame.GetVRegReference(inst->VRegA_11x()));
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::CONST_4: {
- PREAMBLE();
- uint4_t dst = inst->VRegA_11n();
- int4_t val = inst->VRegB_11n();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::CONST_16: {
- PREAMBLE();
- uint8_t dst = inst->VRegA_21s();
- int16_t val = inst->VRegB_21s();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CONST: {
- PREAMBLE();
- uint8_t dst = inst->VRegA_31i();
- int32_t val = inst->VRegB_31i();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_3xx();
- break;
- }
- case Instruction::CONST_HIGH16: {
- PREAMBLE();
- uint8_t dst = inst->VRegA_21h();
- int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CONST_WIDE_16:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_21s(), inst->VRegB_21s());
- inst = inst->Next_2xx();
- break;
- case Instruction::CONST_WIDE_32:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_31i(), inst->VRegB_31i());
- inst = inst->Next_3xx();
- break;
- case Instruction::CONST_WIDE:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_51l(), inst->VRegB_51l());
- inst = inst->Next_51l();
- break;
- case Instruction::CONST_WIDE_HIGH16:
- shadow_frame.SetVRegLong(inst->VRegA_21h(),
- static_cast<uint64_t>(inst->VRegB_21h()) << 48);
- inst = inst->Next_2xx();
- break;
- case Instruction::CONST_STRING: {
- PREAMBLE();
- String* s = ResolveString(self, mh, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(), s);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::CONST_STRING_JUMBO: {
- PREAMBLE();
- String* s = ResolveString(self, mh, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_31c(), s);
- inst = inst->Next_3xx();
- }
- break;
- }
- case Instruction::CONST_CLASS: {
- PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(), c);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::MONITOR_ENTER: {
- PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- } else {
- DoMonitorEnter(self, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- }
- break;
- }
- case Instruction::MONITOR_EXIT: {
- PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- } else {
- DoMonitorExit(self, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- }
- break;
- }
- case Instruction::CHECK_CAST: {
- PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c());
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
- ThrowClassCastException(c, obj->GetClass());
- HANDLE_PENDING_EXCEPTION();
- } else {
- inst = inst->Next_2xx();
- }
- }
- break;
- }
- case Instruction::INSTANCE_OF: {
- PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- shadow_frame.SetVReg(inst->VRegA_22c(), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::ARRAY_LENGTH: {
- PREAMBLE();
- Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x());
- if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVReg(inst->VRegA_12x(), array->AsArray()->GetLength());
- inst = inst->Next_1xx();
- }
- break;
- }
- case Instruction::NEW_INSTANCE: {
- PREAMBLE();
- Object* obj = AllocObjectFromCode(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, do_access_check);
- if (UNLIKELY(obj == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(), obj);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::NEW_ARRAY: {
- PREAMBLE();
- int32_t length = shadow_frame.GetVReg(inst->VRegB_22c());
- Object* obj = AllocArrayFromCode(inst->VRegC_22c(), shadow_frame.GetMethod(),
- length, self, do_access_check);
- if (UNLIKELY(obj == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_22c(), obj);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::FILLED_NEW_ARRAY: {
- PREAMBLE();
- bool success = DoFilledNewArray<false, do_access_check>(inst, shadow_frame,
- self, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::FILLED_NEW_ARRAY_RANGE: {
- PREAMBLE();
- bool success = DoFilledNewArray<true, do_access_check>(inst, shadow_frame,
- self, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::FILL_ARRAY_DATA: {
- PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- Array* array = obj->AsArray();
- DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
- const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
- if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
- self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
- "Ljava/lang/ArrayIndexOutOfBoundsException;",
- "failed FILL_ARRAY_DATA; length=%d, index=%d",
- array->GetLength(), payload->element_count);
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- uint32_t size_in_bytes = payload->element_count * payload->element_width;
- memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
- inst = inst->Next_3xx();
- break;
- }
- case Instruction::THROW: {
- PREAMBLE();
- Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x());
- if (UNLIKELY(exception == NULL)) {
- ThrowNullPointerException(NULL, "throw with null exception");
- } else {
- self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
- }
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- case Instruction::GOTO: {
- PREAMBLE();
- inst = inst->RelativeAt(inst->VRegA_10t());
- break;
- }
- case Instruction::GOTO_16: {
- PREAMBLE();
- inst = inst->RelativeAt(inst->VRegA_20t());
- break;
- }
- case Instruction::GOTO_32: {
- PREAMBLE();
- inst = inst->RelativeAt(inst->VRegA_30t());
- break;
- }
- case Instruction::PACKED_SWITCH: {
- PREAMBLE();
- const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
- int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
- DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
- uint16_t size = switch_data[1];
- DCHECK_GT(size, 0);
- const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
- int32_t first_key = keys[0];
- const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
- DCHECK(IsAligned<4>(targets));
- int32_t index = test_val - first_key;
- if (index >= 0 && index < size) {
- inst = inst->RelativeAt(targets[index]);
- } else {
- inst = inst->Next_3xx();
- }
- break;
- }
- case Instruction::SPARSE_SWITCH: {
- PREAMBLE();
- inst = DoSparseSwitch(inst, shadow_frame);
- break;
- }
- case Instruction::CMPL_FLOAT: {
- PREAMBLE();
- float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
- float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CMPG_FLOAT: {
- PREAMBLE();
- float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
- float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
- int32_t result;
- if (val1 < val2) {
- result = -1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = 1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CMPL_DOUBLE: {
- PREAMBLE();
- double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
- double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
-
- case Instruction::CMPG_DOUBLE: {
- PREAMBLE();
- double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
- double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
- int32_t result;
- if (val1 < val2) {
- result = -1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = 1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CMP_LONG: {
- PREAMBLE();
- int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
- int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::IF_EQ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) == shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_NE: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) != shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LT: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) < shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GE: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) >= shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GT: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) > shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LE: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) <= shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_EQZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) == 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_NEZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) != 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LTZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) < 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GEZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) >= 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GTZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) > 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LEZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) <= 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::AGET_BOOLEAN: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_BYTE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_CHAR: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_SHORT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_WIDE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVRegLong(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_OBJECT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVRegReference(inst->VRegA_23x(), array->GetWithoutChecks(index));
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_BOOLEAN: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_BYTE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int8_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_CHAR: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_SHORT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int16_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_WIDE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_OBJECT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x());
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) {
- array->SetWithoutChecks(index, val);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::IGET_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_BYTE: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_CHAR: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_SHORT: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_WIDE: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_OBJECT: {
- PREAMBLE();
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_QUICK: {
- PREAMBLE();
- bool success = DoIGetQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_WIDE_QUICK: {
- PREAMBLE();
- bool success = DoIGetQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_OBJECT_QUICK: {
- PREAMBLE();
- bool success = DoIGetQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_BYTE: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_CHAR: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_SHORT: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_WIDE: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_OBJECT: {
- PREAMBLE();
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_BYTE: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_CHAR: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_SHORT: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_WIDE: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_OBJECT: {
- PREAMBLE();
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_QUICK: {
- PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_WIDE_QUICK: {
- PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_OBJECT_QUICK: {
- PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_BYTE: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_CHAR: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_SHORT: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_WIDE: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_OBJECT: {
- PREAMBLE();
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL: {
- PREAMBLE();
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_SUPER: {
- PREAMBLE();
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_SUPER_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_DIRECT: {
- PREAMBLE();
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_DIRECT_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_INTERFACE: {
- PREAMBLE();
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_INTERFACE_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_STATIC: {
- PREAMBLE();
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_STATIC_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL_QUICK: {
- PREAMBLE();
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
- PREAMBLE();
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::NEG_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(), -shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NOT_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(), ~shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NEG_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(), -shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NOT_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(), ~shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NEG_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), -shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NEG_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), -shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(), shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::LONG_TO_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(), shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::LONG_TO_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::LONG_TO_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::FLOAT_TO_INT: {
- PREAMBLE();
- float val = shadow_frame.GetVRegFloat(inst->VRegB_12x());
- int32_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<float>(kMaxInt)) {
- result = kMaxInt;
- } else if (val < static_cast<float>(kMinInt)) {
- result = kMinInt;
- } else {
- result = val;
- }
- shadow_frame.SetVReg(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::FLOAT_TO_LONG: {
- PREAMBLE();
- float val = shadow_frame.GetVRegFloat(inst->VRegB_12x());
- int64_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<float>(kMaxLong)) {
- result = kMaxLong;
- } else if (val < static_cast<float>(kMinLong)) {
- result = kMinLong;
- } else {
- result = val;
- }
- shadow_frame.SetVRegLong(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::FLOAT_TO_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::DOUBLE_TO_INT: {
- PREAMBLE();
- double val = shadow_frame.GetVRegDouble(inst->VRegB_12x());
- int32_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<double>(kMaxInt)) {
- result = kMaxInt;
- } else if (val < static_cast<double>(kMinInt)) {
- result = kMinInt;
- } else {
- result = val;
- }
- shadow_frame.SetVReg(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DOUBLE_TO_LONG: {
- PREAMBLE();
- double val = shadow_frame.GetVRegDouble(inst->VRegB_12x());
- int64_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<double>(kMaxLong)) {
- result = kMaxLong;
- } else if (val < static_cast<double>(kMinLong)) {
- result = kMinLong;
- } else {
- result = val;
- }
- shadow_frame.SetVRegLong(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DOUBLE_TO_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_BYTE:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_CHAR:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_SHORT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- case Instruction::ADD_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) +
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) -
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) *
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_INT: {
- PREAMBLE();
- bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::REM_INT: {
- PREAMBLE();
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SHL_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) <<
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::USHR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::AND_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) &
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) |
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) ^
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) +
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) -
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) *
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_LONG:
- PREAMBLE();
- DoLongDivide(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
- break;
- case Instruction::REM_LONG:
- PREAMBLE();
- DoLongRemainder(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
- break;
- case Instruction::AND_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) &
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) |
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHL_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- inst = inst->Next_2xx();
- break;
- case Instruction::USHR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::REM_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
- shadow_frame.GetVRegFloat(inst->VRegC_23x())));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::REM_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
- shadow_frame.GetVRegDouble(inst->VRegC_23x())));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) +
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) -
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) *
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
- break;
- }
- case Instruction::REM_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
- break;
- }
- case Instruction::SHL_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) <<
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SHR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::USHR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::AND_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) &
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::OR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) |
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::XOR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) ^
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) +
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) -
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) *
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- break;
- }
- case Instruction::REM_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- break;
- }
- case Instruction::AND_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) &
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::OR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) |
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::XOR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) ^
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SHL_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) <<
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SHR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::USHR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) +
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) -
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) *
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) /
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::REM_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- fmodf(shadow_frame.GetVRegFloat(vregA),
- shadow_frame.GetVRegFloat(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) +
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) -
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) *
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) /
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::REM_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- fmod(shadow_frame.GetVRegDouble(vregA),
- shadow_frame.GetVRegDouble(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) +
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::RSUB_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- inst->VRegC_22s() -
- shadow_frame.GetVReg(inst->VRegB_22s()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) *
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_INT_LIT16: {
- PREAMBLE();
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::REM_INT_LIT16: {
- PREAMBLE();
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::AND_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) &
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) |
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) ^
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) +
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::RSUB_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- inst->VRegC_22b() -
- shadow_frame.GetVReg(inst->VRegB_22b()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) *
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_INT_LIT8: {
- PREAMBLE();
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::REM_INT_LIT8: {
- PREAMBLE();
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::AND_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) &
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) |
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) ^
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::SHL_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) <<
- (inst->VRegC_22b() & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) >>
- (inst->VRegC_22b() & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::USHR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
- (inst->VRegC_22b() & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
- case Instruction::UNUSED_EB ... Instruction::UNUSED_FF:
- case Instruction::UNUSED_79:
- case Instruction::UNUSED_7A:
- UnexpectedOpcode(inst, mh);
- }
- }
-} // NOLINT(readability/fn_size)
+static const InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register)
@@ -3052,12 +281,23 @@
shadow_frame.GetMethod()->GetDeclaringClass()->IsProxyClass());
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
- if (shadow_frame.GetMethod()->IsPreverified()) {
+
+ if (LIKELY(shadow_frame.GetMethod()->IsPreverified())) {
// Enter the "without access check" interpreter.
- return ExecuteImpl<false>(self, mh, code_item, shadow_frame, result_register);
+ if (kInterpreterImplKind == kSwitchImpl) {
+ return ExecuteSwitchImpl<false>(self, mh, code_item, shadow_frame, result_register);
+ } else {
+ DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
+ return ExecuteGotoImpl<false>(self, mh, code_item, shadow_frame, result_register);
+ }
} else {
// Enter the "with access check" interpreter.
- return ExecuteImpl<true>(self, mh, code_item, shadow_frame, result_register);
+ if (kInterpreterImplKind == kSwitchImpl) {
+ return ExecuteSwitchImpl<true>(self, mh, code_item, shadow_frame, result_register);
+ } else {
+ DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
+ return ExecuteGotoImpl<true>(self, mh, code_item, shadow_frame, result_register);
+ }
}
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
new file mode 100644
index 0000000..86a6aea
--- /dev/null
+++ b/runtime/interpreter/interpreter_common.cc
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_common.h"
+
+namespace art {
+namespace interpreter {
+
+template<InvokeType type, bool is_range, bool do_access_check>
+bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, JValue* result) {
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ Object* receiver = (type == kStatic) ? NULL : shadow_frame.GetVRegReference(vregC);
+ ArtMethod* method = FindMethodFromCode(method_idx, receiver, shadow_frame.GetMethod(), self,
+ do_access_check, type);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ } else if (UNLIKELY(method->IsAbstract())) {
+ ThrowAbstractMethodError(method);
+ result->SetJ(0);
+ return false;
+ }
+
+ MethodHelper mh(method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ uint16_t num_regs;
+ uint16_t num_ins;
+ if (LIKELY(code_item != NULL)) {
+ num_regs = code_item->registers_size_;
+ num_ins = code_item->ins_size_;
+ } else {
+ DCHECK(method->IsNative() || method->IsProxyMethod());
+ num_regs = num_ins = ArtMethod::NumArgRegisters(mh.GetShorty());
+ if (!method->IsStatic()) {
+ num_regs++;
+ num_ins++;
+ }
+ }
+
+ void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
+ ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame, method, 0, memory));
+ size_t cur_reg = num_regs - num_ins;
+ if (receiver != NULL) {
+ new_shadow_frame->SetVRegReference(cur_reg, receiver);
+ ++cur_reg;
+ }
+
+ size_t arg_offset = (receiver == NULL) ? 0 : 1;
+ const char* shorty = mh.GetShorty();
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetArgs(arg);
+ }
+ for (size_t shorty_pos = 0; cur_reg < num_regs; ++shorty_pos, cur_reg++, arg_offset++) {
+ DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
+ size_t arg_pos = is_range ? vregC + arg_offset : arg[arg_offset];
+ switch (shorty[shorty_pos + 1]) {
+ case 'L': {
+ Object* o = shadow_frame.GetVRegReference(arg_pos);
+ new_shadow_frame->SetVRegReference(cur_reg, o);
+ break;
+ }
+ case 'J': case 'D': {
+ uint64_t wide_value = (static_cast<uint64_t>(shadow_frame.GetVReg(arg_pos + 1)) << 32) |
+ static_cast<uint32_t>(shadow_frame.GetVReg(arg_pos));
+ new_shadow_frame->SetVRegLong(cur_reg, wide_value);
+ cur_reg++;
+ arg_offset++;
+ break;
+ }
+ default:
+ new_shadow_frame->SetVReg(cur_reg, shadow_frame.GetVReg(arg_pos));
+ break;
+ }
+ }
+
+ if (LIKELY(Runtime::Current()->IsStarted())) {
+ (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
+ } else {
+ UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
+ }
+ return !self->IsExceptionPending();
+}
+
+template<bool is_range>
+bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, JValue* result) {
+ uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ Object* receiver = shadow_frame.GetVRegReference(vregC);
+ if (UNLIKELY(receiver == NULL)) {
+ // We lost the reference to the method index so we cannot get a more
+    // precise exception message.
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ return false;
+ }
+ uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ // TODO: use ObjectArray<T>::GetWithoutChecks ?
+ ArtMethod* method = receiver->GetClass()->GetVTable()->Get(vtable_idx);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ } else if (UNLIKELY(method->IsAbstract())) {
+ ThrowAbstractMethodError(method);
+ result->SetJ(0);
+ return false;
+ }
+
+ MethodHelper mh(method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ uint16_t num_regs;
+ uint16_t num_ins;
+ if (code_item != NULL) {
+ num_regs = code_item->registers_size_;
+ num_ins = code_item->ins_size_;
+ } else {
+ DCHECK(method->IsNative() || method->IsProxyMethod());
+ num_regs = num_ins = ArtMethod::NumArgRegisters(mh.GetShorty());
+ if (!method->IsStatic()) {
+ num_regs++;
+ num_ins++;
+ }
+ }
+
+ void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
+ ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame,
+ method, 0, memory));
+ size_t cur_reg = num_regs - num_ins;
+ if (receiver != NULL) {
+ new_shadow_frame->SetVRegReference(cur_reg, receiver);
+ ++cur_reg;
+ }
+
+ size_t arg_offset = (receiver == NULL) ? 0 : 1;
+ const char* shorty = mh.GetShorty();
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetArgs(arg);
+ }
+ for (size_t shorty_pos = 0; cur_reg < num_regs; ++shorty_pos, cur_reg++, arg_offset++) {
+ DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
+ size_t arg_pos = is_range ? vregC + arg_offset : arg[arg_offset];
+ switch (shorty[shorty_pos + 1]) {
+ case 'L': {
+ Object* o = shadow_frame.GetVRegReference(arg_pos);
+ new_shadow_frame->SetVRegReference(cur_reg, o);
+ break;
+ }
+ case 'J': case 'D': {
+ uint64_t wide_value = (static_cast<uint64_t>(shadow_frame.GetVReg(arg_pos + 1)) << 32) |
+ static_cast<uint32_t>(shadow_frame.GetVReg(arg_pos));
+ new_shadow_frame->SetVRegLong(cur_reg, wide_value);
+ cur_reg++;
+ arg_offset++;
+ break;
+ }
+ default:
+ new_shadow_frame->SetVReg(cur_reg, shadow_frame.GetVReg(arg_pos));
+ break;
+ }
+ }
+
+ if (LIKELY(Runtime::Current()->IsStarted())) {
+ (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
+ } else {
+ UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
+ }
+ return !self->IsExceptionPending();
+}
+
+template <bool is_range, bool do_access_check>
+bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
+ Thread* self, JValue* result) {
+ DCHECK(inst->Opcode() == Instruction::FILLED_NEW_ARRAY ||
+ inst->Opcode() == Instruction::FILLED_NEW_ARRAY_RANGE);
+ const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
+ if (!is_range) {
+ // Checks FILLED_NEW_ARRAY's length does not exceed 5 arguments.
+ CHECK_LE(length, 5);
+ }
+ if (UNLIKELY(length < 0)) {
+ ThrowNegativeArraySizeException(length);
+ return false;
+ }
+ uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(arrayClass == NULL)) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+ CHECK(arrayClass->IsArrayClass());
+ Class* componentClass = arrayClass->GetComponentType();
+ if (UNLIKELY(componentClass->IsPrimitive() && !componentClass->IsPrimitiveInt())) {
+ if (componentClass->IsPrimitiveLong() || componentClass->IsPrimitiveDouble()) {
+ ThrowRuntimeException("Bad filled array request for type %s",
+ PrettyDescriptor(componentClass).c_str());
+ } else {
+ self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
+ "Ljava/lang/InternalError;",
+ "Found type %s; filled-new-array not implemented for anything but \'int\'",
+ PrettyDescriptor(componentClass).c_str());
+ }
+ return false;
+ }
+ Object* newArray = Array::Alloc(self, arrayClass, length);
+ if (UNLIKELY(newArray == NULL)) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+ if (is_range) {
+ uint32_t vregC = inst->VRegC_3rc();
+ const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
+ for (int32_t i = 0; i < length; ++i) {
+ if (is_primitive_int_component) {
+ newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(vregC + i));
+ } else {
+ newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(vregC + i));
+ }
+ }
+ } else {
+ uint32_t arg[5];
+ inst->GetArgs(arg);
+ const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
+ for (int32_t i = 0; i < length; ++i) {
+ if (is_primitive_int_component) {
+ newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(arg[i]));
+ } else {
+ newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(arg[i]));
+ }
+ }
+ }
+
+ result->SetL(newArray);
+ return true;
+}
+
+void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
+ JValue* result, size_t arg_offset) {
+ // In a runtime that's not started we intercept certain methods to avoid complicated dependency
+ // problems in core libraries.
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
+ std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
+ ClassLoader* class_loader = NULL; // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
+ Class* found = Runtime::Current()->GetClassLinker()->FindClass(descriptor.c_str(),
+ class_loader);
+ CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
+ << PrettyDescriptor(descriptor);
+ result->SetL(found);
+ } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
+ Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
+ CHECK(c != NULL);
+ SirtRef<Object> obj(self, klass->AllocObject(self));
+ CHECK(obj.get() != NULL);
+ EnterInterpreterFromInvoke(self, c, obj.get(), NULL, NULL);
+ result->SetL(obj.get());
+ } else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
+    // Special managed code cut-out to allow field lookup in an un-started runtime that'd fail
+ // going the reflective Dex way.
+ Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ ArtField* found = NULL;
+ FieldHelper fh;
+ ObjectArray<ArtField>* fields = klass->GetIFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
+ ArtField* f = fields->Get(i);
+ fh.ChangeField(f);
+ if (name->Equals(fh.GetName())) {
+ found = f;
+ }
+ }
+ if (found == NULL) {
+ fields = klass->GetSFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
+ ArtField* f = fields->Get(i);
+ fh.ChangeField(f);
+ if (name->Equals(fh.GetName())) {
+ found = f;
+ }
+ }
+ }
+ CHECK(found != NULL)
+ << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
+ << name->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
+ // TODO: getDeclaredField calls GetType once the field is found to ensure a
+ // NoClassDefFoundError is thrown if the field's type cannot be resolved.
+ Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
+ SirtRef<Object> field(self, jlr_Field->AllocObject(self));
+ CHECK(field.get() != NULL);
+ ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
+ uint32_t args[1];
+ args[0] = reinterpret_cast<uint32_t>(found);
+ EnterInterpreterFromInvoke(self, c, field.get(), args, NULL);
+ result->SetL(field.get());
+ } else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
+ name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
+ // Special case array copying without initializing System.
+ Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
+ jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
+ jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
+ jint length = shadow_frame->GetVReg(arg_offset + 4);
+ if (!ctype->IsPrimitive()) {
+ ObjectArray<Object>* src = shadow_frame->GetVRegReference(arg_offset)->AsObjectArray<Object>();
+ ObjectArray<Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<Object>();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveChar()) {
+ CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
+ CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveInt()) {
+ IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
+ IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else {
+ UNIMPLEMENTED(FATAL) << "System.arraycopy of unexpected type: " << PrettyDescriptor(ctype);
+ }
+ } else {
+ // Not special, continue with regular interpreter execution.
+ artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result);
+ }
+}
+
+// Explicit DoInvoke template function declarations.
+#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range_, _check) \
+ template bool DoInvoke<_type, _is_range_, _check>(Thread* self, ShadowFrame& shadow_frame, \
+ const Instruction* inst, JValue* result)
+
+#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL_VARIANTS(_type) \
+ EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, false); \
+ EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, true); \
+ EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, false); \
+ EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, true)
+
+EXPLICIT_DO_INVOKE_TEMPLATE_DECL_VARIANTS(kStatic);
+EXPLICIT_DO_INVOKE_TEMPLATE_DECL_VARIANTS(kDirect);
+EXPLICIT_DO_INVOKE_TEMPLATE_DECL_VARIANTS(kVirtual);
+EXPLICIT_DO_INVOKE_TEMPLATE_DECL_VARIANTS(kSuper);
+EXPLICIT_DO_INVOKE_TEMPLATE_DECL_VARIANTS(kInterface);
+
+#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL_VARIANTS
+#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
+
+// Explicit DoInvokeVirtualQuick template function declarations.
+#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
+template bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
+ const Instruction* inst, JValue* result)
+
+EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(false);
+EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true);
+#undef EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL
+
+// Explicit DoFilledNewArray template function declarations.
+#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check) \
+ template bool DoFilledNewArray<_is_range_, _check>(const Instruction* inst, \
+ const ShadowFrame& shadow_frame, \
+ Thread* self, JValue* result)
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(false, false);
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(false, true);
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(true, false);
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(true, true);
+#undef EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
new file mode 100644
index 0000000..ec1f942
--- /dev/null
+++ b/runtime/interpreter/interpreter_common.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
+
+#include "interpreter.h"
+
+#include <math.h>
+
+#include "base/logging.h"
+#include "class_linker-inl.h"
+#include "common_throws.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "gc/accounting/card_table-inl.h"
+#include "invoke_arg_array_builder.h"
+#include "nth_caller_visitor.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+#include "well_known_classes.h"
+
+using ::art::mirror::ArtField;
+using ::art::mirror::ArtMethod;
+using ::art::mirror::Array;
+using ::art::mirror::BooleanArray;
+using ::art::mirror::ByteArray;
+using ::art::mirror::CharArray;
+using ::art::mirror::Class;
+using ::art::mirror::ClassLoader;
+using ::art::mirror::IntArray;
+using ::art::mirror::LongArray;
+using ::art::mirror::Object;
+using ::art::mirror::ObjectArray;
+using ::art::mirror::ShortArray;
+using ::art::mirror::String;
+using ::art::mirror::Throwable;
+
+namespace art {
+namespace interpreter {
+
+// External references to both interpreter implementations.
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<bool do_access_check>
+extern JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register)
+ NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<bool do_access_check>
+extern JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register)
+ NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));
+
+// Common part of both implementations.
+static const int32_t kMaxInt = std::numeric_limits<int32_t>::max();
+static const int32_t kMinInt = std::numeric_limits<int32_t>::min();
+static const int64_t kMaxLong = std::numeric_limits<int64_t>::max();
+static const int64_t kMinLong = std::numeric_limits<int64_t>::min();
+
+void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
+ JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+ ref->MonitorEnter(self);
+}
+
+static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+ ref->MonitorExit(self);
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<InvokeType type, bool is_range, bool do_access_check>
+bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<bool is_range>
+bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, JValue* result)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+// We use template functions to optimize compiler inlining process. Otherwise,
+// some parts of the code (like a switch statement) which depend on a constant
+// parameter would not be inlined while it should be. These constant parameters
+// are now part of the template arguments.
+// Note these template functions are static and inlined so they should not be
+// part of the final object file.
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data) {
+ bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+ ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
+ find_type, Primitive::FieldSize(field_type),
+ do_access_check);
+ if (UNLIKELY(f == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
+ Object* obj;
+ if (is_static) {
+ obj = f->GetDeclaringClass();
+ } else {
+ obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
+ return false;
+ }
+ }
+ uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ shadow_frame.SetVReg(vregA, f->GetBoolean(obj));
+ break;
+ case Primitive::kPrimByte:
+ shadow_frame.SetVReg(vregA, f->GetByte(obj));
+ break;
+ case Primitive::kPrimChar:
+ shadow_frame.SetVReg(vregA, f->GetChar(obj));
+ break;
+ case Primitive::kPrimShort:
+ shadow_frame.SetVReg(vregA, f->GetShort(obj));
+ break;
+ case Primitive::kPrimInt:
+ shadow_frame.SetVReg(vregA, f->GetInt(obj));
+ break;
+ case Primitive::kPrimLong:
+ shadow_frame.SetVRegLong(vregA, f->GetLong(obj));
+ break;
+ case Primitive::kPrimNot:
+ shadow_frame.SetVRegReference(vregA, f->GetObject(obj));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<Primitive::Type field_type>
+static bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<Primitive::Type field_type>
+static inline bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ // We lost the reference to the field index so we cannot get a more
+    // precise exception message.
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ return false;
+ }
+ MemberOffset field_offset(inst->VRegC_22c());
+ const bool is_volatile = false; // iget-x-quick only on non volatile fields.
+ const uint32_t vregA = inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimInt:
+ shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset, is_volatile)));
+ break;
+ case Primitive::kPrimLong:
+ shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset, is_volatile)));
+ break;
+ case Primitive::kPrimNot:
+ shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object*>(field_offset, is_volatile));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data) {
+ bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+ ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
+ find_type, Primitive::FieldSize(field_type),
+ do_access_check);
+ if (UNLIKELY(f == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
+ Object* obj;
+ if (is_static) {
+ obj = f->GetDeclaringClass();
+ } else {
+ obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
+ f, false);
+ return false;
+ }
+ }
+ uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ f->SetBoolean(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimByte:
+ f->SetByte(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimChar:
+ f->SetChar(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimShort:
+ f->SetShort(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimInt:
+ f->SetInt(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimLong:
+ f->SetLong(obj, shadow_frame.GetVRegLong(vregA));
+ break;
+ case Primitive::kPrimNot:
+ f->SetObj(obj, shadow_frame.GetVRegReference(vregA));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<Primitive::Type field_type>
+static bool DoIPutQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<Primitive::Type field_type>
+static inline bool DoIPutQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ // We lost the reference to the field index so we cannot get a more
+    // precise exception message.
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ return false;
+ }
+ MemberOffset field_offset(inst->VRegC_22c());
+ const bool is_volatile = false; // iput-x-quick only on non volatile fields.
+ const uint32_t vregA = inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimInt:
+ obj->SetField32(field_offset, shadow_frame.GetVReg(vregA), is_volatile);
+ break;
+ case Primitive::kPrimLong:
+ obj->SetField64(field_offset, shadow_frame.GetVRegLong(vregA), is_volatile);
+ break;
+ case Primitive::kPrimNot:
+ obj->SetFieldObject(field_offset, shadow_frame.GetVRegReference(vregA), is_volatile);
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
+static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* java_lang_string_class = String::GetJavaLangString();
+ if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (UNLIKELY(!class_linker->EnsureInitialized(java_lang_string_class,
+ true, true))) {
+ DCHECK(self->IsExceptionPending());
+ return NULL;
+ }
+ }
+ return mh.ResolveString(string_idx);
+}
+
+static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
+ int32_t dividend, int32_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
+ shadow_frame.SetVReg(result_reg, kMinInt);
+ } else {
+ shadow_frame.SetVReg(result_reg, dividend / divisor);
+ }
+ return true;
+}
+
+static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
+ int32_t dividend, int32_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
+ shadow_frame.SetVReg(result_reg, 0);
+ } else {
+ shadow_frame.SetVReg(result_reg, dividend % divisor);
+ }
+ return true;
+}
+
+static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
+ int64_t dividend, int64_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
+ shadow_frame.SetVRegLong(result_reg, kMinLong);
+ } else {
+ shadow_frame.SetVRegLong(result_reg, dividend / divisor);
+ }
+ return true;
+}
+
+static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
+ int64_t dividend, int64_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
+ shadow_frame.SetVRegLong(result_reg, 0);
+ } else {
+ shadow_frame.SetVRegLong(result_reg, dividend % divisor);
+ }
+ return true;
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+// Returns true on success, otherwise throws an exception and returns false.
+template <bool is_range, bool do_access_check>
+bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
+ Thread* self, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
+
+// Handles the PACKED_SWITCH instruction: looks up the switch register's value
+// in the instruction's packed-switch payload.
+// Returns the branch offset (in code units) of the matching case, or 3 (the
+// size of the PACKED_SWITCH instruction) when no case matches.
+static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
+                                     uint16_t inst_data)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
+  // Payload layout (16-bit units): [0] signature, [1] case count,
+  // [2..3] first key (int32), [4..] one 32-bit branch target per case.
+  const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+  int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
+  DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
+  uint16_t size = switch_data[1];
+  DCHECK_GT(size, 0);
+  const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
+  DCHECK(IsAligned<4>(keys));
+  int32_t first_key = keys[0];
+  const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
+  DCHECK(IsAligned<4>(targets));
+  // Keys are consecutive in a packed switch, so the target is indexed
+  // directly by the distance from the first key.
+  int32_t index = test_val - first_key;
+  if (index >= 0 && index < size) {
+    return targets[index];
+  } else {
+    // No corresponding value: move forward by 3 (size of PACKED_SWITCH).
+    return 3;
+  }
+}
+
+// Handles the SPARSE_SWITCH instruction: binary-searches the switch
+// register's value in the instruction's sparse-switch payload.
+// Returns the branch offset (in code units) of the matching case, or 3 (the
+// size of the SPARSE_SWITCH instruction) when no case matches.
+static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
+                                     uint16_t inst_data)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
+  // Payload layout (16-bit units): [0] signature, [1] case count, then
+  // |size| sorted 32-bit keys followed by |size| 32-bit branch targets.
+  const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+  int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
+  DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
+  uint16_t size = switch_data[1];
+  DCHECK_GT(size, 0);
+  const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
+  DCHECK(IsAligned<4>(keys));
+  const int32_t* entries = keys + size;
+  DCHECK(IsAligned<4>(entries));
+  // Binary search; the dex format requires sparse-switch keys to be sorted.
+  int lo = 0;
+  int hi = size - 1;
+  while (lo <= hi) {
+    int mid = (lo + hi) / 2;
+    int32_t foundVal = keys[mid];
+    if (test_val < foundVal) {
+      hi = mid - 1;
+    } else if (test_val > foundVal) {
+      lo = mid + 1;
+    } else {
+      return entries[mid];
+    }
+  }
+  // No corresponding value: move forward by 3 (size of SPARSE_SWITCH).
+  return 3;
+}
+
+// Forward declaration so the ALWAYS_INLINE attribute can be attached
+// separately from the lock annotation on the definition below.
+static inline uint32_t FindNextInstructionFollowingException(Thread* self,
+                                                             ShadowFrame& shadow_frame,
+                                                             uint32_t dex_pc,
+                                                             mirror::Object* this_object,
+                                                             const instrumentation::Instrumentation* instrumentation)
+    ALWAYS_INLINE;
+
+// Locates the catch handler for the pending exception within the current
+// method. Returns the handler's dex pc, or DexFile::kDexNoIndex when the
+// method has no matching handler (the caller must unwind this frame).
+// Reports either a method-unwind or an exception-caught event to the
+// instrumentation, and clears the exception when FindCatchBlock requests it.
+static inline uint32_t FindNextInstructionFollowingException(Thread* self,
+                                                             ShadowFrame& shadow_frame,
+                                                             uint32_t dex_pc,
+                                                             mirror::Object* this_object,
+                                                             const instrumentation::Instrumentation* instrumentation)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  self->VerifyStack();
+  ThrowLocation throw_location;
+  mirror::Throwable* exception = self->GetException(&throw_location);
+  bool clear_exception = false;
+  uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc,
+                                                                   &clear_exception);
+  if (found_dex_pc == DexFile::kDexNoIndex) {
+    // No handler in this method: the frame will be popped.
+    instrumentation->MethodUnwindEvent(self, this_object,
+                                       shadow_frame.GetMethod(), dex_pc);
+  } else {
+    instrumentation->ExceptionCaughtEvent(self, throw_location,
+                                          shadow_frame.GetMethod(),
+                                          found_dex_pc, exception);
+    if (clear_exception) {
+      self->ClearException();
+    }
+  }
+  return found_dex_pc;
+}
+
+// Aborts the process on an opcode the interpreter does not handle. Declared
+// cold/noreturn/noinline to keep this error path out of the hot dispatch loop.
+static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
+  __attribute__((cold, noreturn, noinline));
+
+static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // LOG(FATAL) aborts; the exit() below only satisfies the noreturn checker.
+  LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
+  exit(0);  // Unreachable, keep GCC happy.
+}
+
+// Debugging aid: dumps the current method, dex pc, instruction and all vregs
+// to stderr. kTracing is a compile-time false constant, so the whole body is
+// dead code in normal builds and is eliminated by the compiler; flip the
+// constant locally to enable tracing.
+static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
+                                  const uint32_t dex_pc, MethodHelper& mh) {
+  const bool kTracing = false;
+  if (kTracing) {
+#define TRACE_LOG std::cerr
+    TRACE_LOG << PrettyMethod(shadow_frame.GetMethod())
+              << StringPrintf("\n0x%x: ", dex_pc)
+              << inst->DumpString(&mh.GetDexFile()) << "\n";
+    for (size_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
+      uint32_t raw_value = shadow_frame.GetVReg(i);
+      Object* ref_value = shadow_frame.GetVRegReference(i);
+      // NOTE(review): "%d" paired with the size_t |i| is a format-specifier
+      // mismatch on LP64 targets - consider "%zu" or a cast; confirm.
+      TRACE_LOG << StringPrintf(" vreg%d=0x%08X", i, raw_value);
+      if (ref_value != NULL) {
+        // For string references, print the contents; otherwise just the type.
+        if (ref_value->GetClass()->IsStringClass() &&
+            ref_value->AsString()->GetCharArray() != NULL) {
+          TRACE_LOG << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
+        } else {
+          TRACE_LOG << "/" << PrettyTypeOf(ref_value);
+        }
+      }
+    }
+    TRACE_LOG << "\n";
+#undef TRACE_LOG
+  }
+}
+
+// Returns whether a branch offset goes backward. A zero offset (branch to
+// self) is also treated as backward so callers still run suspend checks on
+// such loops.
+static inline bool IsBackwardBranch(int32_t branch_offset) {
+  return branch_offset <= 0;
+}
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
new file mode 100644
index 0000000..b55c2c2
--- /dev/null
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -0,0 +1,2357 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_common.h"
+
+namespace art {
+namespace interpreter {
+
+// In the following macros, we expect the following local variables exist:
+// - "self": the current Thread*.
+// - "inst" : the current Instruction*.
+// - "inst_data" : the current instruction's first 16 bits.
+// - "dex_pc": the current pc.
+// - "shadow_frame": the current shadow frame.
+// - "mh": the current MethodHelper.
+// - "currentHandlersTable": the current table of pointers to the instruction handlers.
+
+// Advance to the next instruction and updates interpreter state.
+// Moves |inst|/|dex_pc| by |_offset| code units, records the new pc in the
+// shadow frame, then dispatches to the next opcode's handler via computed
+// goto through the current handler table.
+// TODO: move check suspend to backward branch, return and exception handling.
+#define ADVANCE(_offset) \
+  do { \
+    int32_t disp = static_cast<int32_t>(_offset); \
+    inst = inst->RelativeAt(disp); \
+    dex_pc = static_cast<uint32_t>(static_cast<int32_t>(dex_pc) + disp); \
+    shadow_frame.SetDexPC(dex_pc); \
+    TraceExecution(shadow_frame, inst, dex_pc, mh); \
+    inst_data = inst->Fetch16(0); \
+    goto *currentHandlersTable[inst->Opcode(inst_data)]; \
+  } while (false)
+
+// Transfers control to the shared exception handling code at the end of the
+// dispatch loop.
+#define HANDLE_PENDING_EXCEPTION() goto exception_pending_label
+
+// Advances by |_offset| code units unless an exception is pending, in which
+// case control goes to the exception handling code instead.
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _offset) \
+  do { \
+    if (UNLIKELY(_is_exception_pending)) { \
+      HANDLE_PENDING_EXCEPTION(); \
+    } else { \
+      ADVANCE(_offset); \
+    } \
+  } while (false)
+
+// Selects between the plain handler table and the instrumentation-aware one,
+// depending on whether any dex pc listeners are currently installed.
+// NOTE(review): the trailing ';' after "while (false)" makes every use of
+// this macro expand to an extra empty statement - likely unintentional;
+// confirm before using it in contexts where that matters.
+#define UPDATE_HANDLER_TABLE() \
+  do { \
+    if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
+      currentHandlersTable = instrumentationHandlersTable; \
+    } else { \
+      currentHandlersTable = handlersTable; \
+    } \
+  } while (false);
+
+// In debug builds, aborts if control ever falls through past a handler; each
+// handler must leave via ADVANCE/goto, never by falling off the end.
+#define UNREACHABLE_CODE_CHECK() \
+  do { \
+    if (kIsDebugBuild) { \
+      LOG(FATAL) << "We should not be here !"; \
+    } \
+  } while (false)
+
+// Opens a handler: the label that ADVANCE()'s computed goto targets.
+#define HANDLE_INSTRUCTION_START(opcode) op_##opcode:  // NOLINT(whitespace/labels)
+// Closes a handler with the fall-through check above.
+#define HANDLE_INSTRUCTION_END() UNREACHABLE_CODE_CHECK()
+
+template<bool do_access_check>
+JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register) {
+ if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
+ LOG(FATAL) << "Invalid shadow frame for interpreter use";
+ return JValue();
+ }
+ self->VerifyStack();
+
+ uint32_t dex_pc = shadow_frame.GetDexPC();
+ const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+ if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
+ if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
+ instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), 0);
+ }
+ }
+ const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
+ uint16_t inst_data;
+
+ // Define handlers table.
+ static const void* handlersTable[kNumPackedOpcodes] = {
+#define INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) &&op_##code,
+#include "dex_instruction_list.h"
+ DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
+#undef DEX_INSTRUCTION_LIST
+#undef INSTRUCTION_HANDLER
+ };
+
+ static const void* instrumentationHandlersTable[kNumPackedOpcodes] = {
+#define INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) &&instrumentation_op_##code,
+#include "dex_instruction_list.h"
+ DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
+#undef DEX_INSTRUCTION_LIST
+#undef INSTRUCTION_HANDLER
+ };
+
+ const void** currentHandlersTable;
+ UPDATE_HANDLER_TABLE();
+
+ // Jump to first instruction.
+ ADVANCE(0);
+ UNREACHABLE_CODE_CHECK();
+
+ HANDLE_INSTRUCTION_START(NOP)
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_FROM16)
+ shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_16)
+ shadow_frame.SetVReg(inst->VRegA_32x(),
+ shadow_frame.GetVReg(inst->VRegB_32x()));
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_WIDE)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_WIDE_FROM16)
+ shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_22x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_WIDE_16)
+ shadow_frame.SetVRegLong(inst->VRegA_32x(),
+ shadow_frame.GetVRegLong(inst->VRegB_32x()));
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_OBJECT)
+ shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_OBJECT_FROM16)
+ shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_22x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_OBJECT_16)
+ shadow_frame.SetVRegReference(inst->VRegA_32x(),
+ shadow_frame.GetVRegReference(inst->VRegB_32x()));
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_RESULT)
+ shadow_frame.SetVReg(inst->VRegA_11x(inst_data), result_register.GetI());
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_RESULT_WIDE)
+ shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), result_register.GetJ());
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_RESULT_OBJECT)
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), result_register.GetL());
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
+ Throwable* exception = self->GetException(NULL);
+ self->ClearException();
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_VOID) {
+ JValue result;
+ if (do_access_check) {
+ // If access checks are required then the dex-to-dex compiler and analysis of
+ // whether the class has final fields hasn't been performed. Conservatively
+ // perform the memory barrier now.
+ ANDROID_MEMBAR_STORE();
+ }
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_VOID_BARRIER) {
+ ANDROID_MEMBAR_STORE();
+ JValue result;
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN) {
+ JValue result;
+ result.SetJ(0);
+ result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_WIDE) {
+ JValue result;
+ result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
+ JValue result;
+ result.SetJ(0);
+ result.SetL(shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_4) {
+ uint32_t dst = inst->VRegA_11n(inst_data);
+ int32_t val = inst->VRegB_11n(inst_data);
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_16) {
+ uint32_t dst = inst->VRegA_21s(inst_data);
+ int32_t val = inst->VRegB_21s();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST) {
+ uint32_t dst = inst->VRegA_31i(inst_data);
+ int32_t val = inst->VRegB_31i();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_HIGH16) {
+ uint32_t dst = inst->VRegA_21h(inst_data);
+ int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE_16)
+ shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE_32)
+ shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE)
+ shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
+ ADVANCE(5);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE_HIGH16)
+ shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
+ static_cast<uint64_t>(inst->VRegB_21h()) << 48);
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_STRING) {
+ String* s = ResolveString(self, mh, inst->VRegB_21c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
+ String* s = ResolveString(self, mh, inst->VRegB_31c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
+ ADVANCE(3);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_CLASS) {
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorEnter(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorExit(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CHECK_CAST) {
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+ if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ ThrowClassCastException(c, obj->GetClass());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ ADVANCE(2);
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INSTANCE_OF) {
+ Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
+ Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
+ ADVANCE(1);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEW_INSTANCE) {
+ Object* obj = AllocObjectFromCode(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEW_ARRAY) {
+ int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
+ Object* obj = AllocArrayFromCode(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ length, self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FILLED_NEW_ARRAY) {
+ bool success = DoFilledNewArray<false, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FILLED_NEW_ARRAY_RANGE) {
+ bool success = DoFilledNewArray<true, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FILL_ARRAY_DATA) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Array* array = obj->AsArray();
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+ self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
+ "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ "failed FILL_ARRAY_DATA; length=%d, index=%d",
+ array->GetLength(), payload->element_count);
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ ADVANCE(3);
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(THROW) {
+ Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(exception == NULL)) {
+ ThrowNullPointerException(NULL, "throw with null exception");
+ } else {
+ self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ }
+ HANDLE_PENDING_EXCEPTION();
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(GOTO) {
+ int8_t offset = inst->VRegA_10t(inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(GOTO_16) {
+ int16_t offset = inst->VRegA_20t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(GOTO_32) {
+ int32_t offset = inst->VRegA_30t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(PACKED_SWITCH) {
+ int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPARSE_SWITCH) {
+ int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPL_FLOAT) {
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPG_FLOAT) {
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPL_DOUBLE) {
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPG_DOUBLE) {
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMP_LONG) {
+ int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
+ int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_EQ) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_NE) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LT) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GE) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GT) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LE) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_EQZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_NEZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LTZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GEZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GTZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LEZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_BYTE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_CHAR) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_SHORT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_WIDE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_OBJECT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_BYTE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_CHAR) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_SHORT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_WIDE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_OBJECT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) {
+ array->SetWithoutChecks(index, val);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_BOOLEAN) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_BYTE) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_CHAR) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_SHORT) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_WIDE) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_OBJECT) {
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_WIDE_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_OBJECT_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_BOOLEAN) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_BYTE) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_CHAR) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_SHORT) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_WIDE) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_OBJECT) {
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_BOOLEAN) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_BYTE) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_CHAR) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_SHORT) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_WIDE) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_OBJECT) {
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_QUICK) {
+ bool success = DoIPutQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
+ bool success = DoIPutQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_OBJECT_QUICK) {
+ bool success = DoIPutQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_BOOLEAN) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_BYTE) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_CHAR) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_SHORT) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_WIDE) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_OBJECT) {
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL) {
+ bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE) {
+ bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_SUPER) {
+ bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_SUPER_RANGE) {
+ bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_DIRECT) {
+ bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_DIRECT_RANGE) {
+ bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_INTERFACE) {
+ bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_INTERFACE_RANGE) {
+ bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_STATIC) {
+ bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_STATIC_RANGE) {
+ bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_QUICK) {
+ bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE_QUICK) {
+ bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_INT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NOT_INT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NOT_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(LONG_TO_INT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(LONG_TO_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(LONG_TO_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FLOAT_TO_INT) {
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+ int32_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<float>(kMaxInt)) {
+ result = kMaxInt;
+ } else if (val < static_cast<float>(kMinInt)) {
+ result = kMinInt;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FLOAT_TO_LONG) {
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+ int64_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<float>(kMaxLong)) {
+ result = kMaxLong;
+ } else if (val < static_cast<float>(kMinLong)) {
+ result = kMinLong;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FLOAT_TO_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DOUBLE_TO_INT) {
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int32_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<double>(kMaxInt)) {
+ result = kMaxInt;
+ } else if (val < static_cast<double>(kMinInt)) {
+ result = kMinInt;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DOUBLE_TO_LONG) {
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int64_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<double>(kMaxLong)) {
+ result = kMaxLong;
+ } else if (val < static_cast<double>(kMinLong)) {
+ result = kMinLong;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DOUBLE_TO_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_BYTE)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_CHAR)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_SHORT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) +
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) -
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) *
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT) {
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT) {
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) &
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) |
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) ^
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) +
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) -
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) *
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_LONG) {
+ bool success = DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_LONG) {
+ bool success = DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) &
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) |
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
+ shadow_frame.GetVRegFloat(inst->VRegC_23x())));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
+ shadow_frame.GetVRegDouble(inst->VRegC_23x())));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) +
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) -
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) *
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) &
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) |
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) ^
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) +
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) -
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) *
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) &
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) |
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) ^
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) +
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) -
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) *
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) /
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ fmodf(shadow_frame.GetVRegFloat(vregA),
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) +
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) -
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) *
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) /
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ fmod(shadow_frame.GetVRegDouble(vregA),
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RSUB_INT)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ inst->VRegC_22s() -
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT_LIT16) {
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT_LIT16) {
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) +
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RSUB_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ inst->VRegC_22b() -
+ shadow_frame.GetVReg(inst->VRegB_22b()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) *
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT_LIT8) {
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT_LIT8) {
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) &
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) |
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) ^
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) <<
+ (inst->VRegC_22b() & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) >>
+ (inst->VRegC_22b() & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
+ (inst->VRegC_22b() & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_3E)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_3F)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_40)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_41)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_42)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_43)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_79)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_7A)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EB)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EC)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_ED)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EE)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EF)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F0)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F1)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F2)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F3)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F4)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F5)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F6)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F7)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F8)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F9)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FA)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FB)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FC)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FD)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FE)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FF)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ exception_pending_label: {
+ CHECK(self->IsExceptionPending());
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_);
+ uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
+ this_object,
+ instrumentation);
+ if (found_dex_pc == DexFile::kDexNoIndex) {
+ return JValue(); /* Handled in caller. */
+ } else {
+ int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
+ ADVANCE(displacement);
+ }
+ }
+
+ // Create alternative instruction handlers dedicated to instrumentation.
+#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
+ instrumentation_op_##code: { \
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_), \
+ shadow_frame.GetMethod(), dex_pc); \
+ goto *handlersTable[Instruction::code]; \
+ }
+#include "dex_instruction_list.h"
+ DEX_INSTRUCTION_LIST(INSTRUMENTATION_INSTRUCTION_HANDLER)
+#undef DEX_INSTRUCTION_LIST
+#undef INSTRUMENTATION_INSTRUCTION_HANDLER
+} // NOLINT(readability/fn_size)
+
+// Explicit definitions of ExecuteGotoImpl.
+template JValue ExecuteGotoImpl<true>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template JValue ExecuteGotoImpl<false>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
new file mode 100644
index 0000000..b2e480f
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -0,0 +1,2149 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_common.h"
+
+namespace art {
+namespace interpreter {
+
+#define HANDLE_PENDING_EXCEPTION() \
+ do { \
+ CHECK(self->IsExceptionPending()); \
+ if (UNLIKELY(self->TestAllFlags())) { \
+ CheckSuspend(self); \
+ } \
+ Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_); \
+ uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, \
+ inst->GetDexPc(insns), \
+ this_object, \
+ instrumentation); \
+ if (found_dex_pc == DexFile::kDexNoIndex) { \
+ return JValue(); /* Handled in caller. */ \
+ } else { \
+ int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); \
+ inst = inst->RelativeAt(displacement); \
+ } \
+ } while (false)
+
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _next_function) \
+ do { \
+ if (UNLIKELY(_is_exception_pending)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ } else { \
+ inst = inst->_next_function(); \
+ } \
+ } while (false)
+
+// Code to run before each dex instruction.
+#define PREAMBLE()
+
+template<bool do_access_check>
+static JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register) {
+ if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
+ LOG(FATAL) << "Invalid shadow frame for interpreter use";
+ return JValue();
+ }
+ self->VerifyStack();
+
+ uint32_t dex_pc = shadow_frame.GetDexPC();
+ const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+ if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
+ if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
+ instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), 0);
+ }
+ }
+ const uint16_t* const insns = code_item->insns_;
+ const Instruction* inst = Instruction::At(insns + dex_pc);
+ uint16_t inst_data;
+ while (true) {
+ dex_pc = inst->GetDexPc(insns);
+ shadow_frame.SetDexPC(dex_pc);
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
+ }
+ TraceExecution(shadow_frame, inst, dex_pc, mh);
+ inst_data = inst->Fetch16(0);
+ switch (inst->Opcode(inst_data)) {
+ case Instruction::NOP:
+ PREAMBLE();
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_FROM16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MOVE_16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_32x(),
+ shadow_frame.GetVReg(inst->VRegB_32x()));
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::MOVE_WIDE:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_WIDE_FROM16:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_22x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MOVE_WIDE_16:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_32x(),
+ shadow_frame.GetVRegLong(inst->VRegB_32x()));
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::MOVE_OBJECT:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_OBJECT_FROM16:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_22x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MOVE_OBJECT_16:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_32x(),
+ shadow_frame.GetVRegReference(inst->VRegB_32x()));
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::MOVE_RESULT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_11x(inst_data), result_register.GetI());
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_RESULT_WIDE:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), result_register.GetJ());
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_RESULT_OBJECT:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), result_register.GetL());
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_EXCEPTION: {
+ PREAMBLE();
+ Throwable* exception = self->GetException(NULL);
+ self->ClearException();
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::RETURN_VOID: {
+ PREAMBLE();
+ JValue result;
+ if (do_access_check) {
+ // If access checks are required then the dex-to-dex compiler and analysis of
+ // whether the class has final fields hasn't been performed. Conservatively
+ // perform the memory barrier now.
+ ANDROID_MEMBAR_STORE();
+ }
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN_VOID_BARRIER: {
+ PREAMBLE();
+ ANDROID_MEMBAR_STORE();
+ JValue result;
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN: {
+ PREAMBLE();
+ JValue result;
+ result.SetJ(0);
+ result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN_WIDE: {
+ PREAMBLE();
+ JValue result;
+ result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN_OBJECT: {
+ PREAMBLE();
+ JValue result;
+ result.SetJ(0);
+ result.SetL(shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::CONST_4: {
+ PREAMBLE();
+ uint4_t dst = inst->VRegA_11n(inst_data);
+ int4_t val = inst->VRegB_11n(inst_data);
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::CONST_16: {
+ PREAMBLE();
+ uint8_t dst = inst->VRegA_21s(inst_data);
+ int16_t val = inst->VRegB_21s();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CONST: {
+ PREAMBLE();
+ uint8_t dst = inst->VRegA_31i(inst_data);
+ int32_t val = inst->VRegB_31i();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_3xx();
+ break;
+ }
+ case Instruction::CONST_HIGH16: {
+ PREAMBLE();
+ uint8_t dst = inst->VRegA_21h(inst_data);
+ int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CONST_WIDE_16:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::CONST_WIDE_32:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::CONST_WIDE:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
+ inst = inst->Next_51l();
+ break;
+ case Instruction::CONST_WIDE_HIGH16:
+ shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
+ static_cast<uint64_t>(inst->VRegB_21h()) << 48);
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::CONST_STRING: {
+ PREAMBLE();
+ String* s = ResolveString(self, mh, inst->VRegB_21c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::CONST_STRING_JUMBO: {
+ PREAMBLE();
+ String* s = ResolveString(self, mh, inst->VRegB_31c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
+ inst = inst->Next_3xx();
+ }
+ break;
+ }
+ case Instruction::CONST_CLASS: {
+ PREAMBLE();
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::MONITOR_ENTER: {
+ PREAMBLE();
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorEnter(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ }
+ break;
+ }
+ case Instruction::MONITOR_EXIT: {
+ PREAMBLE();
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorExit(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ }
+ break;
+ }
+ case Instruction::CHECK_CAST: {
+ PREAMBLE();
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+ if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ ThrowClassCastException(c, obj->GetClass());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ inst = inst->Next_2xx();
+ }
+ }
+ break;
+ }
+ case Instruction::INSTANCE_OF: {
+ PREAMBLE();
+ Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::ARRAY_LENGTH: {
+ PREAMBLE();
+ Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
+ inst = inst->Next_1xx();
+ }
+ break;
+ }
+ case Instruction::NEW_INSTANCE: {
+ PREAMBLE();
+ Object* obj = AllocObjectFromCode(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::NEW_ARRAY: {
+ PREAMBLE();
+ int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
+ Object* obj = AllocArrayFromCode(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ length, self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::FILLED_NEW_ARRAY: {
+ PREAMBLE();
+ bool success = DoFilledNewArray<false, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::FILLED_NEW_ARRAY_RANGE: {
+ PREAMBLE();
+ bool success = DoFilledNewArray<true, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::FILL_ARRAY_DATA: {
+ PREAMBLE();
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ Array* array = obj->AsArray();
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+ self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
+ "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ "failed FILL_ARRAY_DATA; length=%d, index=%d",
+ array->GetLength(), payload->element_count);
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ inst = inst->Next_3xx();
+ break;
+ }
+ case Instruction::THROW: {
+ PREAMBLE();
+ Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(exception == NULL)) {
+ ThrowNullPointerException(NULL, "throw with null exception");
+ } else {
+ self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ }
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ case Instruction::GOTO: {
+ PREAMBLE();
+ int8_t offset = inst->VRegA_10t(inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::GOTO_16: {
+ PREAMBLE();
+ int16_t offset = inst->VRegA_20t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::GOTO_32: {
+ PREAMBLE();
+ int32_t offset = inst->VRegA_30t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::PACKED_SWITCH: {
+ PREAMBLE();
+ int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::SPARSE_SWITCH: {
+ PREAMBLE();
+ int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::CMPL_FLOAT: {
+ PREAMBLE();
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CMPG_FLOAT: {
+ PREAMBLE();
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CMPL_DOUBLE: {
+ PREAMBLE();
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+
+ case Instruction::CMPG_DOUBLE: {
+ PREAMBLE();
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CMP_LONG: {
+ PREAMBLE();
+ int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
+ int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::IF_EQ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_NE: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LT: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GE: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GT: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LE: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_EQZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_NEZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LTZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GEZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GTZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LEZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::AGET_BOOLEAN: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_BYTE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_CHAR: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_SHORT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_WIDE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_OBJECT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_BOOLEAN: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_BYTE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_CHAR: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_SHORT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_WIDE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_OBJECT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) {
+ array->SetWithoutChecks(index, val);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::IGET_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_WIDE_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_OBJECT_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_QUICK: {
+ PREAMBLE();
+ bool success = DoIPutQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_WIDE_QUICK: {
+ PREAMBLE();
+ bool success = DoIPutQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_OBJECT_QUICK: {
+ PREAMBLE();
+ bool success = DoIPutQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL: {
+ PREAMBLE();
+ bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_SUPER: {
+ PREAMBLE();
+ bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_SUPER_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_DIRECT: {
+ PREAMBLE();
+ bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_DIRECT_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_INTERFACE: {
+ PREAMBLE();
+ bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_INTERFACE_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_STATIC: {
+ PREAMBLE();
+ bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_STATIC_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
+ PREAMBLE();
+ bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ PREAMBLE();
+ bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::NEG_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NOT_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NEG_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NOT_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NEG_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NEG_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::LONG_TO_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::LONG_TO_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::LONG_TO_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::FLOAT_TO_INT: {
+ PREAMBLE();
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+ int32_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<float>(kMaxInt)) {
+ result = kMaxInt;
+ } else if (val < static_cast<float>(kMinInt)) {
+ result = kMinInt;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::FLOAT_TO_LONG: {
+ PREAMBLE();
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+ int64_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<float>(kMaxLong)) {
+ result = kMaxLong;
+ } else if (val < static_cast<float>(kMinLong)) {
+ result = kMinLong;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::FLOAT_TO_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::DOUBLE_TO_INT: {
+ PREAMBLE();
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int32_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<double>(kMaxInt)) {
+ result = kMaxInt;
+ } else if (val < static_cast<double>(kMinInt)) {
+ result = kMinInt;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DOUBLE_TO_LONG: {
+ PREAMBLE();
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int64_t result;
+ if (val != val) {
+ result = 0;
+ } else if (val > static_cast<double>(kMaxLong)) {
+ result = kMaxLong;
+ } else if (val < static_cast<double>(kMinLong)) {
+ result = kMinLong;
+ } else {
+ result = val;
+ }
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DOUBLE_TO_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_BYTE:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_CHAR:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_SHORT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::ADD_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) +
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) -
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) *
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_INT: {
+ PREAMBLE();
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::REM_INT: {
+ PREAMBLE();
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SHL_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::USHR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::AND_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) &
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) |
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) ^
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) +
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) -
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) *
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_LONG:
+ PREAMBLE();
+ DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+ break;
+ case Instruction::REM_LONG:
+ PREAMBLE();
+ DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+ break;
+ case Instruction::AND_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) &
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) |
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHL_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::USHR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::REM_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
+ shadow_frame.GetVRegFloat(inst->VRegC_23x())));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::REM_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
+ shadow_frame.GetVRegDouble(inst->VRegC_23x())));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) +
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) -
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) *
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+ break;
+ }
+ case Instruction::REM_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+ break;
+ }
+ case Instruction::SHL_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SHR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::USHR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::AND_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) &
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::OR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) |
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::XOR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) ^
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) +
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) -
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) *
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ break;
+ }
+ case Instruction::REM_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ break;
+ }
+ case Instruction::AND_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) &
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::OR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) |
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::XOR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) ^
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SHL_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SHR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::USHR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) +
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) -
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) *
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) /
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::REM_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ fmodf(shadow_frame.GetVRegFloat(vregA),
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) +
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) -
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) *
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) /
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::REM_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ fmod(shadow_frame.GetVRegDouble(vregA),
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::RSUB_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ inst->VRegC_22s() -
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_INT_LIT16: {
+ PREAMBLE();
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::REM_INT_LIT16: {
+ PREAMBLE();
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::AND_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) +
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::RSUB_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ inst->VRegC_22b() -
+ shadow_frame.GetVReg(inst->VRegB_22b()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) *
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_INT_LIT8: {
+ PREAMBLE();
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::REM_INT_LIT8: {
+ PREAMBLE();
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::AND_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) &
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) |
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) ^
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHL_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) <<
+ (inst->VRegC_22b() & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) >>
+ (inst->VRegC_22b() & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::USHR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
+ (inst->VRegC_22b() & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
+ case Instruction::UNUSED_EB ... Instruction::UNUSED_FF:
+ case Instruction::UNUSED_79:
+ case Instruction::UNUSED_7A:
+ UnexpectedOpcode(inst, mh);
+ }
+ }
+} // NOLINT(readability/fn_size)
+
+// Explicit definitions of ExecuteSwitchImpl.
+template JValue ExecuteSwitchImpl<true>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template JValue ExecuteSwitchImpl<false>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d72ddf6..7f0fde4 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -314,14 +314,14 @@
return soa.EncodeField(field);
}
-static void PinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array)
+static void PinPrimitiveArray(const ScopedObjectAccess& soa, Array* array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JavaVMExt* vm = soa.Vm();
MutexLock mu(soa.Self(), vm->pins_lock);
vm->pin_table.Add(array);
}
-static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array)
+static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, Array* array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JavaVMExt* vm = soa.Vm();
MutexLock mu(soa.Self(), vm->pins_lock);
@@ -1997,7 +1997,7 @@
CHECK_NON_NULL_ARGUMENT(GetStringUTFRegion, java_string);
ScopedObjectAccess soa(env);
String* s = soa.Decode<String*>(java_string);
- const CharArray* chars = s->GetCharArray();
+ CharArray* chars = s->GetCharArray();
PinPrimitiveArray(soa, chars);
if (is_copy != NULL) {
*is_copy = JNI_FALSE;
@@ -3217,6 +3217,18 @@
return native_method;
}
+void JavaVMExt::SweepJniWeakGlobals(RootVisitor visitor, void* arg) {
+ WriterMutexLock mu(Thread::Current(), weak_globals_lock);
+ for (mirror::Object** entry : weak_globals) {
+ mirror::Object* obj = *entry;
+ mirror::Object* new_obj = visitor(obj, arg);
+ if (new_obj == nullptr) {
+ new_obj = kClearedJniWeakGlobal;
+ }
+ *entry = new_obj;
+ }
+}
+
void JavaVMExt::VisitRoots(RootVisitor* visitor, void* arg) {
Thread* self = Thread::Current();
{
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index bad3841..2fcebf0 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -89,6 +89,8 @@
void SetCheckJniEnabled(bool enabled);
+ void SweepJniWeakGlobals(RootVisitor visitor, void* arg);
+
void VisitRoots(RootVisitor*, void*);
Runtime* runtime;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 79d156d..c389580 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1012,31 +1012,50 @@
scalar_type, \
expected_class_descriptor) \
jsize size = 4; \
+ \
/* Allocate an array and check it has the right type and length. */ \
scalar_type ## Array a = env_->new_fn(size); \
EXPECT_TRUE(a != NULL); \
EXPECT_TRUE(env_->IsInstanceOf(a, env_->FindClass(expected_class_descriptor))); \
EXPECT_EQ(size, env_->GetArrayLength(a)); \
+ \
+ /* GetPrimitiveArrayRegion/SetPrimitiveArrayRegion */ \
/* AIOOBE for negative start offset. */ \
env_->get_region_fn(a, -1, 1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
env_->set_region_fn(a, -1, 1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
+ \
/* AIOOBE for negative length. */ \
env_->get_region_fn(a, 0, -1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
env_->set_region_fn(a, 0, -1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
+ \
/* AIOOBE for buffer overrun. */ \
env_->get_region_fn(a, size - 1, size, NULL); \
EXPECT_EXCEPTION(aioobe_); \
env_->set_region_fn(a, size - 1, size, NULL); \
EXPECT_EXCEPTION(aioobe_); \
+ \
+ /* It's okay for the buffer to be NULL as long as the length is 0. */ \
+ env_->get_region_fn(a, 2, 0, NULL); \
+ /* Even if the offset is invalid... */ \
+ env_->get_region_fn(a, 123, 0, NULL); \
+ EXPECT_EXCEPTION(aioobe_); \
+ \
+ /* It's okay for the buffer to be NULL as long as the length is 0. */ \
+ env_->set_region_fn(a, 2, 0, NULL); \
+ /* Even if the offset is invalid... */ \
+ env_->set_region_fn(a, 123, 0, NULL); \
+ EXPECT_EXCEPTION(aioobe_); \
+ \
/* Prepare a couple of buffers. */ \
UniquePtr<scalar_type[]> src_buf(new scalar_type[size]); \
UniquePtr<scalar_type[]> dst_buf(new scalar_type[size]); \
for (jsize i = 0; i < size; ++i) { src_buf[i] = scalar_type(i); } \
for (jsize i = 0; i < size; ++i) { dst_buf[i] = scalar_type(-1); } \
+ \
/* Copy all of src_buf onto the heap. */ \
env_->set_region_fn(a, 0, size, &src_buf[0]); \
/* Copy back only part. */ \
@@ -1252,6 +1271,12 @@
EXPECT_EQ('l', chars[2]);
EXPECT_EQ('x', chars[3]);
+ // It's okay for the buffer to be NULL as long as the length is 0.
+ env_->GetStringRegion(s, 2, 0, NULL);
+ // Even if the offset is invalid...
+ env_->GetStringRegion(s, 123, 0, NULL);
+ EXPECT_EXCEPTION(sioobe_);
+
env_->GetStringUTFRegion(s, -1, 0, NULL);
EXPECT_EXCEPTION(sioobe_);
env_->GetStringUTFRegion(s, 0, -1, NULL);
@@ -1267,6 +1292,12 @@
EXPECT_EQ('e', bytes[1]);
EXPECT_EQ('l', bytes[2]);
EXPECT_EQ('x', bytes[3]);
+
+ // It's okay for the buffer to be NULL as long as the length is 0.
+ env_->GetStringUTFRegion(s, 2, 0, NULL);
+ // Even if the offset is invalid...
+ env_->GetStringUTFRegion(s, 123, 0, NULL);
+ EXPECT_EXCEPTION(sioobe_);
}
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index eb73c7d..c7b370f 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -20,6 +20,8 @@
#include "array.h"
#include "class.h"
+#include "thread.h"
+#include "utils.h"
namespace art {
namespace mirror {
@@ -33,6 +35,39 @@
return header_size + data_size;
}
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
+ size_t component_size) {
+ DCHECK(array_class != NULL);
+ DCHECK_GE(component_count, 0);
+ DCHECK(array_class->IsArrayClass());
+
+ size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
+ size_t data_size = component_count * component_size;
+ size_t size = header_size + data_size;
+
+ // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
+ size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size);
+ if (UNLIKELY(data_size >> component_shift != size_t(component_count) || size < data_size)) {
+ self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow",
+ PrettyDescriptor(array_class).c_str(),
+ component_count).c_str());
+ return NULL;
+ }
+
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ Array* array = down_cast<Array*>(heap->AllocObject(self, array_class, size));
+ if (LIKELY(array != NULL)) {
+ DCHECK(array->IsArrayInstance());
+ array->SetLength(component_count);
+ }
+ return array;
+}
+
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
+ DCHECK(array_class->IsArrayClass());
+ return Alloc(self, array_class, component_count, array_class->GetComponentSize());
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 88cd309..020085d 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -32,39 +32,6 @@
namespace art {
namespace mirror {
-Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size) {
- DCHECK(array_class != NULL);
- DCHECK_GE(component_count, 0);
- DCHECK(array_class->IsArrayClass());
-
- size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
- size_t data_size = component_count * component_size;
- size_t size = header_size + data_size;
-
- // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
- size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size);
- if (UNLIKELY(data_size >> component_shift != size_t(component_count) || size < data_size)) {
- self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow",
- PrettyDescriptor(array_class).c_str(),
- component_count).c_str());
- return NULL;
- }
-
- gc::Heap* heap = Runtime::Current()->GetHeap();
- Array* array = down_cast<Array*>(heap->AllocObject(self, array_class, size));
- if (array != NULL) {
- DCHECK(array->IsArrayInstance());
- array->SetLength(component_count);
- }
- return array;
-}
-
-Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
- DCHECK(array_class->IsArrayClass());
- return Alloc(self, array_class, component_count, array_class->GetComponentSize());
-}
-
// Create a multi-dimensional array of Objects or primitive types.
//
// We have to generate the names for X[], X[][], X[][][], and so on. The
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 1e11387..438ce81 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -342,6 +342,15 @@
SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, name_), name, false);
}
+inline Object* Class::AllocObject(Thread* self) {
+ DCHECK(!IsArrayClass()) << PrettyClass(this);
+ DCHECK(IsInstantiable()) << PrettyClass(this);
+ // TODO: decide whether we want this check. It currently fails during bootstrap.
+ // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
+ DCHECK_GE(this->object_size_, sizeof(Object));
+ return Runtime::Current()->GetHeap()->AllocObject(self, this, this->object_size_);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 5e8b827..328c67d 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -118,15 +118,6 @@
SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache, false);
}
-Object* Class::AllocObject(Thread* self) {
- DCHECK(!IsArrayClass()) << PrettyClass(this);
- DCHECK(IsInstantiable()) << PrettyClass(this);
- // TODO: decide whether we want this check. It currently fails during bootstrap.
- // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
- DCHECK_GE(this->object_size_, sizeof(Object));
- return Runtime::Current()->GetHeap()->AllocObject(self, this, this->object_size_);
-}
-
void Class::SetClassSize(size_t new_class_size) {
DCHECK_GE(new_class_size, GetClassSize()) << " class=" << PrettyTypeOf(this);
SetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size, false);
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index a505ed0..9d76c6b 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -17,6 +17,7 @@
#include "stack_trace_element.h"
#include "class.h"
+#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "object-inl.h"
#include "string.h"
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index f8a0e53..b82683e 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -17,6 +17,7 @@
#include "string.h"
#include "array.h"
+#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "intern_table.h"
#include "object-inl.h"
@@ -32,6 +33,10 @@
return GetFieldObject<const CharArray*>(ValueOffset(), false);
}
+CharArray* String::GetCharArray() {
+ return GetFieldObject<CharArray*>(ValueOffset(), false);
+}
+
void String::ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()));
}
@@ -285,4 +290,3 @@
} // namespace mirror
} // namespace art
-
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index bf545ea..01d8f31 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -44,6 +44,7 @@
}
const CharArray* GetCharArray() const;
+ CharArray* GetCharArray();
int32_t GetOffset() const {
int32_t result = GetField32(OffsetOffset(), false);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 66c51e6..570c2be 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -194,6 +194,10 @@
return obj_;
}
+void Monitor::SetObject(mirror::Object* object) {
+ obj_ = object;
+}
+
void Monitor::Lock(Thread* self) {
if (owner_ == self) {
lock_count_++;
@@ -260,8 +264,7 @@
if (!Runtime::Current()->IsStarted()) {
std::ostringstream ss;
self->Dump(ss);
- std::string str(ss.str());
- LOG(ERROR) << "IllegalMonitorStateException: " << str;
+ LOG(ERROR) << self->GetException(NULL)->Dump() << "\n" << ss.str();
}
va_end(args);
}
@@ -1002,15 +1005,19 @@
list_.push_front(m);
}
-void MonitorList::SweepMonitorList(IsMarkedTester is_marked, void* arg) {
+void MonitorList::SweepMonitorList(RootVisitor visitor, void* arg) {
MutexLock mu(Thread::Current(), monitor_list_lock_);
for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
- if (!is_marked(m->GetObject(), arg)) {
- VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
+ mirror::Object* obj = m->GetObject();
+ mirror::Object* new_obj = visitor(obj, arg);
+ if (new_obj == nullptr) {
+ VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
+ << m->GetObject();
delete m;
it = list_.erase(it);
} else {
+ m->SetObject(new_obj);
++it;
}
}
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 6651768..4249316 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -103,6 +103,7 @@
static bool IsValidLockWord(int32_t lock_word);
mirror::Object* GetObject();
+ void SetObject(mirror::Object* object);
private:
explicit Monitor(Thread* owner, mirror::Object* obj)
@@ -159,7 +160,7 @@
int lock_count_ GUARDED_BY(monitor_lock_);
// What object are we part of (for debugging).
- mirror::Object* const obj_;
+ mirror::Object* obj_;
// Threads currently waiting on this monitor.
Thread* wait_set_ GUARDED_BY(monitor_lock_);
@@ -183,8 +184,7 @@
void Add(Monitor* m);
- void SweepMonitorList(IsMarkedTester is_marked, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void SweepMonitorList(RootVisitor visitor, void* arg);
private:
Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 30b4dc7..100f5a9 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -316,6 +316,28 @@
}
}
+static void System_arraycopyCharUnchecked(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) {
+ ScopedObjectAccess soa(env);
+ DCHECK(javaSrc != NULL);
+ DCHECK(javaDst != NULL);
+ mirror::Object* srcObject = soa.Decode<mirror::Object*>(javaSrc);
+ mirror::Object* dstObject = soa.Decode<mirror::Object*>(javaDst);
+ DCHECK(srcObject->IsArrayInstance());
+ DCHECK(dstObject->IsArrayInstance());
+ mirror::Array* srcArray = srcObject->AsArray();
+ mirror::Array* dstArray = dstObject->AsArray();
+ DCHECK(srcPos >= 0 && dstPos >= 0 && length >= 0 &&
+ srcPos + length <= srcArray->GetLength() && dstPos + length <= dstArray->GetLength());
+ DCHECK_EQ(srcArray->GetClass()->GetComponentType(), dstArray->GetClass()->GetComponentType());
+ DCHECK(srcArray->GetClass()->GetComponentType()->IsPrimitive());
+ DCHECK(dstArray->GetClass()->GetComponentType()->IsPrimitive());
+ DCHECK_EQ(srcArray->GetClass()->GetComponentSize(), static_cast<size_t>(2));
+ DCHECK_EQ(dstArray->GetClass()->GetComponentSize(), static_cast<size_t>(2));
+ uint8_t* dstBytes = reinterpret_cast<uint8_t*>(dstArray->GetRawData(2));
+ const uint8_t* srcBytes = reinterpret_cast<const uint8_t*>(srcArray->GetRawData(2));
+ move16(dstBytes + dstPos * 2, srcBytes + srcPos * 2, length * 2);
+}
+
static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(javaObject);
@@ -324,6 +346,7 @@
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(System, arraycopy, "(Ljava/lang/Object;ILjava/lang/Object;II)V"),
+ NATIVE_METHOD(System, arraycopyCharUnchecked, "([CI[CII)V"),
NATIVE_METHOD(System, identityHashCode, "(Ljava/lang/Object;)I"),
};
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 8e23cbb..e95fdb9 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -38,16 +38,16 @@
ReferenceTable::~ReferenceTable() {
}
-void ReferenceTable::Add(const mirror::Object* obj) {
+void ReferenceTable::Add(mirror::Object* obj) {
DCHECK(obj != NULL);
- if (entries_.size() == max_size_) {
+ if (entries_.size() >= max_size_) {
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
<< "overflowed (" << max_size_ << " entries)";
}
entries_.push_back(obj);
}
-void ReferenceTable::Remove(const mirror::Object* obj) {
+void ReferenceTable::Remove(mirror::Object* obj) {
// We iterate backwards on the assumption that references are LIFO.
for (int i = entries_.size() - 1; i >= 0; --i) {
if (entries_[i] == obj) {
@@ -232,8 +232,8 @@
}
void ReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
- for (const auto& ref : entries_) {
- visitor(ref, arg);
+ for (auto& ref : entries_) {
+ ref = visitor(const_cast<mirror::Object*>(ref), arg);
}
}
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index e369fd0..37b3172 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -39,9 +39,9 @@
ReferenceTable(const char* name, size_t initial_size, size_t max_size);
~ReferenceTable();
- void Add(const mirror::Object* obj);
+ void Add(mirror::Object* obj);
- void Remove(const mirror::Object* obj);
+ void Remove(mirror::Object* obj);
size_t Size() const;
@@ -50,7 +50,7 @@
void VisitRoots(RootVisitor* visitor, void* arg);
private:
- typedef std::vector<const mirror::Object*> Table;
+ typedef std::vector<mirror::Object*> Table;
static void Dump(std::ostream& os, const Table& entries)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
diff --git a/runtime/root_visitor.h b/runtime/root_visitor.h
index 3aa9b4b..a2d898b 100644
--- a/runtime/root_visitor.h
+++ b/runtime/root_visitor.h
@@ -23,7 +23,8 @@
} // namespace mirror
class StackVisitor;
-typedef void (RootVisitor)(const mirror::Object* root, void* arg);
+typedef mirror::Object* (RootVisitor)(mirror::Object* root, void* arg)
+ __attribute__((warn_unused_result));
typedef void (VerifyRootVisitor)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor);
typedef bool (IsMarkedTester)(const mirror::Object* object, void* arg);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f7b5f74..c37b783 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -319,6 +319,12 @@
return result;
}
+void Runtime::SweepSystemWeaks(RootVisitor* visitor, void* arg) {
+ GetInternTable()->SweepInternTableWeaks(visitor, arg);
+ GetMonitorList()->SweepMonitorList(visitor, arg);
+ GetJavaVM()->SweepJniWeakGlobals(visitor, arg);
+}
+
Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, bool ignore_unrecognized) {
UniquePtr<ParsedOptions> parsed(new ParsedOptions());
const char* boot_class_path_string = getenv("BOOTCLASSPATH");
@@ -1138,12 +1144,17 @@
void Runtime::VisitNonThreadRoots(RootVisitor* visitor, void* arg) {
java_vm_->VisitRoots(visitor, arg);
- if (pre_allocated_OutOfMemoryError_ != NULL) {
- visitor(pre_allocated_OutOfMemoryError_, arg);
+ if (pre_allocated_OutOfMemoryError_ != nullptr) {
+ pre_allocated_OutOfMemoryError_ = reinterpret_cast<mirror::Throwable*>(
+ visitor(pre_allocated_OutOfMemoryError_, arg));
+ DCHECK(pre_allocated_OutOfMemoryError_ != nullptr);
}
- visitor(resolution_method_, arg);
+ resolution_method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(resolution_method_, arg));
+ DCHECK(resolution_method_ != nullptr);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- visitor(callee_save_methods_[i], arg);
+ callee_save_methods_[i] = reinterpret_cast<mirror::ArtMethod*>(
+ visitor(callee_save_methods_[i], arg));
+ DCHECK(callee_save_methods_[i] != nullptr);
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 21161a0..5acd5d7 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -317,6 +317,10 @@
void VisitNonConcurrentRoots(RootVisitor* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the
+ // system weak is updated to be the visitor's returned value.
+ void SweepSystemWeaks(RootVisitor* visitor, void* arg);
+
// Returns a special method that calls into a trampoline for runtime method resolution
mirror::ArtMethod* GetResolutionMethod() const {
CHECK(HasResolutionMethod());
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
index 81f0dff..25d6fb3 100644
--- a/runtime/sirt_ref.h
+++ b/runtime/sirt_ref.h
@@ -30,7 +30,7 @@
self_->PushSirt(&sirt_);
}
~SirtRef() {
- CHECK(self_->PopSirt() == &sirt_);
+ CHECK_EQ(self_->PopSirt(), &sirt_);
}
T& operator*() const { return *get(); }
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 206bff3..1715664 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -148,8 +148,8 @@
const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
size_t frame_size = m->GetFrameSizeInBytes();
- return GetVReg(cur_quick_frame_, code_item, m->GetCoreSpillMask(), m->GetFpSpillMask(),
- frame_size, vreg);
+ return *GetVRegAddr(cur_quick_frame_, code_item, m->GetCoreSpillMask(), m->GetFpSpillMask(),
+ frame_size, vreg);
}
} else {
return cur_shadow_frame_->GetVReg(vreg);
diff --git a/runtime/stack.h b/runtime/stack.h
index 8ecf8f0..bd29ceb 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -150,7 +150,12 @@
mirror::Object* GetVRegReference(size_t i) const {
DCHECK_LT(i, NumberOfVRegs());
if (HasReferenceArray()) {
- return References()[i];
+ mirror::Object* ref = References()[i];
+ // If the vreg reference is not equal to the vreg then the vreg reference is stale.
+ if (reinterpret_cast<uint32_t>(ref) != vregs_[i]) {
+ return nullptr;
+ }
+ return ref;
} else {
const uint32_t* vreg = &vregs_[i];
return *reinterpret_cast<mirror::Object* const*>(vreg);
@@ -459,13 +464,14 @@
uintptr_t GetGPR(uint32_t reg) const;
void SetGPR(uint32_t reg, uintptr_t value);
- uint32_t GetVReg(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
+ // This is a fast-path for getting/setting values in a quick frame.
+ uint32_t* GetVRegAddr(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
uint16_t vreg) const {
int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
- return *reinterpret_cast<uint32_t*>(vreg_addr);
+ return reinterpret_cast<uint32_t*>(vreg_addr);
}
uintptr_t GetReturnPc() const;
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index c22f2cd..4552062 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -19,11 +19,24 @@
#include "thread.h"
+#include <pthread.h>
+
#include "base/mutex-inl.h"
#include "cutils/atomic-inline.h"
namespace art {
+inline Thread* Thread::Current() {
+ // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
+ // that we can replace this with a direct %fs access on x86.
+ if (!is_started_) {
+ return NULL;
+ } else {
+ void* thread = pthread_getspecific(Thread::pthread_key_self_);
+ return reinterpret_cast<Thread*>(thread);
+ }
+}
+
inline ThreadState Thread::SetState(ThreadState new_state) {
// Cannot use this code to change into Runnable as changing to Runnable should fail if
// old_state_and_flags.suspend_request is true.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a454195..d7d4b1f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1012,9 +1012,10 @@
}
}
-static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
+static mirror::Object* MonitorExitVisitor(mirror::Object* object, void* arg)
+ NO_THREAD_SAFETY_ANALYSIS {
Thread* self = reinterpret_cast<Thread*>(arg);
- mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
+ mirror::Object* entered_monitor = object;
if (self->HoldsLock(entered_monitor)) {
LOG(WARNING) << "Calling MonitorExit on object "
<< object << " (" << PrettyTypeOf(object) << ")"
@@ -1022,6 +1023,7 @@
<< *Thread::Current() << " which is detaching";
entered_monitor->MonitorExit(self);
}
+ return object;
}
void Thread::Destroy() {
@@ -1151,8 +1153,12 @@
size_t num_refs = cur->NumberOfReferences();
for (size_t j = 0; j < num_refs; j++) {
mirror::Object* object = cur->GetReference(j);
- if (object != NULL) {
- visitor(object, arg);
+ if (object != nullptr) {
+ const mirror::Object* new_obj = visitor(object, arg);
+ DCHECK(new_obj != nullptr);
+ if (new_obj != object) {
+ cur->SetReference(j, const_cast<mirror::Object*>(new_obj));
+ }
}
}
}
@@ -2019,8 +2025,11 @@
// SIRT for JNI or References for interpreter.
for (size_t reg = 0; reg < num_regs; ++reg) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
}
}
} else {
@@ -2040,8 +2049,11 @@
for (size_t reg = 0; reg < num_regs; ++reg) {
if (TestBitmap(reg, reg_bitmap)) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
}
}
}
@@ -2072,19 +2084,25 @@
// Does this register hold a reference?
if (TestBitmap(reg, reg_bitmap)) {
uint32_t vmap_offset;
- mirror::Object* ref;
if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
- uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
- kReferenceVReg));
- ref = reinterpret_cast<mirror::Object*>(val);
+ int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(GetGPR(vmap_reg));
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (ref != new_ref) {
+ SetGPR(vmap_reg, reinterpret_cast<uintptr_t>(new_ref));
+ }
+ }
} else {
- ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
- core_spills, fp_spills, frame_size,
- reg));
- }
-
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ uint32_t* reg_addr =
+ GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size, reg);
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(*reg_addr);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (ref != new_ref) {
+ *reg_addr = reinterpret_cast<uint32_t>(new_ref);
+ }
+ }
}
}
}
@@ -2110,8 +2128,8 @@
public:
RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
- void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
- visitor_(obj, arg_);
+ mirror::Object* operator()(mirror::Object* obj, size_t, const StackVisitor*) const {
+ return visitor_(obj, arg_);
}
private:
@@ -2135,67 +2153,17 @@
void* const arg_;
};
-struct VerifyRootWrapperArg {
- VerifyRootVisitor* visitor;
- void* arg;
-};
-
-static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
- VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
- wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
-}
-
-void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
- // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
- // don't have.
- VerifyRootWrapperArg wrapperArg;
- wrapperArg.arg = arg;
- wrapperArg.visitor = visitor;
-
- if (opeer_ != NULL) {
- VerifyRootWrapperCallback(opeer_, &wrapperArg);
- }
- if (exception_ != NULL) {
- VerifyRootWrapperCallback(exception_, &wrapperArg);
- }
- throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
- if (class_loader_override_ != NULL) {
- VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
- }
- jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
- jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
-
- SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
-
- // Visit roots on this thread's stack
- Context* context = GetLongJumpContext();
- VerifyCallbackVisitor visitorToCallback(visitor, arg);
- ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
- mapper.WalkStack();
- ReleaseLongJumpContext(context);
-
- std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
- typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
- for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
- mirror::Object* this_object = (*it).this_object_;
- if (this_object != NULL) {
- VerifyRootWrapperCallback(this_object, &wrapperArg);
- }
- mirror::ArtMethod* method = (*it).method_;
- VerifyRootWrapperCallback(method, &wrapperArg);
- }
-}
-
void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
- if (opeer_ != NULL) {
- visitor(opeer_, arg);
+ if (opeer_ != nullptr) {
+ opeer_ = visitor(opeer_, arg);
}
- if (exception_ != NULL) {
- visitor(exception_, arg);
+ if (exception_ != nullptr) {
+ exception_ = reinterpret_cast<mirror::Throwable*>(visitor(exception_, arg));
}
throw_location_.VisitRoots(visitor, arg);
- if (class_loader_override_ != NULL) {
- visitor(class_loader_override_, arg);
+ if (class_loader_override_ != nullptr) {
+ class_loader_override_ = reinterpret_cast<mirror::ClassLoader*>(
+ visitor(class_loader_override_, arg));
}
jni_env_->locals.VisitRoots(visitor, arg);
jni_env_->monitors.VisitRoots(visitor, arg);
@@ -2209,24 +2177,26 @@
mapper.WalkStack();
ReleaseLongJumpContext(context);
- for (const instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
- mirror::Object* this_object = frame.this_object_;
- if (this_object != NULL) {
- visitor(this_object, arg);
+ for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
+ if (frame.this_object_ != nullptr) {
+ frame.this_object_ = visitor(frame.this_object_, arg);
+ DCHECK(frame.this_object_ != nullptr);
}
- mirror::ArtMethod* method = frame.method_;
- visitor(method, arg);
+ frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
+ DCHECK(frame.method_ != nullptr);
}
}
-static void VerifyObject(const mirror::Object* root, void* arg) {
- gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
- heap->VerifyObject(root);
+static mirror::Object* VerifyRoot(mirror::Object* root, void* arg) {
+ DCHECK(root != nullptr);
+ DCHECK(arg != nullptr);
+ reinterpret_cast<gc::Heap*>(arg)->VerifyObject(root);
+ return root;
}
void Thread::VerifyStackImpl() {
UniquePtr<Context> context(Context::Create());
- RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
+ RootCallbackVisitor visitorToCallback(VerifyRoot, Runtime::Current()->GetHeap());
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
mapper.WalkStack();
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 40e3f5f..dbf9736 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -17,8 +17,6 @@
#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_
-#include <pthread.h>
-
#include <bitset>
#include <deque>
#include <iosfwd>
@@ -104,16 +102,7 @@
// Reset internal state of child thread after fork.
void InitAfterFork();
- static Thread* Current() {
- // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
- // that we can replace this with a direct %fs access on x86.
- if (!is_started_) {
- return NULL;
- } else {
- void* thread = pthread_getspecific(Thread::pthread_key_self_);
- return reinterpret_cast<Thread*>(thread);
- }
- }
+ static Thread* Current();
static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts,
mirror::Object* thread_peer)
@@ -406,9 +395,6 @@
void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VerifyRoots(VerifyRootVisitor* visitor, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
//
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 671924a..44cf810 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -21,6 +21,7 @@
#include <unistd.h>
#include "base/mutex.h"
+#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "thread.h"
@@ -568,10 +569,24 @@
}
}
+struct VerifyRootWrapperArg {
+ VerifyRootVisitor* visitor;
+ void* arg;
+};
+
+static mirror::Object* VerifyRootWrapperCallback(mirror::Object* root, void* arg) {
+ VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
+ wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
+ return root;
+}
+
void ThreadList::VerifyRoots(VerifyRootVisitor* visitor, void* arg) const {
+ VerifyRootWrapperArg wrapper;
+ wrapper.visitor = visitor;
+ wrapper.arg = arg;
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
for (const auto& thread : list_) {
- thread->VerifyRoots(visitor, arg);
+ thread->VisitRoots(VerifyRootWrapperCallback, &wrapper);
}
}
diff --git a/runtime/throw_location.cc b/runtime/throw_location.cc
index e428511..01497ef 100644
--- a/runtime/throw_location.cc
+++ b/runtime/throw_location.cc
@@ -34,11 +34,14 @@
}
void ThrowLocation::VisitRoots(RootVisitor* visitor, void* arg) {
- if (this_object_ != NULL) {
- visitor(this_object_, arg);
+ if (this_object_ != nullptr) {
+ this_object_ = const_cast<mirror::Object*>(visitor(this_object_, arg));
+ DCHECK(this_object_ != nullptr);
}
- if (method_ != NULL) {
- visitor(method_, arg);
+ if (method_ != nullptr) {
+ method_ = const_cast<mirror::ArtMethod*>(
+ reinterpret_cast<const mirror::ArtMethod*>(visitor(method_, arg)));
+ DCHECK(method_ != nullptr);
}
}
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 25f840c..857acb8 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -99,7 +99,7 @@
}
std::string BooleanType::Dump() const {
- return "boolean";
+ return "Boolean";
}
std::string ConflictType::Dump() const {
@@ -111,7 +111,7 @@
}
std::string ShortType::Dump() const {
- return "short";
+ return "Short";
}
std::string CharType::Dump() const {
@@ -119,15 +119,15 @@
}
std::string FloatType::Dump() const {
- return "float";
+ return "Float";
}
std::string LongLoType::Dump() const {
- return "long (Low Half)";
+ return "Long (Low Half)";
}
std::string LongHiType::Dump() const {
- return "long (High Half)";
+ return "Long (High Half)";
}
std::string DoubleLoType::Dump() const {
@@ -461,7 +461,6 @@
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
- CHECK(IsPreciseConstant());
result << "Zero/null";
} else {
result << "Imprecise ";
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 295e271..fc9e5c9 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -23,17 +23,6 @@
namespace art {
namespace verifier {
-template <class Type>
-Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
- mirror::Class* klass = NULL;
- // Try loading the class from linker.
- if (!descriptor.empty()) {
- klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(descriptor.c_str());
- }
- Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
- RegTypeCache::primitive_count_++;
- return entry;
-}
inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
@@ -41,6 +30,16 @@
DCHECK(result != NULL);
return *result;
}
+
+inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+ // We only expect 0 to be a precise constant.
+ DCHECK(value != 0 || precise);
+ if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
+ return *small_precise_constants_[value - kMinSmallConstant];
+ }
+ return FromCat1NonSmallConstant(value, precise);
+}
+
} // namespace verifier
} // namespace art
#endif // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 2c18132..ce465a4 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -26,8 +26,8 @@
namespace verifier {
bool RegTypeCache::primitive_initialized_ = false;
-uint16_t RegTypeCache::primitive_start_ = 0;
uint16_t RegTypeCache::primitive_count_ = 0;
+PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
static bool MatchingPrecisionForClass(RegType* entry, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -44,7 +44,7 @@
}
}
-void RegTypeCache::FillPrimitiveTypes() {
+void RegTypeCache::FillPrimitiveAndSmallConstantTypes() {
entries_.push_back(UndefinedType::GetInstance());
entries_.push_back(ConflictType::GetInstance());
entries_.push_back(BooleanType::GetInstance());
@@ -57,6 +57,11 @@
entries_.push_back(FloatType::GetInstance());
entries_.push_back(DoubleLoType::GetInstance());
entries_.push_back(DoubleHiType::GetInstance());
+ for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+ int32_t i = value - kMinSmallConstant;
+ DCHECK_EQ(entries_.size(), small_precise_constants_[i]->GetId());
+ entries_.push_back(small_precise_constants_[i]);
+ }
DCHECK_EQ(entries_.size(), primitive_count_);
}
@@ -232,12 +237,12 @@
RegTypeCache::~RegTypeCache() {
CHECK_LE(primitive_count_, entries_.size());
// Delete only the non primitive types.
- if (entries_.size() == kNumPrimitives) {
- // All entries are primitive, nothing to delete.
+ if (entries_.size() == kNumPrimitivesAndSmallConstants) {
+ // All entries are from the global pool, nothing to delete.
return;
}
std::vector<RegType*>::iterator non_primitive_begin = entries_.begin();
- std::advance(non_primitive_begin, kNumPrimitives);
+ std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
STLDeleteContainerPointers(non_primitive_begin, entries_.end());
}
@@ -255,12 +260,29 @@
FloatType::Destroy();
DoubleLoType::Destroy();
DoubleHiType::Destroy();
+ for (uint16_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+ PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
+ delete type;
+ }
+
RegTypeCache::primitive_initialized_ = false;
RegTypeCache::primitive_count_ = 0;
}
}
-void RegTypeCache::CreatePrimitiveTypes() {
+template <class Type>
+Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
+ mirror::Class* klass = NULL;
+ // Try loading the class from linker.
+ if (!descriptor.empty()) {
+ klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(descriptor.c_str());
+ }
+ Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
+ RegTypeCache::primitive_count_++;
+ return entry;
+}
+
+void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() {
CreatePrimitiveTypeInstance<UndefinedType>("");
CreatePrimitiveTypeInstance<ConflictType>("");
CreatePrimitiveTypeInstance<BooleanType>("Z");
@@ -273,6 +295,11 @@
CreatePrimitiveTypeInstance<FloatType>("F");
CreatePrimitiveTypeInstance<DoubleLoType>("D");
CreatePrimitiveTypeInstance<DoubleHiType>("D");
+ for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+ PreciseConstType* type = new PreciseConstType(value, primitive_count_);
+ small_precise_constants_[value - kMinSmallConstant] = type;
+ primitive_count_++;
+ }
}
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
@@ -331,29 +358,28 @@
return *entry;
}
-const RegType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
- RegType* entry = NULL;
- RegType* cur_entry = NULL;
+const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+ UninitializedType* entry = NULL;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- cur_entry = entries_[i];
+ RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedAndUninitializedReference() &&
down_cast<UnresolvedUninitializedRefType*>(cur_entry)->GetAllocationPc() == allocation_pc &&
(cur_entry->GetDescriptor() == descriptor)) {
- return *cur_entry;
+ return *down_cast<UnresolvedUninitializedRefType*>(cur_entry);
}
}
entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- cur_entry = entries_[i];
+ RegType* cur_entry = entries_[i];
if (cur_entry->IsUninitializedReference() &&
down_cast<UninitializedReferenceType*>(cur_entry)
->GetAllocationPc() == allocation_pc &&
cur_entry->GetClass() == klass) {
- return *cur_entry;
+ return *down_cast<UninitializedReferenceType*>(cur_entry);
}
}
entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
@@ -404,27 +430,33 @@
return *entry;
}
-const RegType& RegTypeCache::ByteConstant() {
- return FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+const ImpreciseConstType& RegTypeCache::ByteConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-const RegType& RegTypeCache::ShortConstant() {
- return FromCat1Const(std::numeric_limits<jshort>::min(), false);
+const ImpreciseConstType& RegTypeCache::ShortConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-const RegType& RegTypeCache::IntConstant() {
- return FromCat1Const(std::numeric_limits<jint>::max(), false);
+const ImpreciseConstType& RegTypeCache::IntConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
- RegType* entry;
+const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+ UninitializedType* entry;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedAndUninitializedThisReference() &&
cur_entry->GetDescriptor() == descriptor) {
- return *cur_entry;
+ return *down_cast<UninitializedType*>(cur_entry);
}
}
entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
@@ -433,7 +465,7 @@
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsUninitializedThisReference() && cur_entry->GetClass() == klass) {
- return *cur_entry;
+ return *down_cast<UninitializedType*>(cur_entry);
}
}
entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
@@ -442,16 +474,16 @@
return *entry;
}
-const RegType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
cur_entry->IsPreciseConstant() == precise &&
(down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
- return *cur_entry;
+ return *down_cast<ConstantType*>(cur_entry);
}
}
- RegType* entry;
+ ConstantType* entry;
if (precise) {
entry = new PreciseConstType(value, entries_.size());
} else {
@@ -461,15 +493,15 @@
return *entry;
}
-const RegType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
(down_cast<ConstantType*>(cur_entry))->ConstantValueLo() == value) {
- return *cur_entry;
+ return *down_cast<ConstantType*>(cur_entry);
}
}
- RegType* entry;
+ ConstantType* entry;
if (precise) {
entry = new PreciseConstLoType(value, entries_.size());
} else {
@@ -479,15 +511,15 @@
return *entry;
}
-const RegType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
(down_cast<ConstantType*>(cur_entry))->ConstantValueHi() == value) {
- return *cur_entry;
+ return *down_cast<ConstantType*>(cur_entry);
}
}
- RegType* entry;
+ ConstantType* entry;
if (precise) {
entry = new PreciseConstHiType(value, entries_.size());
} else {
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 77f5893..a9f8bff 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -35,19 +35,18 @@
class RegType;
-const size_t kNumPrimitives = 12;
class RegTypeCache {
public:
explicit RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
entries_.reserve(64);
- FillPrimitiveTypes();
+ FillPrimitiveAndSmallConstantTypes();
}
~RegTypeCache();
static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
CHECK_EQ(RegTypeCache::primitive_count_, 0);
- CreatePrimitiveTypes();
- CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitives);
+ CreatePrimitiveAndSmallConstantTypes();
+ CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitivesAndSmallConstants);
RegTypeCache::primitive_initialized_ = true;
}
}
@@ -55,17 +54,13 @@
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template <class Type>
- static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FillPrimitiveTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromCat1Const(int32_t value, bool precise)
+ const ConstantType& FromCat1Const(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromCat2ConstLo(int32_t value, bool precise)
+ const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromCat2ConstHi(int32_t value, bool precise)
+ const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -129,34 +124,56 @@
const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Object;", precise);
}
- const RegType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+ const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
- const RegType& UninitializedThisArgument(const RegType& type)
+ const UninitializedType& UninitializedThisArgument(const RegType& type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& FromUninitialized(const RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
private:
- std::vector<RegType*> entries_;
- static bool primitive_initialized_;
- static uint16_t primitive_start_;
- static uint16_t primitive_count_;
- static void CreatePrimitiveTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Whether or not we're allowed to load classes.
- const bool can_load_classes_;
+ void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ClearException();
bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <class Type>
+ static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // The actual storage for the RegTypes.
+ std::vector<RegType*> entries_;
+
+ // A quick look up for popular small constants.
+ static constexpr int32_t kMinSmallConstant = -1;
+ static constexpr int32_t kMaxSmallConstant = 4;
+ static PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+
+ static constexpr size_t kNumPrimitivesAndSmallConstants =
+ 12 + (kMaxSmallConstant - kMinSmallConstant + 1);
+
+ // Have the well known global primitives been created?
+ static bool primitive_initialized_;
+
+ // Number of well known primitives that will be copied into a RegTypeCache upon construction.
+ static uint16_t primitive_count_;
+
+ // Whether or not we're allowed to load classes.
+ const bool can_load_classes_;
+
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
};
diff --git a/test/run-test b/test/run-test
index 11dcfc5..c449e84 100755
--- a/test/run-test
+++ b/test/run-test
@@ -269,7 +269,7 @@
fi
fi
# Clean up extraneous files that are not used by tests.
- find $tmp_dir -mindepth 1 ! -regex ".*/\(.*jar\|$build_output\|$expected\)" | xargs rm -rf
+ find $tmp_dir -mindepth 1 ! -regex ".*/\(.*jar\|$output\|$expected\)" | xargs rm -rf
exit 0
else
"./${build}" >"$build_output" 2>&1