author Nicolas Geoffray <ngeoffray@google.com> 2016-03-21 17:10:24 +0000
committer Vladimir Marko <vmarko@google.com> 2016-03-21 17:39:20 +0000
commit 3c94f0945ed596ceee39783fa075f013b65e80a1 (patch)
tree c10b5808a5d7157371c2750823e6a168c73aa231 /compiler/dex/mir_dataflow.cc
parent 162629ee8ac0fee2df0c0cdec27dff34bc6f0062 (diff)
Remove Quick from tree.
So long, old friend.

Change-Id: I0241c798a34b92bf994fed83888da67d6e7f1891
Diffstat (limited to 'compiler/dex/mir_dataflow.cc')
-rw-r--r--  compiler/dex/mir_dataflow.cc | 1453
1 file changed, 0 insertions, 1453 deletions
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
deleted file mode 100644
index f1cc5fc4d2..0000000000
--- a/compiler/dex/mir_dataflow.cc
+++ /dev/null
@@ -1,1453 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-
-namespace art {
-
-/*
- * Main table containing data flow attributes for each bytecode. The
- * first kNumPackedOpcodes entries are for Dalvik bytecode
- * instructions; extended opcodes at the MIR level are appended
- * afterwards.
- *
- * TODO - many optimization flags are incomplete - they will only limit the
- * scope of optimizations but will not cause mis-optimizations.
- */
-const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
- // 00 NOP
- DF_NOP,
-
- // 01 MOVE vA, vB
- DF_DA | DF_UB | DF_IS_MOVE,
-
- // 02 MOVE_FROM16 vAA, vBBBB
- DF_DA | DF_UB | DF_IS_MOVE,
-
- // 03 MOVE_16 vAAAA, vBBBB
- DF_DA | DF_UB | DF_IS_MOVE,
-
- // 04 MOVE_WIDE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
- // 05 MOVE_WIDE_FROM16 vAA, vBBBB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
- // 06 MOVE_WIDE_16 vAAAA, vBBBB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
- // 07 MOVE_OBJECT vA, vB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
- // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
- // 09 MOVE_OBJECT_16 vAAAA, vBBBB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
- // 0A MOVE_RESULT vAA
- DF_DA,
-
- // 0B MOVE_RESULT_WIDE vAA
- DF_DA | DF_A_WIDE,
-
- // 0C MOVE_RESULT_OBJECT vAA
- DF_DA | DF_REF_A,
-
- // 0D MOVE_EXCEPTION vAA
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 0E RETURN_VOID
- DF_NOP,
-
- // 0F RETURN vAA
- DF_UA,
-
- // 10 RETURN_WIDE vAA
- DF_UA | DF_A_WIDE,
-
- // 11 RETURN_OBJECT vAA
- DF_UA | DF_REF_A,
-
- // 12 CONST_4 vA, #+B
- DF_DA | DF_SETS_CONST,
-
- // 13 CONST_16 vAA, #+BBBB
- DF_DA | DF_SETS_CONST,
-
- // 14 CONST vAA, #+BBBBBBBB
- DF_DA | DF_SETS_CONST,
-
- // 15 CONST_HIGH16 vAA, #+BBBB0000
- DF_DA | DF_SETS_CONST,
-
- // 16 CONST_WIDE_16 vAA, #+BBBB
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 1A CONST_STRING vAA, string@BBBB
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 1C CONST_CLASS vAA, type@BBBB
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 1D MONITOR_ENTER vAA
- DF_UA | DF_NULL_CHK_A | DF_REF_A,
-
- // 1E MONITOR_EXIT vAA
- DF_UA | DF_NULL_CHK_A | DF_REF_A,
-
- // 1F CHK_CAST vAA, type@BBBB
- DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
-
- // 20 INSTANCE_OF vA, vB, type@CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
-
- // 21 ARRAY_LENGTH vA, vB
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_CORE_A | DF_REF_B,
-
- // 22 NEW_INSTANCE vAA, type@BBBB
- DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
-
- // 23 NEW_ARRAY vA, vB, type@CCCC
- DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
-
- // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
-
- // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
- DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
-
- // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
- DF_UA | DF_REF_A | DF_UMS,
-
- // 27 THROW vAA
- DF_UA | DF_REF_A | DF_UMS,
-
- // 28 GOTO
- DF_NOP,
-
- // 29 GOTO_16
- DF_NOP,
-
- // 2A GOTO_32
- DF_NOP,
-
- // 2B PACKED_SWITCH vAA, +BBBBBBBB
- DF_UA | DF_CORE_A,
-
- // 2C SPARSE_SWITCH vAA, +BBBBBBBB
- DF_UA | DF_CORE_A,
-
- // 2D CMPL_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 2E CMPG_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 2F CMPL_DOUBLE vAA, vBB, vCC
- DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 30 CMPG_DOUBLE vAA, vBB, vCC
- DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 31 CMP_LONG vAA, vBB, vCC
- DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 32 IF_EQ vA, vB, +CCCC
- DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
- // 33 IF_NE vA, vB, +CCCC
- DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
- // 34 IF_LT vA, vB, +CCCC
- DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
- // 35 IF_GE vA, vB, +CCCC
- DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
- // 36 IF_GT vA, vB, +CCCC
- DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
- // 37 IF_LE vA, vB, +CCCC
- DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
- // 38 IF_EQZ vAA, +BBBB
- DF_UA,
-
- // 39 IF_NEZ vAA, +BBBB
- DF_UA,
-
- // 3A IF_LTZ vAA, +BBBB
- DF_UA,
-
- // 3B IF_GEZ vAA, +BBBB
- DF_UA,
-
- // 3C IF_GTZ vAA, +BBBB
- DF_UA,
-
- // 3D IF_LEZ vAA, +BBBB
- DF_UA,
-
- // 3E UNUSED_3E
- DF_NOP,
-
- // 3F UNUSED_3F
- DF_NOP,
-
- // 40 UNUSED_40
- DF_NOP,
-
- // 41 UNUSED_41
- DF_NOP,
-
- // 42 UNUSED_42
- DF_NOP,
-
- // 43 UNUSED_43
- DF_NOP,
-
- // 44 AGET vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 45 AGET_WIDE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 46 AGET_OBJECT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 47 AGET_BOOLEAN vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 48 AGET_BYTE vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 49 AGET_CHAR vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 4A AGET_SHORT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 4B APUT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 4C APUT_WIDE vAA, vBB, vCC
- DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 4D APUT_OBJECT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 4E APUT_BOOLEAN vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 4F APUT_BYTE vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 50 APUT_CHAR vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 51 APUT_SHORT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 52 IGET vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 53 IGET_WIDE vA, vB, field@CCCC
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 54 IGET_OBJECT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 55 IGET_BOOLEAN vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 56 IGET_BYTE vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 57 IGET_CHAR vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 58 IGET_SHORT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 59 IPUT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 5A IPUT_WIDE vA, vB, field@CCCC
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 5B IPUT_OBJECT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 5C IPUT_BOOLEAN vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 5D IPUT_BYTE vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 5E IPUT_CHAR vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 5F IPUT_SHORT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // 60 SGET vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 61 SGET_WIDE vAA, field@BBBB
- DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 62 SGET_OBJECT vAA, field@BBBB
- DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 63 SGET_BOOLEAN vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 64 SGET_BYTE vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 65 SGET_CHAR vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 66 SGET_SHORT vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 67 SPUT vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 68 SPUT_WIDE vAA, field@BBBB
- DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 69 SPUT_OBJECT vAA, field@BBBB
- DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 6A SPUT_BOOLEAN vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 6B SPUT_BYTE vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 6C SPUT_CHAR vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 6D SPUT_SHORT vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
- // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_CLINIT | DF_UMS,
-
- // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 73 RETURN_VOID_NO_BARRIER
- DF_NOP,
-
- // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_CLINIT | DF_UMS,
-
- // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 79 UNUSED_79
- DF_NOP,
-
- // 7A UNUSED_7A
- DF_NOP,
-
- // 7B NEG_INT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 7C NOT_INT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 7D NEG_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // 7E NOT_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // 7F NEG_FLOAT vA, vB
- DF_DA | DF_UB | DF_FP_A | DF_FP_B,
-
- // 80 NEG_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // 81 INT_TO_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 82 INT_TO_FLOAT vA, vB
- DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
-
- // 83 INT_TO_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
-
- // 84 LONG_TO_INT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // 85 LONG_TO_FLOAT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
- // 86 LONG_TO_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
- // 87 FLOAT_TO_INT vA, vB
- DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
-
- // 88 FLOAT_TO_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
-
- // 89 FLOAT_TO_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
-
- // 8A DOUBLE_TO_INT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
- // 8B DOUBLE_TO_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
- // 8C DOUBLE_TO_FLOAT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // 8D INT_TO_BYTE vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 8E INT_TO_CHAR vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 8F INT_TO_SHORT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 90 ADD_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 91 SUB_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 92 MUL_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 93 DIV_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 94 REM_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 95 AND_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 96 OR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 97 XOR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 98 SHL_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 99 SHR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9A USHR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9B ADD_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9C SUB_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9D MUL_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9E DIV_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9F REM_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A0 AND_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A1 OR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A2 XOR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A3 SHL_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A4 SHR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A5 USHR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A6 ADD_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // A7 SUB_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // A8 MUL_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // A9 DIV_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AA REM_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AB ADD_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AC SUB_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AD MUL_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AE DIV_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AF REM_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // B0 ADD_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B1 SUB_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B2 MUL_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B3 DIV_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B4 REM_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B5 AND_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B6 OR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B7 XOR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B8 SHL_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B9 SHR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // BA USHR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // BB ADD_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BC SUB_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BD MUL_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BE DIV_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BF REM_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C0 AND_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C1 OR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C2 XOR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C3 SHL_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // C4 SHR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // C5 USHR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // C6 ADD_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // C7 SUB_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // C8 MUL_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // C9 DIV_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // CA REM_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // CB ADD_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CC SUB_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CD MUL_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CE DIV_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CF REM_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // D0 ADD_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D1 RSUB_INT vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D2 MUL_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D3 DIV_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D4 REM_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D5 AND_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D6 OR_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D7 XOR_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D8 ADD_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DA MUL_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DB DIV_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DC REM_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DD AND_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DE OR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DF XOR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E0 SHL_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E1 SHR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E2 USHR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E3 IGET_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E4 IGET_WIDE_QUICK
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E5 IGET_OBJECT_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E6 IPUT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E7 IPUT_WIDE_QUICK
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E8 IPUT_OBJECT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E9 INVOKE_VIRTUAL_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // EA INVOKE_VIRTUAL_RANGE_QUICK
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // EB IPUT_BOOLEAN_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // EC IPUT_BYTE_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // ED IPUT_CHAR_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // EE IPUT_SHORT_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // EF IGET_BOOLEAN_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // F0 IGET_BYTE_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // F1 IGET_CHAR_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // F2 IGET_SHORT_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // F3 UNUSED_F3
- DF_NOP,
-
- // F4 UNUSED_F4
- DF_NOP,
-
- // F5 UNUSED_F5
- DF_NOP,
-
- // F6 UNUSED_F6
- DF_NOP,
-
- // F7 UNUSED_F7
- DF_NOP,
-
- // F8 UNUSED_F8
- DF_NOP,
-
- // F9 UNUSED_F9
- DF_NOP,
-
- // FA UNUSED_FA
- DF_NOP,
-
- // FB UNUSED_FB
- DF_NOP,
-
- // FC UNUSED_FC
- DF_NOP,
-
- // FD UNUSED_FD
- DF_NOP,
-
- // FE UNUSED_FE
- DF_NOP,
-
- // FF UNUSED_FF
- DF_NOP,
-
- // Beginning of extended MIR opcodes
- // 100 MIR_PHI
- DF_DA | DF_NULL_TRANSFER_N,
-
- // 101 MIR_COPY
- DF_DA | DF_UB | DF_IS_MOVE,
-
- // 102 MIR_FUSED_CMPL_FLOAT
- DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // 103 MIR_FUSED_CMPG_FLOAT
- DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // 104 MIR_FUSED_CMPL_DOUBLE
- DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // 105 MIR_FUSED_CMPG_DOUBLE
- DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // 106 MIR_FUSED_CMP_LONG
- DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // 107 MIR_NOP
- DF_NOP,
-
- // 108 MIR_NULL_CHECK
- DF_UA | DF_REF_A | DF_NULL_CHK_A | DF_LVN,
-
- // 109 MIR_RANGE_CHECK
- 0,
-
- // 10A MIR_DIV_ZERO_CHECK
- 0,
-
- // 10B MIR_CHECK
- 0,
-
- // 10D MIR_SELECT
- DF_DA | DF_UB,
-
- // 10E MirOpConstVector
- 0,
-
- // 10F MirOpMoveVector
- 0,
-
- // 110 MirOpPackedMultiply
- 0,
-
- // 111 MirOpPackedAddition
- 0,
-
- // 112 MirOpPackedSubtract
- 0,
-
- // 113 MirOpPackedShiftLeft
- 0,
-
- // 114 MirOpPackedSignedShiftRight
- 0,
-
- // 115 MirOpPackedUnsignedShiftRight
- 0,
-
- // 116 MirOpPackedAnd
- 0,
-
- // 117 MirOpPackedOr
- 0,
-
- // 118 MirOpPackedXor
- 0,
-
- // 119 MirOpPackedAddReduce
- DF_FORMAT_EXTENDED,
-
- // 11A MirOpPackedReduce
- DF_FORMAT_EXTENDED,
-
- // 11B MirOpPackedSet
- DF_FORMAT_EXTENDED,
-
- // 11C MirOpReserveVectorRegisters
- 0,
-
- // 11D MirOpReturnVectorRegisters
- 0,
-
- // 11E MirOpMemBarrier
- 0,
-
- // 11F MirOpPackedArrayGet
- DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 120 MirOpPackedArrayPut
- DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
- // 121 MirOpMaddInt
- DF_FORMAT_EXTENDED,
-
- // 122 MirOpMsubInt
- DF_FORMAT_EXTENDED,
-
- // 123 MirOpMaddLong
- DF_FORMAT_EXTENDED,
-
- // 124 MirOpMsubLong
- DF_FORMAT_EXTENDED,
-};
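
For reference, a minimal standalone sketch (hypothetical names, not the ART API) of how a per-opcode attribute table like oat_data_flow_attributes_ above is consulted: the opcode indexes the table and individual properties are tested with bit masks, which is the pattern FindLocalLiveIn and DoSSAConversion below follow via GetDataFlowAttributes().

#include <cstdint>
#include <cstdio>

// Hypothetical attribute bits, mirroring the role of the DF_* flags in this file.
constexpr uint64_t kDfNop    = 0;
constexpr uint64_t kDfDa     = 1ull << 0;  // instruction defines vA
constexpr uint64_t kDfUb     = 1ull << 1;  // instruction uses vB
constexpr uint64_t kDfIsMove = 1ull << 2;

// Hypothetical table indexed by opcode, analogous to oat_data_flow_attributes_.
constexpr uint64_t kAttributes[] = {
  kDfNop,                     // 00 NOP
  kDfDa | kDfUb | kDfIsMove,  // 01 MOVE vA, vB
};

int main() {
  int opcode = 0x01;  // MOVE vA, vB
  uint64_t attrs = kAttributes[opcode];
  if (attrs & kDfDa) {
    std::printf("opcode %02x defines vA\n", opcode);
  }
  if (attrs & kDfUb) {
    std::printf("opcode %02x uses vB\n", opcode);
  }
  return 0;
}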
-
-/* Any register that is used before being defined is considered live-in */
-void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
- ArenaBitVector* live_in_v, int dalvik_reg_id) {
- use_v->SetBit(dalvik_reg_id);
- if (!def_v->IsBitSet(dalvik_reg_id)) {
- live_in_v->SetBit(dalvik_reg_id);
- }
-}
-
-/* Mark a reg as being defined */
-void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) {
- def_v->SetBit(dalvik_reg_id);
-}
-
-void MIRGraph::HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
- ArenaBitVector* live_in_v,
- const MIR::DecodedInstruction& d_insn) {
- // For vector MIRs, vC contains type information
- bool is_vector_type_wide = false;
- int type_size = d_insn.vC >> 16;
- if (type_size == k64 || type_size == kDouble) {
- is_vector_type_wide = true;
- }
-
- switch (static_cast<int>(d_insn.opcode)) {
- case kMirOpPackedAddReduce:
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vA);
- if (is_vector_type_wide == true) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vA + 1);
- }
- HandleDef(def_v, d_insn.vA);
- if (is_vector_type_wide == true) {
- HandleDef(def_v, d_insn.vA + 1);
- }
- break;
- case kMirOpPackedReduce:
- HandleDef(def_v, d_insn.vA);
- if (is_vector_type_wide == true) {
- HandleDef(def_v, d_insn.vA + 1);
- }
- break;
- case kMirOpPackedSet:
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
- if (is_vector_type_wide == true) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
- }
- break;
- case kMirOpMaddInt:
- case kMirOpMsubInt:
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
- HandleDef(def_v, d_insn.vA);
- break;
- case kMirOpMaddLong:
- case kMirOpMsubLong:
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC + 1);
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0] + 1);
- HandleDef(def_v, d_insn.vA);
- HandleDef(def_v, d_insn.vA + 1);
- break;
- default:
- LOG(ERROR) << "Unexpected Extended Opcode " << d_insn.opcode;
- break;
- }
-}
-
-/*
- * Find out live-in variables for natural loops. Variables that are live-in in
- * the main loop body are considered to be defined in the entry block.
- */
-bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
- MIR* mir;
- ArenaBitVector *use_v, *def_v, *live_in_v;
-
- if (bb->data_flow_info == nullptr) return false;
-
- use_v = bb->data_flow_info->use_v =
- new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
- def_v = bb->data_flow_info->def_v =
- new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
- live_in_v = bb->data_flow_info->live_in_v =
- new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
-
- for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- uint64_t df_attributes = GetDataFlowAttributes(mir);
- MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
-
- if (df_attributes & DF_HAS_USES) {
- if (df_attributes & DF_UA) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA);
- if (df_attributes & DF_A_WIDE) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA+1);
- }
- }
- if (df_attributes & DF_UB) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB);
- if (df_attributes & DF_B_WIDE) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB+1);
- }
- }
- if (df_attributes & DF_UC) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC);
- if (df_attributes & DF_C_WIDE) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+1);
- }
- }
- }
- if (df_attributes & DF_FORMAT_35C) {
- for (unsigned int i = 0; i < d_insn->vA; i++) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->arg[i]);
- }
- }
- if (df_attributes & DF_FORMAT_3RC) {
- for (unsigned int i = 0; i < d_insn->vA; i++) {
- HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+i);
- }
- }
- if (df_attributes & DF_HAS_DEFS) {
- HandleDef(def_v, d_insn->vA);
- if (df_attributes & DF_A_WIDE) {
- HandleDef(def_v, d_insn->vA+1);
- }
- }
- if (df_attributes & DF_FORMAT_EXTENDED) {
- HandleExtended(use_v, def_v, live_in_v, mir->dalvikInsn);
- }
- }
- return true;
-}
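
A minimal sketch of the per-block rule implemented by HandleLiveInUse/HandleDef above, with std::bitset standing in for ArenaBitVector and hypothetical register numbers: a register is live-in if some instruction uses it before any instruction in the same block defines it.

#include <bitset>
#include <cassert>

constexpr int kNumVRegs = 8;

struct BlockDataFlow {
  std::bitset<kNumVRegs> use_v, def_v, live_in_v;

  // Same rule as MIRGraph::HandleLiveInUse: a use only makes the register
  // live-in if it has not been defined earlier in this block.
  void HandleUse(int v_reg) {
    use_v.set(v_reg);
    if (!def_v.test(v_reg)) {
      live_in_v.set(v_reg);
    }
  }
  void HandleDef(int v_reg) { def_v.set(v_reg); }
};

int main() {
  BlockDataFlow bb;
  bb.HandleUse(1);   // v1 used before any def -> live-in
  bb.HandleDef(2);   // v2 defined first
  bb.HandleUse(2);   // ...so this use does not make v2 live-in
  assert(bb.live_in_v.test(1));
  assert(!bb.live_in_v.test(2));
  return 0;
}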
-
-int MIRGraph::AddNewSReg(int v_reg) {
- int subscript = ++ssa_last_defs_[v_reg];
- uint32_t ssa_reg = GetNumSSARegs();
- SetNumSSARegs(ssa_reg + 1);
- ssa_base_vregs_.push_back(v_reg);
- ssa_subscripts_.push_back(subscript);
- DCHECK_EQ(ssa_base_vregs_.size(), ssa_subscripts_.size());
- // If we are expanding very late, update use counts too.
- if (ssa_reg > 0 && use_counts_.size() == ssa_reg) {
- // Need to expand the counts.
- use_counts_.push_back(0);
- raw_use_counts_.push_back(0);
- }
- return ssa_reg;
-}
-
-/* Find out the latest SSA register for a given Dalvik register */
-void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) {
- DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
- uses[reg_index] = vreg_to_ssa_map_[dalvik_reg];
-}
-
-/* Setup a new SSA register for a given Dalvik register */
-void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) {
- DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
- int ssa_reg = AddNewSReg(dalvik_reg);
- vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
- defs[reg_index] = ssa_reg;
-}
-
-void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
- mir->ssa_rep->num_uses = num_uses;
-
- if (mir->ssa_rep->num_uses_allocated < num_uses) {
- mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
- }
-}
-
-void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
- mir->ssa_rep->num_defs = num_defs;
-
- if (mir->ssa_rep->num_defs_allocated < num_defs) {
- mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
- }
-}
-
-/* Look up new SSA names for format_35c instructions */
-void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
- MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
- int num_uses = d_insn->vA;
- int i;
-
- AllocateSSAUseData(mir, num_uses);
-
- for (i = 0; i < num_uses; i++) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
- }
-}
-
-/* Look up new SSA names for format_3rc instructions */
-void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) {
- MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
- int num_uses = d_insn->vA;
- int i;
-
- AllocateSSAUseData(mir, num_uses);
-
- for (i = 0; i < num_uses; i++) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
- }
-}
-
-void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
- const MIR::DecodedInstruction& d_insn = mir->dalvikInsn;
- // For vector MIRs, vC contains type information
- bool is_vector_type_wide = false;
- int type_size = d_insn.vC >> 16;
- if (type_size == k64 || type_size == kDouble) {
- is_vector_type_wide = true;
- }
-
- switch (static_cast<int>(mir->dalvikInsn.opcode)) {
- case kMirOpPackedAddReduce:
- // We have one use, plus one more for wide
- AllocateSSAUseData(mir, is_vector_type_wide ? 2 : 1);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vA, 0);
- if (is_vector_type_wide == true) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vA + 1, 1);
- }
-
- // We have a def, plus one more for wide
- AllocateSSADefData(mir, is_vector_type_wide ? 2 : 1);
- HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
- if (is_vector_type_wide == true) {
- HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
- }
- break;
- case kMirOpPackedReduce:
- // We have a def, plus one more for wide
- AllocateSSADefData(mir, is_vector_type_wide ? 2 : 1);
- HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
- if (is_vector_type_wide == true) {
- HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
- }
- break;
- case kMirOpPackedSet:
- // We have one use, plus one more for wide
- AllocateSSAUseData(mir, is_vector_type_wide ? 2 : 1);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
- if (is_vector_type_wide == true) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
- }
- break;
- case kMirOpMaddInt:
- case kMirOpMsubInt:
- AllocateSSAUseData(mir, 3);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 1);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 2);
- AllocateSSADefData(mir, 1);
- HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
- break;
- case kMirOpMaddLong:
- case kMirOpMsubLong:
- AllocateSSAUseData(mir, 6);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 2);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.vC + 1, 3);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 4);
- HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0] + 1, 5);
- AllocateSSADefData(mir, 2);
- HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
- HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
- break;
- default:
- LOG(ERROR) << "Missing case for extended MIR: " << mir->dalvikInsn.opcode;
- break;
- }
-}
-
-/* Entry function to convert a block into SSA representation */
-bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
- if (bb->data_flow_info == nullptr) return false;
-
- /*
- * Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
- * only if the dalvik register is in the live-in set.
- */
- BasicBlockId bb_id = bb->id;
- for (int dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
- if (temp_.ssa.phi_node_blocks[dalvik_reg]->IsBitSet(bb_id)) {
- if (!bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
- /* Variable will be clobbered before being used - no need for phi */
- vreg_to_ssa_map_[dalvik_reg] = INVALID_SREG;
- continue;
- }
- MIR *phi = NewMIR();
- phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
- phi->dalvikInsn.vA = dalvik_reg;
- phi->offset = bb->start_offset;
- phi->m_unit_index = 0; // Arbitrarily assign all Phi nodes to outermost method.
- bb->PrependMIR(phi);
- }
- }
-
- for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- mir->ssa_rep =
- static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
- kArenaAllocDFInfo));
- memset(mir->ssa_rep, 0, sizeof(*mir->ssa_rep));
-
- uint64_t df_attributes = GetDataFlowAttributes(mir);
-
- // If not a pseudo-op, note whether it makes the method non-leaf (contains an invoke).
- if (!MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
- int flags = mir->dalvikInsn.FlagsOf();
-
- if ((flags & Instruction::kInvoke) != 0) {
- attributes_ &= ~METHOD_IS_LEAF;
- }
- }
-
- int num_uses = 0;
-
- if (df_attributes & DF_FORMAT_35C) {
- DataFlowSSAFormat35C(mir);
- continue;
- }
-
- if (df_attributes & DF_FORMAT_3RC) {
- DataFlowSSAFormat3RC(mir);
- continue;
- }
-
- if (df_attributes & DF_FORMAT_EXTENDED) {
- DataFlowSSAFormatExtended(mir);
- continue;
- }
-
- if (df_attributes & DF_HAS_USES) {
- if (df_attributes & DF_UA) {
- num_uses++;
- if (df_attributes & DF_A_WIDE) {
- num_uses++;
- }
- }
- if (df_attributes & DF_UB) {
- num_uses++;
- if (df_attributes & DF_B_WIDE) {
- num_uses++;
- }
- }
- if (df_attributes & DF_UC) {
- num_uses++;
- if (df_attributes & DF_C_WIDE) {
- num_uses++;
- }
- }
- }
-
- AllocateSSAUseData(mir, num_uses);
-
- int num_defs = 0;
-
- if (df_attributes & DF_HAS_DEFS) {
- num_defs++;
- if (df_attributes & DF_A_WIDE) {
- num_defs++;
- }
- }
-
- AllocateSSADefData(mir, num_defs);
-
- MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
-
- if (df_attributes & DF_HAS_USES) {
- num_uses = 0;
- if (df_attributes & DF_UA) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
- if (df_attributes & DF_A_WIDE) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
- }
- }
- if (df_attributes & DF_UB) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
- if (df_attributes & DF_B_WIDE) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
- }
- }
- if (df_attributes & DF_UC) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
- if (df_attributes & DF_C_WIDE) {
- HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
- }
- }
- }
- if (df_attributes & DF_HAS_DEFS) {
- HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
- if (df_attributes & DF_A_WIDE) {
- HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
- }
- }
- }
-
- /*
- * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
- * input to PHI nodes can be derived from the snapshot of all
- * predecessor blocks.
- */
- bb->data_flow_info->vreg_to_ssa_map_exit =
- arena_->AllocArray<int32_t>(GetNumOfCodeAndTempVRs(), kArenaAllocDFInfo);
-
- memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
- sizeof(int) * GetNumOfCodeAndTempVRs());
- return true;
-}
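
A self-contained sketch of the renaming discipline used by HandleSSAUse/HandleSSADef and AddNewSReg above, with plain std::vector in place of the arena-backed containers and hypothetical names: a use reads the current vreg-to-SSA mapping, while a def allocates a fresh SSA name and updates the mapping.

#include <cassert>
#include <vector>

struct SsaState {
  std::vector<int> vreg_to_ssa;    // current SSA name per Dalvik vreg
  std::vector<int> ssa_base_vreg;  // SSA name -> underlying vreg
  std::vector<int> ssa_subscript;  // SSA name -> subscript
  std::vector<int> last_def;       // per-vreg subscript counter

  explicit SsaState(int num_vregs)
      : vreg_to_ssa(num_vregs), last_def(num_vregs, 0) {
    // Initially each vreg maps to an SSA name with subscript 0
    // (cf. CompilerInitializeSSAConversion below).
    for (int v = 0; v < num_vregs; ++v) {
      vreg_to_ssa[v] = v;
      ssa_base_vreg.push_back(v);
      ssa_subscript.push_back(0);
    }
  }

  int Use(int v_reg) { return vreg_to_ssa[v_reg]; }

  int Def(int v_reg) {
    int subscript = ++last_def[v_reg];
    int ssa_reg = static_cast<int>(ssa_base_vreg.size());
    ssa_base_vreg.push_back(v_reg);
    ssa_subscript.push_back(subscript);
    vreg_to_ssa[v_reg] = ssa_reg;
    return ssa_reg;
  }
};

int main() {
  SsaState ssa(4);
  int use0 = ssa.Use(1);   // v1, subscript 0
  int def1 = ssa.Def(1);   // new SSA name for v1
  int use1 = ssa.Use(1);   // now resolves to the new name
  assert(use0 == 1 && def1 == 4 && use1 == 4);
  return 0;
}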
-
-void MIRGraph::InitializeBasicBlockDataFlow() {
- /*
- * Allocate the BasicBlockDataFlow structure for the entry, exit, and code blocks.
- */
- for (BasicBlock* bb : block_list_) {
- if (bb->hidden == true) continue;
- if (bb->block_type == kDalvikByteCode ||
- bb->block_type == kEntryBlock ||
- bb->block_type == kExitBlock) {
- bb->data_flow_info =
- static_cast<BasicBlockDataFlow*>(arena_->Alloc(sizeof(BasicBlockDataFlow),
- kArenaAllocDFInfo));
- }
- }
-}
-
-/* Setup the basic data structures for SSA conversion */
-void MIRGraph::CompilerInitializeSSAConversion() {
- size_t num_reg = GetNumOfCodeAndTempVRs();
-
- ssa_base_vregs_.clear();
- ssa_base_vregs_.reserve(num_reg + GetDefCount() + 128);
- ssa_subscripts_.clear();
- ssa_subscripts_.reserve(num_reg + GetDefCount() + 128);
-
- /*
- * Initial number of SSA registers is equal to the number of Dalvik
- * registers.
- */
- SetNumSSARegs(num_reg);
-
- /*
- * Initialize the SSA2Dalvik map list. For the first num_reg elements,
- * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
- * into "(0 << 16) | i"
- */
- for (unsigned int i = 0; i < num_reg; i++) {
- ssa_base_vregs_.push_back(i);
- ssa_subscripts_.push_back(0);
- }
-
- /*
- * Initialize the DalvikToSSAMap map. There is one entry for each
- * Dalvik register, and the SSA names for those are the same.
- */
- vreg_to_ssa_map_ = arena_->AllocArray<int32_t>(num_reg, kArenaAllocDFInfo);
- /* Keep track of the highest def for each dalvik reg */
- ssa_last_defs_ = arena_->AllocArray<int>(num_reg, kArenaAllocDFInfo);
-
- for (unsigned int i = 0; i < num_reg; i++) {
- vreg_to_ssa_map_[i] = i;
- ssa_last_defs_[i] = 0;
- }
-
- // Create a compiler temporary for Method*. This is done after SSA initialization.
- CompilerTemp* method_temp = GetNewCompilerTemp(kCompilerTempSpecialMethodPtr, false);
- // The MIR graph keeps track of the sreg for method pointer specially, so record that now.
- method_sreg_ = method_temp->s_reg_low;
-
- InitializeBasicBlockDataFlow();
-}
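
Based on the "(0 << 16) | i" layout described in the comment in CompilerInitializeSSAConversion above, a sketch of how an SSA name can pack (vreg, subscript) into a single integer; the helper names are hypothetical and are not the ENCODE_REG_SUB macro itself.

#include <cassert>
#include <cstdint>

// Pack a Dalvik vreg and its SSA subscript into one value, following the
// "(subscript << 16) | vreg" layout described above.
constexpr uint32_t EncodeRegSub(uint32_t v_reg, uint32_t subscript) {
  return (subscript << 16) | v_reg;
}
constexpr uint32_t DecodeVReg(uint32_t encoded) { return encoded & 0xffff; }
constexpr uint32_t DecodeSub(uint32_t encoded) { return encoded >> 16; }

int main() {
  uint32_t name = EncodeRegSub(/*v_reg=*/5, /*subscript=*/3);
  assert(DecodeVReg(name) == 5);
  assert(DecodeSub(name) == 3);
  return 0;
}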
-
-uint32_t MIRGraph::GetUseCountWeight(BasicBlock* bb) const {
- // Each level of nesting adds *100 to count, up to 3 levels deep.
- uint32_t depth = std::min(3U, static_cast<uint32_t>(bb->nesting_depth));
- uint32_t weight = std::max(1U, depth * 100);
- return weight;
-}
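
To make the weighting above concrete, a short standalone check of GetUseCountWeight's arithmetic (reimplemented here for illustration only): the weight is nesting depth times 100, clamped at depth 3, with a floor of 1 for code outside any loop.

#include <algorithm>
#include <cassert>
#include <cstdint>

uint32_t UseCountWeight(uint32_t nesting_depth) {
  uint32_t depth = std::min(3u, nesting_depth);
  return std::max(1u, depth * 100);
}

int main() {
  assert(UseCountWeight(0) == 1);    // outside any loop
  assert(UseCountWeight(1) == 100);  // one loop deep
  assert(UseCountWeight(2) == 200);
  assert(UseCountWeight(5) == 300);  // clamped at depth 3
  return 0;
}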
-
-/*
- * Count uses, weighting by loop nesting depth. This code only
- * counts explicitly used s_regs. A later phase will add implicit
- * counts for things such as Method*, null-checked references, etc.
- */
-void MIRGraph::CountUses(BasicBlock* bb) {
- if (bb->block_type != kDalvikByteCode) {
- return;
- }
- uint32_t weight = GetUseCountWeight(bb);
- for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
- if (mir->ssa_rep == nullptr) {
- continue;
- }
- for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
- int s_reg = mir->ssa_rep->uses[i];
- raw_use_counts_[s_reg] += 1u;
- use_counts_[s_reg] += weight;
- }
- }
-}
-
-/* Verify that this block is listed as a successor by each of its claimed predecessors */
-bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
- for (BasicBlockId pred_id : bb->predecessors) {
- BasicBlock* pred_bb = GetBasicBlock(pred_id);
- DCHECK(pred_bb != nullptr);
- bool found = false;
- if (pred_bb->taken == bb->id) {
- found = true;
- } else if (pred_bb->fall_through == bb->id) {
- found = true;
- } else if (pred_bb->successor_block_list_type != kNotUsed) {
- for (SuccessorBlockInfo* successor_block_info : pred_bb->successor_blocks) {
- BasicBlockId succ_bb = successor_block_info->block;
- if (succ_bb == bb->id) {
- found = true;
- break;
- }
- }
- }
- if (found == false) {
- char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
- GetBlockName(bb, block_name1);
- GetBlockName(pred_bb, block_name2);
- DumpCFG("/sdcard/cfg/", false);
- LOG(FATAL) << "Successor " << block_name1 << " not found from "
- << block_name2;
- }
- }
- return true;
-}
-
-void MIRGraph::VerifyDataflow() {
- /* Verify if all blocks are connected as claimed */
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- VerifyPredInfo(bb);
- }
-}
-
-} // namespace art