Foundation for Quick LLVM compiler
Ready for review - probably better to get this cleaned up and
checked in even though much work remains.
Basic conversion from MIR to GreenlandIR and from GreenlandIR
back to LIR. Support sufficient to run Fibonacci test.
Note some structural changes in MIR to support this work:
o retaining incoming label for phi nodes
o constant propagation
o include object reference detection in type inference pass
Change-Id: I8ba63c73e76d071aa40cae0f744e598b96f68699
diff --git a/build/Android.common.mk b/build/Android.common.mk
index bcac723..064aa52 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -28,11 +28,17 @@
ART_USE_GREENLAND_COMPILER := false
endif
+ifneq ($(wildcard art/USE_QUICK_COMPILER),)
+ART_USE_QUICK_COMPILER := true
+else
+ART_USE_QUICK_COMPILER := false
+endif
+
ifeq ($(filter-out true,$(ART_USE_LLVM_COMPILER) $(ART_USE_GREENLAND_COMPILER)),)
$(error Cannot enable art-greenland and art-llvm compiler simultaneously!)
endif
-ifeq ($(filter true,$(ART_USE_LLVM_COMPILER) $(ART_USE_GREENLAND_COMPILER)),true)
+ifeq ($(filter true,$(ART_USE_LLVM_COMPILER) $(ART_USE_GREENLAND_COMPILER) $(ART_USE_QUICK_COMPILER)),true)
ART_REQUIRE_LLVM := true
else
ART_REQUIRE_LLVM := false
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 9305d4d..3f0280b 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -26,6 +26,10 @@
ART_EXECUTABLES_CFLAGS += -DART_USE_GREENLAND_COMPILER=1
endif
+ifeq ($(ART_USE_QUICK_COMPILER),true)
+ ART_EXECUTABLES_CFLAGS += -DART_USE_QUICK_COMPILER=1
+endif
+
# $(1): executable ("d" will be appended for debug version)
# $(2): source
# $(3): target or host
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 63c4b60..96f6dc3 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -28,6 +28,10 @@
ART_TEST_CFLAGS += -DART_USE_GREENLAND_COMPILER=1
endif
+ifeq ($(ART_USE_QUICK_COMPILER),true)
+ ART_TEST_CFLAGS += -DART_USE_QUICK_COMPILER=1
+endif
+
# $(1): target or host
# $(2): file name
define build-art-test
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index a899341..f27ddaa 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -27,6 +27,12 @@
src/oat/jni/arm/calling_convention_arm.cc \
src/oat/jni/x86/calling_convention_x86.cc
+ifeq ($(ART_USE_QUICK_COMPILER), true)
+LIBART_COMPILER_COMMON_SRC_FILES += \
+ src/greenland/ir_builder.cc \
+ src/greenland/intrinsic_helper.cc
+endif
+
LIBART_COMPILER_ARM_SRC_FILES += \
$(LIBART_COMPILER_COMMON_SRC_FILES) \
src/compiler/codegen/arm/ArchUtility.cc \
@@ -117,16 +123,39 @@
LOCAL_CFLAGS += -D__mips_hard_float
endif
+ ifeq ($(ART_USE_QUICK_COMPILER), true)
+ LOCAL_CFLAGS += -DART_USE_QUICK_COMPILER
+ endif
+
LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
+
+ ifeq ($(ART_USE_QUICK_COMPILER), true)
+ LOCAL_STATIC_LIBRARIES += \
+ libLLVMBitWriter \
+ libLLVMBitReader \
+ libLLVMCore \
+ libLLVMSupport
+ endif
+
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libstlport
else # host
LOCAL_LDLIBS := -ldl -lpthread
endif
ifeq ($$(art_target_or_host),target)
+ ifeq ($(ART_USE_QUICK_COMPILER), true)
+ LOCAL_SHARED_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_DEVICE_BUILD_MK)
+ endif
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_IS_HOST_MODULE := true
+ ifeq ($(ART_USE_QUICK_COMPILER), true)
+ LOCAL_STATIC_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_HOST_BUILD_MK)
+ endif
include $(BUILD_HOST_SHARED_LIBRARY)
endif
diff --git a/build/Android.libart.mk b/build/Android.libart.mk
index b5cc6a0..01fa42b 100644
--- a/build/Android.libart.mk
+++ b/build/Android.libart.mk
@@ -23,6 +23,10 @@
LIBART_CFLAGS += -DART_USE_GREENLAND_COMPILER=1
endif
+ifeq ($(ART_USE_QUICK_COMPILER),true)
+ LIBART_CFLAGS += -DART_USE_QUICK_COMPILER=1
+endif
+
# $(1): target or host
# $(2): ndebug or debug
define build-libart
diff --git a/src/compiler.cc b/src/compiler.cc
index 477b6cc..2e17c9b 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -1668,7 +1668,7 @@
compiled_method->SetGcMap(*gc_map);
}
-#if defined(ART_USE_LLVM_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_QUICK_COMPILER)
void Compiler::SetBitcodeFileName(std::string const& filename) {
typedef void (*SetBitcodeFileNameFn)(Compiler&, std::string const&);
@@ -1679,7 +1679,9 @@
set_bitcode_file_name(*this, filename);
}
+#endif
+#if defined(ART_USE_LLVM_COMPILER)
void Compiler::EnableAutoElfLoading() {
compiler_enable_auto_elf_loading_(*this);
}
diff --git a/src/compiler.h b/src/compiler.h
index 5ae46d5..74d3205 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -142,9 +142,11 @@
bool target_is_direct,
size_t literal_offset);
-#if defined(ART_USE_LLVM_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_QUICK_COMPILER)
void SetBitcodeFileName(std::string const& filename);
+#endif
+#if defined(ART_USE_LLVM_COMPILER)
void EnableAutoElfLoading();
const void* GetMethodCodeAddr(const CompiledMethod* cm,
diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h
index 972bfac..2e43b01 100644
--- a/src/compiler/CompilerIR.h
+++ b/src/compiler/CompilerIR.h
@@ -23,6 +23,10 @@
#include "CompilerUtility.h"
#include "oat_compilation_unit.h"
#include "safe_map.h"
+#if defined(ART_USE_QUICK_COMPILER)
+#include "greenland/ir_builder.h"
+#include "llvm/Module.h"
+#endif
namespace art {
@@ -62,6 +66,7 @@
RegLocationType location:3;
unsigned wide:1;
unsigned defined:1; // Do we know the type?
+ unsigned isConst:1; // Constant, value in cUnit->constantValues[]
unsigned fp:1; // Floating point?
unsigned core:1; // Non-floating point?
unsigned ref:1; // Something GC cares about
@@ -70,6 +75,8 @@
u1 lowReg; // First physical register
u1 highReg; // 2nd physical register (if wide)
int32_t sRegLow; // SSA name for low Dalvik word
+ int32_t origSReg; // TODO: remove after Bitcode gen complete
+ // and consolidate usage w/ sRegLow
};
struct CompilerTemp {
@@ -114,6 +121,8 @@
#define SSA_METHOD_BASEREG (-2)
/* First compiler temp basereg, grows smaller */
#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)
+/* Max SSA name length */
+#define SSA_NAME_MAX 16
/*
* Some code patterns cause the generation of excessively large
@@ -246,6 +255,9 @@
bool hidden;
bool catchEntry;
bool fallThroughTarget; // Reached via fallthrough
+#if defined(ART_USE_QUICK_COMPILER)
+ bool hasReturn;
+#endif
uint16_t startOffset;
uint16_t nestingDepth;
const Method* containingMethod; // For blocks from the callee
@@ -334,6 +346,7 @@
numSSARegs(0),
ssaBaseVRegs(NULL),
ssaSubscripts(NULL),
+ ssaStrings(NULL),
vRegToSSAMap(NULL),
SSALastDefs(NULL),
isConstantV(NULL),
@@ -357,6 +370,7 @@
tempBlockV(NULL),
tempDalvikRegisterV(NULL),
tempSSARegisterV(NULL),
+ tempSSABlockIdV(NULL),
printSSANames(false),
blockLabelList(NULL),
quitLoopMode(false),
@@ -381,11 +395,21 @@
currentArena(NULL),
numArenaBlocks(0),
mstats(NULL),
- opcodeCount(NULL) {
-#if !defined(NDEBUG)
- liveSReg = 0;
+#if defined(ART_USE_QUICK_COMPILER)
+ genBitcode(false),
+ context(NULL),
+ module(NULL),
+ func(NULL),
+ intrinsic_helper(NULL),
+ irb(NULL),
+ placeholderBB(NULL),
+ entryBB(NULL),
+ tempName(0),
#endif
- }
+#ifndef NDEBUG
+ liveSReg(0),
+#endif
+ opcodeCount(NULL) {}
int numBlocks;
GrowableList blockList;
@@ -434,6 +458,7 @@
/* Map SSA reg i to the base virtual register/subscript */
GrowableList* ssaBaseVRegs;
GrowableList* ssaSubscripts;
+ GrowableList* ssaStrings;
/* The following are new data structures to support SSA representations */
/* Map original Dalvik virtual reg i to the current SSA name */
@@ -486,6 +511,7 @@
ArenaBitVector* tempBlockV;
ArenaBitVector* tempDalvikRegisterV;
ArenaBitVector* tempSSARegisterV; // numSSARegs
+ int* tempSSABlockIdV; // working storage for Phi labels
bool printSSANames;
void* blockLabelList;
bool quitLoopMode; // cold path/complex bytecode
@@ -534,7 +560,22 @@
ArenaMemBlock* currentArena;
int numArenaBlocks;
Memstats* mstats;
- int* opcodeCount; // Count Dalvik opcodes for tuning
+#if defined(ART_USE_QUICK_COMPILER)
+ bool genBitcode;
+ llvm::LLVMContext* context;
+ llvm::Module* module;
+ llvm::Function* func;
+ greenland::IntrinsicHelper* intrinsic_helper;
+ greenland::IRBuilder* irb;
+ llvm::BasicBlock* placeholderBB;
+ llvm::BasicBlock* entryBB;
+ std::string bitcode_filename;
+ GrowableList llvmValues;
+ int32_t tempName;
+ SafeMap<llvm::BasicBlock*, LIR*> blockToLabelMap; // llvm bb -> LIR label
+ SafeMap<int32_t, llvm::BasicBlock*> idToBlockMap; // block id -> llvm bb
+ SafeMap<llvm::Value*, RegLocation> locMap; // llvm Value to loc rec
+#endif
#ifndef NDEBUG
/*
* Sanity checking for the register temp tracking. The same ssa
@@ -543,6 +584,7 @@
*/
int liveSReg;
#endif
+ int* opcodeCount; // Count Dalvik opcodes for tuning
};
enum OpSize {
diff --git a/src/compiler/Dataflow.cc b/src/compiler/Dataflow.cc
index 3d2ac5d..3483d5b 100644
--- a/src/compiler/Dataflow.cc
+++ b/src/compiler/Dataflow.cc
@@ -1184,6 +1184,8 @@
int ssaReg = cUnit->numSSARegs++;
oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, vReg);
oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, subscript);
+ char* name = (char*)oatNew(cUnit, SSA_NAME_MAX, true, kAllocDFInfo);
+ oatInsertGrowableList(cUnit, cUnit->ssaStrings, (intptr_t)getSSAName(cUnit, ssaReg, name));
DCHECK_EQ(cUnit->ssaBaseVRegs->numUsed, cUnit->ssaSubscripts->numUsed);
return ssaReg;
}
@@ -1431,14 +1433,14 @@
break;
default:
break;
- }
}
+ }
/* Handle instructions that set up constants directly */
- } else if (dfAttributes & DF_IS_MOVE) {
- int i;
+ } else if (dfAttributes & DF_IS_MOVE) {
+ int i;
- for (i = 0; i < mir->ssaRep->numUses; i++) {
- if (!oatIsBitSet(isConstantV, mir->ssaRep->uses[i])) break;
+ for (i = 0; i < mir->ssaRep->numUses; i++) {
+ if (!oatIsBitSet(isConstantV, mir->ssaRep->uses[i])) break;
}
/* Move a register holding a constant to another register */
if (i == mir->ssaRep->numUses) {
@@ -1465,6 +1467,8 @@
false, kAllocDFInfo);
cUnit->ssaSubscripts = (GrowableList *)oatNew(cUnit, sizeof(GrowableList),
false, kAllocDFInfo);
+ cUnit->ssaStrings = (GrowableList *)oatNew(cUnit, sizeof(GrowableList),
+ false, kAllocDFInfo);
// Create the ssa mappings, estimating the max size
oatInitGrowableList(cUnit, cUnit->ssaBaseVRegs,
numDalvikReg + cUnit->defCount + 128,
@@ -1472,6 +1476,9 @@
oatInitGrowableList(cUnit, cUnit->ssaSubscripts,
numDalvikReg + cUnit->defCount + 128,
kListSSAtoDalvikMap);
+ oatInitGrowableList(cUnit, cUnit->ssaStrings,
+ numDalvikReg + cUnit->defCount + 128,
+ kListSSAtoDalvikMap);
/*
* Initial number of SSA registers is equal to the number of Dalvik
* registers.
@@ -1486,6 +1493,8 @@
for (i = 0; i < numDalvikReg; i++) {
oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, i);
oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, 0);
+ char* name = (char*)oatNew(cUnit, SSA_NAME_MAX, true, kAllocDFInfo);
+ oatInsertGrowableList(cUnit, cUnit->ssaStrings, (intptr_t)getSSAName(cUnit, i, name));
}
/*
diff --git a/src/compiler/Frontend.cc b/src/compiler/Frontend.cc
index a764583..70de4fc 100644
--- a/src/compiler/Frontend.cc
+++ b/src/compiler/Frontend.cc
@@ -712,7 +712,7 @@
* THROW_VERIFICATION_ERROR is also an unconditional
* branch, but we shouldn't treat it as such until we have
* a dead code elimination pass (which won't be important
- * until inlining w/ constant propogation is implemented.
+ * until inlining w/ constant propagation is implemented.
*/
if (insn->dalvikInsn.opcode != Instruction::THROW) {
curBlock->fallThrough = fallthroughBlock;
@@ -764,6 +764,9 @@
cUnit->numIns = code_item->ins_size_;
cUnit->numRegs = code_item->registers_size_ - cUnit->numIns;
cUnit->numOuts = code_item->outs_size_;
+#if defined(ART_USE_QUICK_COMPILER)
+ cUnit->genBitcode = PrettyMethod(method_idx, dex_file).find("Fibonacci.fibonacci") != std::string::npos;
+#endif
/* Adjust this value accordingly once inlining is performed */
cUnit->numDalvikRegisters = code_item->registers_size_;
// TODO: set this from command line
@@ -778,6 +781,9 @@
cUnit->printMe = VLOG_IS_ON(compiler) ||
(cUnit->enableDebug & (1 << kDebugVerbose));
}
+#if defined(ART_USE_QUICK_COMPILER)
+ if (cUnit->genBitcode) cUnit->printMe = true;
+#endif
if (cUnit->instructionSet == kX86) {
// Disable optimizations on X86 for now
cUnit->disableOpt = -1;
@@ -985,6 +991,13 @@
}
}
+#if defined(ART_USE_QUICK_COMPILER)
+ if (cUnit->genBitcode) {
+ // Bitcode generation requires full dataflow analysis, no qdMode
+ cUnit->qdMode = false;
+ }
+#endif
+
if (cUnit->qdMode) {
cUnit->disableDataflow = true;
// Disable optimization which require dataflow/ssa
@@ -1012,6 +1025,18 @@
/* Perform SSA transformation for the whole method */
oatMethodSSATransformation(cUnit.get());
+ /* Do constant propagation */
+ // TODO: Probably need to make these expandable to support new ssa names
+ // introduced during MIR optimization passes
+ cUnit->isConstantV = oatAllocBitVector(cUnit.get(), cUnit->numSSARegs,
+ false /* not expandable */);
+ cUnit->constantValues =
+ (int*)oatNew(cUnit.get(), sizeof(int) * cUnit->numSSARegs, true,
+ kAllocDFInfo);
+ oatDataFlowAnalysisDispatcher(cUnit.get(), oatDoConstantPropagation,
+ kAllNodes,
+ false /* isIterative */);
+
/* Detect loops */
oatMethodLoopDetection(cUnit.get());
@@ -1029,19 +1054,31 @@
/* Allocate Registers using simple local allocation scheme */
oatSimpleRegAlloc(cUnit.get());
- if (specialCase != kNoHandler) {
- /*
- * Custom codegen for special cases. If for any reason the
- * special codegen doesn't success, cUnit->firstLIRInsn will
- * set to NULL;
- */
- oatSpecialMIR2LIR(cUnit.get(), specialCase);
- }
+#if defined(ART_USE_QUICK_COMPILER)
+ /* Go the LLVM path? */
+ if (cUnit->genBitcode) {
+ // MIR->Bitcode
+ oatMethodMIR2Bitcode(cUnit.get());
+ // Bitcode->LIR
+ oatMethodBitcode2LIR(cUnit.get());
+ } else {
+#endif
+ if (specialCase != kNoHandler) {
+ /*
+ * Custom codegen for special cases. If for any reason the
+ * special codegen doesn't succeed, cUnit->firstLIRInsn will
+ * set to NULL;
+ */
+ oatSpecialMIR2LIR(cUnit.get(), specialCase);
+ }
- /* Convert MIR to LIR, etc. */
- if (cUnit->firstLIRInsn == NULL) {
- oatMethodMIR2LIR(cUnit.get());
+ /* Convert MIR to LIR, etc. */
+ if (cUnit->firstLIRInsn == NULL) {
+ oatMethodMIR2LIR(cUnit.get());
+ }
+#if defined(ART_USE_QUICK_COMPILER)
}
+#endif
// Debugging only
if (cUnit->enableDebug & (1 << kDebugDumpCFG)) {
diff --git a/src/compiler/Ralloc.cc b/src/compiler/Ralloc.cc
index 480a96d..bc0c6de 100644
--- a/src/compiler/Ralloc.cc
+++ b/src/compiler/Ralloc.cc
@@ -331,8 +331,9 @@
}
}
-static const RegLocation freshLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0,
- INVALID_REG, INVALID_REG, INVALID_SREG};
+static const RegLocation freshLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+ INVALID_REG, INVALID_REG, INVALID_SREG,
+ INVALID_SREG};
/*
* Simple register allocation. Some Dalvik virtual registers may
@@ -351,6 +352,7 @@
for (i=0; i< cUnit->numSSARegs; i++) {
loc[i] = freshLoc;
loc[i].sRegLow = i;
+ loc[i].isConst = oatIsBitSet(cUnit->isConstantV, i);
}
/* Patch up the locations for Method* and the compiler temps */
@@ -415,10 +417,19 @@
}
}
+#if defined(ART_USE_QUICK_COMPILER)
+ if (!cUnit->genBitcode) {
+ /* Remap names */
+ oatDataFlowAnalysisDispatcher(cUnit, remapNames,
+ kPreOrderDFSTraversal,
+ false /* isIterative */);
+ }
+#else
/* Remap names */
oatDataFlowAnalysisDispatcher(cUnit, remapNames,
kPreOrderDFSTraversal,
false /* isIterative */);
+#endif
/* Do type & size inference pass */
oatDataFlowAnalysisDispatcher(cUnit, inferTypeAndSize,
@@ -432,7 +443,9 @@
*/
for (i=0; i < cUnit->numSSARegs; i++) {
if (cUnit->regLocation[i].location != kLocCompilerTemp) {
- cUnit->regLocation[i].sRegLow = SRegToVReg(cUnit, loc[i].sRegLow);
+ int origSReg = cUnit->regLocation[i].sRegLow;
+ cUnit->regLocation[i].origSReg = origSReg;
+ cUnit->regLocation[i].sRegLow = SRegToVReg(cUnit, origSReg);
}
}
diff --git a/src/compiler/SSATransformation.cc b/src/compiler/SSATransformation.cc
index ada9351..b32ad5b 100644
--- a/src/compiler/SSATransformation.cc
+++ b/src/compiler/SSATransformation.cc
@@ -692,15 +692,20 @@
if (!predBB) break;
int ssaReg = predBB->dataFlowInfo->vRegToSSAMap[vReg];
oatSetBit(cUnit, ssaRegV, ssaReg);
+ cUnit->tempSSABlockIdV[ssaReg] = predBB->id;
}
/* Count the number of SSA registers for a Dalvik register */
int numUses = oatCountSetBits(ssaRegV);
mir->ssaRep->numUses = numUses;
mir->ssaRep->uses =
- (int *) oatNew(cUnit, sizeof(int) * numUses, false, kAllocDFInfo);
+ (int*) oatNew(cUnit, sizeof(int) * numUses, false, kAllocDFInfo);
mir->ssaRep->fpUse =
- (bool *) oatNew(cUnit, sizeof(bool) * numUses, true, kAllocDFInfo);
+ (bool*) oatNew(cUnit, sizeof(bool) * numUses, true, kAllocDFInfo);
+ int* incoming =
+ (int*) oatNew(cUnit, sizeof(int) * numUses, false, kAllocDFInfo);
+ // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
+ mir->dalvikInsn.vB = (intptr_t) incoming;
ArenaBitVectorIterator phiIterator;
@@ -712,6 +717,7 @@
int ssaRegIdx = oatBitVectorIteratorNext(&phiIterator);
if (ssaRegIdx == -1) break;
*usePtr++ = ssaRegIdx;
+ *incoming++ = cUnit->tempSSABlockIdV[ssaRegIdx];
}
}
@@ -796,6 +802,10 @@
cUnit->tempSSARegisterV = oatAllocBitVector(cUnit, cUnit->numSSARegs,
false, kBitMapTempSSARegisterV);
+ cUnit->tempSSABlockIdV =
+ (int*)oatNew(cUnit, sizeof(int) * cUnit->numSSARegs, false,
+ kAllocDFInfo);
+
/* Insert phi-operands with latest SSA names from predecessor blocks */
oatDataFlowAnalysisDispatcher(cUnit, insertPhiNodeOperands,
kReachableNodes, false /* isIterative */);
diff --git a/src/compiler/codegen/CompilerCodegen.h b/src/compiler/codegen/CompilerCodegen.h
index 9381735..4508416 100644
--- a/src/compiler/codegen/CompilerCodegen.h
+++ b/src/compiler/codegen/CompilerCodegen.h
@@ -35,6 +35,10 @@
/* Lower middle-level IR to low-level IR for the whole method */
void oatMethodMIR2LIR(CompilationUnit* cUnit);
+/* Bitcode conversions */
+void oatMethodMIR2Bitcode(CompilationUnit* cUnit);
+void oatMethodBitcode2LIR(CompilationUnit* cUnit);
+
/* Lower middle-level IR to low-level IR for the simple methods */
void oatSpecialMIR2LIR(CompilationUnit* cUnit, SpecialCaseHandler specialCase );
diff --git a/src/compiler/codegen/MethodBitcode.cc b/src/compiler/codegen/MethodBitcode.cc
new file mode 100644
index 0000000..27e7d26
--- /dev/null
+++ b/src/compiler/codegen/MethodBitcode.cc
@@ -0,0 +1,1674 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(ART_USE_QUICK_COMPILER)
+
+#include "object_utils.h"
+
+#include <llvm/Support/ToolOutputFile.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Metadata.h>
+#include <llvm/ADT/DepthFirstIterator.h>
+#include <llvm/Instruction.h>
+#include <llvm/Type.h>
+#include <llvm/Instructions.h>
+#include <llvm/Support/Casting.h>
+
+const char* labelFormat = "L0x%x_d";
+
+namespace art {
+extern const RegLocation badLoc;
+
+llvm::BasicBlock* getLLVMBlock(CompilationUnit* cUnit, int id)
+{
+ return cUnit->idToBlockMap.Get(id);
+}
+
+llvm::Value* getLLVMValue(CompilationUnit* cUnit, int sReg)
+{
+ return (llvm::Value*)oatGrowableListGetElement(&cUnit->llvmValues, sReg);
+}
+
+// Replace the placeholder value with the real definition
+// An llvm::Value placeholder is expected to already exist for sReg
+// (the CHECK below enforces this); all uses of the placeholder are
+// rewritten to |val|, the placeholder's name is transferred to |val|,
+// and the llvmValues table entry for sReg is updated to point at |val|.
+void defineValue(CompilationUnit* cUnit, llvm::Value* val, int sReg)
+{
+ llvm::Value* placeholder = getLLVMValue(cUnit, sReg);
+ CHECK(placeholder != NULL) << "Null placeholder - shouldn't happen";
+ placeholder->replaceAllUsesWith(val);
+ val->takeName(placeholder);
+ cUnit->llvmValues.elemList[sReg] = (intptr_t)val;
+}
+
+llvm::Type* llvmTypeFromLocRec(CompilationUnit* cUnit, RegLocation loc)
+{
+ llvm::Type* res = NULL;
+ if (loc.wide) {
+ if (loc.fp)
+ res = cUnit->irb->GetJDoubleTy();
+ else
+ res = cUnit->irb->GetJLongTy();
+ } else {
+ if (loc.fp) {
+ res = cUnit->irb->GetJFloatTy();
+ } else {
+ if (loc.ref)
+ res = cUnit->irb->GetJObjectTy();
+ else
+ res = cUnit->irb->GetJIntTy();
+ }
+ }
+ return res;
+}
+
+void initIR(CompilationUnit* cUnit)
+{
+ cUnit->context = new llvm::LLVMContext();
+ cUnit->module = new llvm::Module("art", *cUnit->context);
+ llvm::StructType::create(*cUnit->context, "JavaObject");
+ llvm::StructType::create(*cUnit->context, "Method");
+ llvm::StructType::create(*cUnit->context, "Thread");
+ cUnit->intrinsic_helper =
+ new greenland::IntrinsicHelper(*cUnit->context, *cUnit->module);
+ cUnit->irb =
+ new greenland::IRBuilder(*cUnit->context, *cUnit->module,
+ *cUnit->intrinsic_helper);
+}
+
+void freeIR(CompilationUnit* cUnit)
+{
+ delete cUnit->irb;
+ delete cUnit->intrinsic_helper;
+ delete cUnit->module;
+ delete cUnit->context;
+}
+
+const char* llvmSSAName(CompilationUnit* cUnit, int ssaReg) {
+ return GET_ELEM_N(cUnit->ssaStrings, char*, ssaReg);
+}
+
+/*
+ * Emit an intrinsic call that materializes a constant of the type
+ * described by |loc|.  The wide/fp/ref bits select among the Const*
+ * intrinsics; |src| carries the immediate operand(s).
+ */
+llvm::Value* emitConst(CompilationUnit* cUnit, llvm::ArrayRef<llvm::Value*> src,
+                       RegLocation loc)
+{
+  greenland::IntrinsicHelper::IntrinsicId id;
+  if (loc.wide) {
+    if (loc.fp) {
+      id = greenland::IntrinsicHelper::ConstDouble;
+    } else {
+      id = greenland::IntrinsicHelper::ConstLong;
+    }
+  } else {
+    if (loc.fp) {
+      id = greenland::IntrinsicHelper::ConstFloat;
+    } else if (loc.ref) {
+      // BUG FIX: was "} if (loc.ref) {" - without the "else" a float
+      // location fell through and was clobbered with ConstInt.
+      id = greenland::IntrinsicHelper::ConstObj;
+    } else {
+      id = greenland::IntrinsicHelper::ConstInt;
+    }
+  }
+  llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+  return cUnit->irb->CreateCall(intr, src);
+}
+/*
+ * Emit a dummy Copy* intrinsic call for a register-to-register move.
+ * The wide/fp/ref bits of |loc| select the intrinsic flavor.
+ */
+llvm::Value* emitCopy(CompilationUnit* cUnit, llvm::ArrayRef<llvm::Value*> src,
+                      RegLocation loc)
+{
+  greenland::IntrinsicHelper::IntrinsicId id;
+  if (loc.wide) {
+    if (loc.fp) {
+      id = greenland::IntrinsicHelper::CopyDouble;
+    } else {
+      id = greenland::IntrinsicHelper::CopyLong;
+    }
+  } else {
+    if (loc.fp) {
+      id = greenland::IntrinsicHelper::CopyFloat;
+    } else if (loc.ref) {
+      // BUG FIX: was "} if (loc.ref) {" - without the "else" a float
+      // location fell through and was clobbered with CopyInt.
+      id = greenland::IntrinsicHelper::CopyObj;
+    } else {
+      id = greenland::IntrinsicHelper::CopyInt;
+    }
+  }
+  llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+  return cUnit->irb->CreateCall(intr, src);
+}
+
+void emitSuspendCheck(CompilationUnit* cUnit)
+{
+ greenland::IntrinsicHelper::IntrinsicId id =
+ greenland::IntrinsicHelper::CheckSuspend;
+ llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ cUnit->irb->CreateCall(intr);
+}
+
+llvm::Value* convertCompare(CompilationUnit* cUnit, ConditionCode cc,
+ llvm::Value* src1, llvm::Value* src2)
+{
+ llvm::Value* res = NULL;
+ switch(cc) {
+ case kCondEq: res = cUnit->irb->CreateICmpEQ(src1, src2); break;
+ case kCondNe: res = cUnit->irb->CreateICmpNE(src1, src2); break;
+ case kCondLt: res = cUnit->irb->CreateICmpSLT(src1, src2); break;
+ case kCondGe: res = cUnit->irb->CreateICmpSGE(src1, src2); break;
+ case kCondGt: res = cUnit->irb->CreateICmpSGT(src1, src2); break;
+ case kCondLe: res = cUnit->irb->CreateICmpSLE(src1, src2); break;
+ default: LOG(FATAL) << "Unexpected cc value " << cc;
+ }
+ return res;
+}
+
+/*
+ * Convert a two-register compare-and-branch (IF_EQ..IF_LE) to an LLVM
+ * icmp + conditional branch.  A suspend check is emitted when the taken
+ * target's offset is at or before this instruction, i.e. on backward
+ * branches (loop back-edges).
+ */
+void convertCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
+                             ConditionCode cc, RegLocation rlSrc1,
+                             RegLocation rlSrc2)
+{
+ if (bb->taken->startOffset <= mir->offset) {
+ emitSuspendCheck(cUnit);
+ }
+ llvm::Value* src1 = getLLVMValue(cUnit, rlSrc1.origSReg);
+ llvm::Value* src2 = getLLVMValue(cUnit, rlSrc2.origSReg);
+ llvm::Value* condValue = convertCompare(cUnit, cc, src1, src2);
+ // Temporaries get sequential names t0, t1, ... for readable bitcode.
+ condValue->setName(StringPrintf("t%d", cUnit->tempName++));
+ cUnit->irb->CreateCondBr(condValue, getLLVMBlock(cUnit, bb->taken->id),
+ getLLVMBlock(cUnit, bb->fallThrough->id));
+}
+
+void convertCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
+ MIR* mir, ConditionCode cc, RegLocation rlSrc1)
+{
+ if (bb->taken->startOffset <= mir->offset) {
+ emitSuspendCheck(cUnit);
+ }
+ llvm::Value* src1 = getLLVMValue(cUnit, rlSrc1.origSReg);
+ llvm::Value* src2;
+ if (rlSrc1.ref) {
+ src2 = cUnit->irb->GetJNull();
+ } else {
+ src2 = cUnit->irb->getInt32(0);
+ }
+ llvm::Value* condValue = convertCompare(cUnit, cc, src1, src2);
+ condValue->setName(StringPrintf("t%d", cUnit->tempName++));
+ cUnit->irb->CreateCondBr(condValue, getLLVMBlock(cUnit, bb->taken->id),
+ getLLVMBlock(cUnit, bb->fallThrough->id));
+}
+
+llvm::Value* genDivModOp(CompilationUnit* cUnit, bool isDiv, bool isLong,
+ llvm::Value* src1, llvm::Value* src2)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ if (isLong) {
+ if (isDiv) {
+ id = greenland::IntrinsicHelper::DivLong;
+ } else {
+ id = greenland::IntrinsicHelper::RemLong;
+ }
+ } else if (isDiv) {
+ id = greenland::IntrinsicHelper::DivInt;
+ } else {
+ id = greenland::IntrinsicHelper::RemInt;
+ }
+ llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2>args;
+ args.push_back(src1);
+ args.push_back(src2);
+ return cUnit->irb->CreateCall(intr, args);
+}
+
+llvm::Value* genArithOp(CompilationUnit* cUnit, OpKind op, bool isLong,
+ llvm::Value* src1, llvm::Value* src2)
+{
+ llvm::Value* res = NULL;
+ switch(op) {
+ case kOpAdd: res = cUnit->irb->CreateAdd(src1, src2); break;
+ case kOpSub: res = cUnit->irb->CreateSub(src1, src2); break;
+ case kOpMul: res = cUnit->irb->CreateMul(src1, src2); break;
+ case kOpOr: res = cUnit->irb->CreateOr(src1, src2); break;
+ case kOpAnd: res = cUnit->irb->CreateAnd(src1, src2); break;
+ case kOpXor: res = cUnit->irb->CreateXor(src1, src2); break;
+ case kOpDiv: res = genDivModOp(cUnit, true, isLong, src1, src2); break;
+ case kOpRem: res = genDivModOp(cUnit, false, isLong, src1, src2); break;
+ case kOpLsl: UNIMPLEMENTED(FATAL) << "Need Lsl"; break;
+ case kOpLsr: UNIMPLEMENTED(FATAL) << "Need Lsr"; break;
+ case kOpAsr: UNIMPLEMENTED(FATAL) << "Need Asr"; break;
+ default:
+ LOG(FATAL) << "Invalid op " << op;
+ }
+ return res;
+}
+
+void convertFPArithOp(CompilationUnit* cUnit, OpKind op, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ llvm::Value* src1 = getLLVMValue(cUnit, rlSrc1.origSReg);
+ llvm::Value* src2 = getLLVMValue(cUnit, rlSrc2.origSReg);
+ llvm::Value* res = NULL;
+ switch(op) {
+ case kOpAdd: res = cUnit->irb->CreateFAdd(src1, src2); break;
+ case kOpSub: res = cUnit->irb->CreateFSub(src1, src2); break;
+ case kOpMul: res = cUnit->irb->CreateFMul(src1, src2); break;
+ case kOpDiv: res = cUnit->irb->CreateFDiv(src1, src2); break;
+ case kOpRem: res = cUnit->irb->CreateFRem(src1, src2); break;
+ default:
+ LOG(FATAL) << "Invalid op " << op;
+ }
+ defineValue(cUnit, res, rlDest.origSReg);
+}
+
+void convertArithOp(CompilationUnit* cUnit, OpKind op, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ llvm::Value* src1 = getLLVMValue(cUnit, rlSrc1.origSReg);
+ llvm::Value* src2 = getLLVMValue(cUnit, rlSrc2.origSReg);
+ llvm::Value* res = genArithOp(cUnit, op, rlDest.wide, src1, src2);
+ defineValue(cUnit, res, rlDest.origSReg);
+}
+
+void convertArithOpLit(CompilationUnit* cUnit, OpKind op, RegLocation rlDest,
+ RegLocation rlSrc1, int32_t imm)
+{
+ llvm::Value* src1 = getLLVMValue(cUnit, rlSrc1.origSReg);
+ llvm::Value* src2 = cUnit->irb->getInt32(imm);
+ llvm::Value* res = genArithOp(cUnit, op, rlDest.wide, src1, src2);
+ defineValue(cUnit, res, rlDest.origSReg);
+}
+
+/*
+ * Target-independent code generation. Use only high-level
+ * load/store utilities here, or target-dependent genXX() handlers
+ * when necessary.
+ */
+bool convertMIRNode(CompilationUnit* cUnit, MIR* mir, BasicBlock* bb,
+ llvm::BasicBlock* llvmBB, LIR* labelList)
+{
+ bool res = false; // Assume success
+ RegLocation rlSrc[3];
+ RegLocation rlDest = badLoc;
+ RegLocation rlResult = badLoc;
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+
+ /* Prep Src and Dest locations */
+ int nextSreg = 0;
+ int nextLoc = 0;
+ int attrs = oatDataFlowAttributes[opcode];
+ rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
+ if (attrs & DF_UA) {
+ if (attrs & DF_A_WIDE) {
+ rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg, nextSreg + 1);
+ nextSreg+= 2;
+ } else {
+ rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
+ nextSreg++;
+ }
+ }
+ if (attrs & DF_UB) {
+ if (attrs & DF_B_WIDE) {
+ rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg, nextSreg + 1);
+ nextSreg+= 2;
+ } else {
+ rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
+ nextSreg++;
+ }
+ }
+ if (attrs & DF_UC) {
+ if (attrs & DF_C_WIDE) {
+ rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg, nextSreg + 1);
+ } else {
+ rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
+ }
+ }
+ if (attrs & DF_DA) {
+ if (attrs & DF_A_WIDE) {
+ rlDest = oatGetDestWide(cUnit, mir, 0, 1);
+ } else {
+ rlDest = oatGetDest(cUnit, mir, 0);
+ }
+ }
+
+ switch (opcode) {
+ case Instruction::NOP:
+ break;
+
+ case Instruction::MOVE:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_WIDE_FROM16: {
+ /*
+ * Moves/copies are meaningless in pure SSA register form,
+ * but we need to preserve them for the conversion back into
+ * MIR (at least until we stop using the Dalvik register maps).
+ * Insert a dummy intrinsic copy call, which will be recognized
+ * by the quick path and removed by the portable path.
+ */
+ llvm::Value* src = getLLVMValue(cUnit, rlSrc[0].origSReg);
+ llvm::Value* res = emitCopy(cUnit, src, rlDest);
+ defineValue(cUnit, res, rlDest.origSReg);
+ }
+ break;
+
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16: {
+ llvm::Constant* immValue = cUnit->irb->GetJInt(mir->dalvikInsn.vB);
+ llvm::Value* res = emitConst(cUnit, immValue, rlDest);
+ defineValue(cUnit, res, rlDest.origSReg);
+ }
+ break;
+
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32: {
+ llvm::Constant* immValue = cUnit->irb->GetJLong(mir->dalvikInsn.vB);
+ llvm::Value* res = emitConst(cUnit, immValue, rlDest);
+ defineValue(cUnit, res, rlDest.origSReg);
+ }
+ break;
+
+ case Instruction::CONST_HIGH16: {
+ llvm::Constant* immValue = cUnit->irb->GetJInt(mir->dalvikInsn.vB << 16);
+ llvm::Value* res = emitConst(cUnit, immValue, rlDest);
+ defineValue(cUnit, res, rlDest.origSReg);
+ }
+ break;
+
+    case Instruction::CONST_WIDE: {
+        // 64-bit immediate comes from the instruction's wide literal.
+        llvm::Constant* immValue =
+            cUnit->irb->GetJLong(mir->dalvikInsn.vB_wide);
+        llvm::Value* res = emitConst(cUnit, immValue, rlDest);
+        defineValue(cUnit, res, rlDest.origSReg);
+      }
+      // BUG FIX: missing break fell through into CONST_WIDE_HIGH16,
+      // calling defineValue twice for the same sReg.
+      break;
+
+    case Instruction::CONST_WIDE_HIGH16: {
+        // vB holds the high 16 bits of a 64-bit constant; shift into place.
+        int64_t imm = static_cast<int64_t>(mir->dalvikInsn.vB) << 48;
+        llvm::Constant* immValue = cUnit->irb->GetJLong(imm);
+        llvm::Value* res = emitConst(cUnit, immValue, rlDest);
+        defineValue(cUnit, res, rlDest.origSReg);
+      }
+      // BUG FIX: missing break fell through into the RETURN_WIDE case,
+      // emitting a spurious return after every const-wide/high16.
+      break;
+
+    case Instruction::RETURN_WIDE:
+    case Instruction::RETURN:
+    case Instruction::RETURN_OBJECT: {
+        // Non-leaf methods poll for thread suspension before returning.
+        // BUG FIX: "!cUnit->attrs & METHOD_IS_LEAF" applied ! before &,
+        // computing (!attrs) & flag; the mask test must be parenthesized.
+        if (!(cUnit->attrs & METHOD_IS_LEAF)) {
+          emitSuspendCheck(cUnit);
+        }
+        cUnit->irb->CreateRet(getLLVMValue(cUnit, rlSrc[0].origSReg));
+        bb->hasReturn = true;
+      }
+      break;
+
+    case Instruction::RETURN_VOID: {
+        // Same suspend-check rule (and same precedence fix) as RETURN.
+        if (!(cUnit->attrs & METHOD_IS_LEAF)) {
+          emitSuspendCheck(cUnit);
+        }
+        cUnit->irb->CreateRetVoid();
+        bb->hasReturn = true;
+      }
+      break;
+
+ case Instruction::IF_EQ:
+ convertCompareAndBranch(cUnit, bb, mir, kCondEq, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::IF_NE:
+ convertCompareAndBranch(cUnit, bb, mir, kCondNe, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::IF_LT:
+ convertCompareAndBranch(cUnit, bb, mir, kCondLt, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::IF_GE:
+ convertCompareAndBranch(cUnit, bb, mir, kCondGe, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::IF_GT:
+ convertCompareAndBranch(cUnit, bb, mir, kCondGt, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::IF_LE:
+ convertCompareAndBranch(cUnit, bb, mir, kCondLe, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::IF_EQZ:
+ convertCompareZeroAndBranch(cUnit, bb, mir, kCondEq, rlSrc[0]);
+ break;
+ case Instruction::IF_NEZ:
+ convertCompareZeroAndBranch(cUnit, bb, mir, kCondNe, rlSrc[0]);
+ break;
+ case Instruction::IF_LTZ:
+ convertCompareZeroAndBranch(cUnit, bb, mir, kCondLt, rlSrc[0]);
+ break;
+ case Instruction::IF_GEZ:
+ convertCompareZeroAndBranch(cUnit, bb, mir, kCondGe, rlSrc[0]);
+ break;
+ case Instruction::IF_GTZ:
+ convertCompareZeroAndBranch(cUnit, bb, mir, kCondGt, rlSrc[0]);
+ break;
+ case Instruction::IF_LEZ:
+ convertCompareZeroAndBranch(cUnit, bb, mir, kCondLe, rlSrc[0]);
+ break;
+
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32: {
+ if (bb->taken->startOffset <= bb->startOffset) {
+ emitSuspendCheck(cUnit);
+ }
+ cUnit->irb->CreateBr(getLLVMBlock(cUnit, bb->taken->id));
+ }
+ break;
+
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ convertArithOp(cUnit, kOpAdd, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ convertArithOp(cUnit, kOpSub, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ convertArithOp(cUnit, kOpMul, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ convertArithOp(cUnit, kOpDiv, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ convertArithOp(cUnit, kOpRem, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::AND_LONG:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ convertArithOp(cUnit, kOpAnd, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ convertArithOp(cUnit, kOpOr, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ convertArithOp(cUnit, kOpXor, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ convertArithOp(cUnit, kOpLsl, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ convertArithOp(cUnit, kOpAsr, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ convertArithOp(cUnit, kOpLsr, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ convertArithOpLit(cUnit, kOpAdd, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::RSUB_INT:
+ case Instruction::RSUB_INT_LIT8:
+ convertArithOpLit(cUnit, kOpRsub, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::MUL_INT_LIT8:
+ convertArithOpLit(cUnit, kOpMul, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::DIV_INT_LIT8:
+ convertArithOpLit(cUnit, kOpDiv, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::REM_INT_LIT16:
+ case Instruction::REM_INT_LIT8:
+ convertArithOpLit(cUnit, kOpRem, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::AND_INT_LIT16:
+ case Instruction::AND_INT_LIT8:
+ convertArithOpLit(cUnit, kOpAnd, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::OR_INT_LIT16:
+ case Instruction::OR_INT_LIT8:
+ convertArithOpLit(cUnit, kOpOr, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::XOR_INT_LIT8:
+ convertArithOpLit(cUnit, kOpXor, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::SHL_INT_LIT8:
+ convertArithOpLit(cUnit, kOpLsl, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::SHR_INT_LIT8:
+ convertArithOpLit(cUnit, kOpLsr, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+ case Instruction::USHR_INT_LIT8:
+ convertArithOpLit(cUnit, kOpAsr, rlDest, rlSrc[0], mir->dalvikInsn.vC);
+ break;
+
+ case Instruction::ADD_FLOAT:
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ case Instruction::ADD_DOUBLE_2ADDR:
+ convertFPArithOp(cUnit, kOpAdd, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::SUB_FLOAT:
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ case Instruction::SUB_DOUBLE_2ADDR:
+ convertFPArithOp(cUnit, kOpSub, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::MUL_FLOAT:
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ case Instruction::MUL_DOUBLE_2ADDR:
+ convertFPArithOp(cUnit, kOpMul, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::DIV_FLOAT:
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ convertFPArithOp(cUnit, kOpDiv, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::REM_FLOAT:
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_DOUBLE:
+ case Instruction::REM_DOUBLE_2ADDR:
+ convertFPArithOp(cUnit, kOpRem, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+#if 0
+
+ case Instruction::MOVE_EXCEPTION: {
+ int exOffset = Thread::ExceptionOffset().Int32Value();
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+#if defined(TARGET_X86)
+ newLIR2(cUnit, kX86Mov32RT, rlResult.lowReg, exOffset);
+ newLIR2(cUnit, kX86Mov32TI, exOffset, 0);
+#else
+ int resetReg = oatAllocTemp(cUnit);
+ loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
+ loadConstant(cUnit, resetReg, 0);
+ storeWordDisp(cUnit, rSELF, exOffset, resetReg);
+ storeValue(cUnit, rlDest, rlResult);
+ oatFreeTemp(cUnit, resetReg);
+#endif
+ break;
+ }
+
+ case Instruction::MOVE_RESULT_WIDE:
+ if (mir->optimizationFlags & MIR_INLINED)
+ break; // Nop - combined w/ previous invoke
+ storeValueWide(cUnit, rlDest, oatGetReturnWide(cUnit, rlDest.fp));
+ break;
+
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_OBJECT:
+ if (mir->optimizationFlags & MIR_INLINED)
+ break; // Nop - combined w/ previous invoke
+ storeValue(cUnit, rlDest, oatGetReturn(cUnit, rlDest.fp));
+ break;
+
+ case Instruction::MONITOR_ENTER:
+ genMonitorEnter(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::MONITOR_EXIT:
+ genMonitorExit(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::CHECK_CAST:
+ genCheckCast(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::INSTANCE_OF:
+ genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::NEW_INSTANCE:
+ genNewInstance(cUnit, mir, rlDest);
+ break;
+
+ case Instruction::THROW:
+ genThrow(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::THROW_VERIFICATION_ERROR:
+ genThrowVerificationError(cUnit, mir);
+ break;
+
+ case Instruction::ARRAY_LENGTH:
+ int lenOffset;
+ lenOffset = Array::LengthOffset().Int32Value();
+ rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
+ genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg, mir);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset, rlResult.lowReg);
+ storeValue(cUnit, rlDest, rlResult);
+ break;
+
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ genConstString(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::CONST_CLASS:
+ genConstClass(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::FILL_ARRAY_DATA:
+ genFillArrayData(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::FILLED_NEW_ARRAY:
+ genFilledNewArray(cUnit, mir, false /* not range */);
+ break;
+
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ genFilledNewArray(cUnit, mir, true /* range */);
+ break;
+
+ case Instruction::NEW_ARRAY:
+ genNewArray(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::PACKED_SWITCH:
+ genPackedSwitch(cUnit, mir, rlSrc[0]);
+ break;
+
+ case Instruction::SPARSE_SWITCH:
+ genSparseSwitch(cUnit, mir, rlSrc[0], labelList);
+ break;
+
+ case Instruction::CMPL_FLOAT:
+ case Instruction::CMPG_FLOAT:
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_DOUBLE:
+ res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::CMP_LONG:
+ genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
+ break;
+
+ case Instruction::AGET_WIDE:
+ genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
+ break;
+ case Instruction::AGET:
+ case Instruction::AGET_OBJECT:
+ genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
+ break;
+ case Instruction::AGET_BOOLEAN:
+ genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
+ break;
+ case Instruction::AGET_BYTE:
+ genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
+ break;
+ case Instruction::AGET_CHAR:
+ genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
+ break;
+ case Instruction::AGET_SHORT:
+ genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
+ break;
+ case Instruction::APUT_WIDE:
+ genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
+ break;
+ case Instruction::APUT:
+ genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
+ break;
+ case Instruction::APUT_OBJECT:
+ genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
+ break;
+ case Instruction::APUT_SHORT:
+ case Instruction::APUT_CHAR:
+ genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2], rlSrc[0], 1);
+ break;
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_BOOLEAN:
+ genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
+ rlSrc[0], 0);
+ break;
+
+ case Instruction::IGET_OBJECT:
+ //case Instruction::IGET_OBJECT_VOLATILE:
+ genIGet(cUnit, mir, kWord, rlDest, rlSrc[0], false, true);
+ break;
+
+ case Instruction::IGET_WIDE:
+ //case Instruction::IGET_WIDE_VOLATILE:
+ genIGet(cUnit, mir, kLong, rlDest, rlSrc[0], true, false);
+ break;
+
+ case Instruction::IGET:
+ //case Instruction::IGET_VOLATILE:
+ genIGet(cUnit, mir, kWord, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IGET_CHAR:
+ genIGet(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IGET_SHORT:
+ genIGet(cUnit, mir, kSignedHalf, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ genIGet(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0], false, false);
+ break;
+
+ case Instruction::IPUT_WIDE:
+ //case Instruction::IPUT_WIDE_VOLATILE:
+ genIPut(cUnit, mir, kLong, rlSrc[0], rlSrc[1], true, false);
+ break;
+
+ case Instruction::IPUT_OBJECT:
+ //case Instruction::IPUT_OBJECT_VOLATILE:
+ genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false, true);
+ break;
+
+ case Instruction::IPUT:
+ //case Instruction::IPUT_VOLATILE:
+ genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ genIPut(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::IPUT_CHAR:
+ genIPut(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::IPUT_SHORT:
+ genIPut(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false, false);
+ break;
+
+ case Instruction::SGET_OBJECT:
+ genSget(cUnit, mir, rlDest, false, true);
+ break;
+ case Instruction::SGET:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ genSget(cUnit, mir, rlDest, false, false);
+ break;
+
+ case Instruction::SGET_WIDE:
+ genSget(cUnit, mir, rlDest, true, false);
+ break;
+
+ case Instruction::SPUT_OBJECT:
+ genSput(cUnit, mir, rlSrc[0], false, true);
+ break;
+
+ case Instruction::SPUT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ genSput(cUnit, mir, rlSrc[0], false, false);
+ break;
+
+ case Instruction::SPUT_WIDE:
+ genSput(cUnit, mir, rlSrc[0], true, false);
+ break;
+
+ case Instruction::INVOKE_STATIC_RANGE:
+ genInvoke(cUnit, bb, mir, kStatic, true /*range*/);
+ break;
+ case Instruction::INVOKE_STATIC:
+ genInvoke(cUnit, bb, mir, kStatic, false /*range*/);
+ break;
+
+ case Instruction::INVOKE_DIRECT:
+ genInvoke(cUnit, bb, mir, kDirect, false /*range*/);
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ genInvoke(cUnit, bb, mir, kDirect, true /*range*/);
+ break;
+
+ case Instruction::INVOKE_VIRTUAL:
+ genInvoke(cUnit, bb, mir, kVirtual, false /*range*/);
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ genInvoke(cUnit, bb, mir, kVirtual, true /*range*/);
+ break;
+
+ case Instruction::INVOKE_SUPER:
+ genInvoke(cUnit, bb, mir, kSuper, false /*range*/);
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ genInvoke(cUnit, bb, mir, kSuper, true /*range*/);
+ break;
+
+ case Instruction::INVOKE_INTERFACE:
+ genInvoke(cUnit, bb, mir, kInterface, false /*range*/);
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ genInvoke(cUnit, bb, mir, kInterface, true /*range*/);
+ break;
+
+ case Instruction::NEG_INT:
+ case Instruction::NOT_INT:
+ res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::NEG_LONG:
+ case Instruction::NOT_LONG:
+ res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::NEG_FLOAT:
+ res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::NEG_DOUBLE:
+ res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
+ break;
+
+ case Instruction::INT_TO_LONG:
+ genIntToLong(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::LONG_TO_INT:
+ rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
+ rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
+ storeValue(cUnit, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::INT_TO_BYTE:
+ case Instruction::INT_TO_SHORT:
+ case Instruction::INT_TO_CHAR:
+ genIntNarrowing(cUnit, mir, rlDest, rlSrc[0]);
+ break;
+
+ case Instruction::INT_TO_FLOAT:
+ case Instruction::INT_TO_DOUBLE:
+ case Instruction::LONG_TO_FLOAT:
+ case Instruction::LONG_TO_DOUBLE:
+ case Instruction::FLOAT_TO_INT:
+ case Instruction::FLOAT_TO_LONG:
+ case Instruction::FLOAT_TO_DOUBLE:
+ case Instruction::DOUBLE_TO_INT:
+ case Instruction::DOUBLE_TO_LONG:
+ case Instruction::DOUBLE_TO_FLOAT:
+ genConversion(cUnit, mir);
+ break;
+
+#endif
+
+ default:
+ res = true;
+ }
+ return res;
+}
+
+/* Extended MIR instructions like PHI */
+void convertExtendedMIR(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
+ llvm::BasicBlock* llvmBB)
+{
+
+ switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
+ case kMirOpPhi: {
+ int* incoming = (int*)mir->dalvikInsn.vB;
+ RegLocation rlDest = cUnit->regLocation[mir->ssaRep->defs[0]];
+ llvm::Type* phiType =
+ llvmTypeFromLocRec(cUnit, rlDest);
+ llvm::PHINode* phi = cUnit->irb->CreatePHI(phiType, mir->ssaRep->numUses);
+ for (int i = 0; i < mir->ssaRep->numUses; i++) {
+ RegLocation loc;
+ if (rlDest.wide) {
+ loc = oatGetSrcWide(cUnit, mir, i, i+1);
+ i++;
+ } else {
+ loc = oatGetSrc(cUnit, mir, i);
+ }
+ phi->addIncoming(getLLVMValue(cUnit, loc.origSReg),
+ getLLVMBlock(cUnit, incoming[i]));
+ }
+ defineValue(cUnit, phi, rlDest.origSReg);
+ break;
+ }
+ case kMirOpCopy: {
+ UNIMPLEMENTED(WARNING) << "unimp kMirOpPhi";
+ break;
+ }
+#if defined(TARGET_ARM)
+ case kMirOpFusedCmplFloat:
+ UNIMPLEMENTED(WARNING) << "unimp kMirOpFusedCmpFloat";
+ break;
+ case kMirOpFusedCmpgFloat:
+ UNIMPLEMENTED(WARNING) << "unimp kMirOpFusedCmgFloat";
+ break;
+ case kMirOpFusedCmplDouble:
+ UNIMPLEMENTED(WARNING) << "unimp kMirOpFusedCmplDouble";
+ break;
+ case kMirOpFusedCmpgDouble:
+ UNIMPLEMENTED(WARNING) << "unimp kMirOpFusedCmpgDouble";
+ break;
+ case kMirOpFusedCmpLong:
+ UNIMPLEMENTED(WARNING) << "unimp kMirOpLongCmpBranch";
+ break;
+#endif
+ default:
+ break;
+ }
+}
+
+void setDexOffset(CompilationUnit* cUnit, int32_t offset)
+{
+ cUnit->currentDalvikOffset = offset;
+ llvm::SmallVector<llvm::Value*, 1>arrayRef;
+ arrayRef.push_back(cUnit->irb->getInt32(offset));
+ llvm::MDNode* node = llvm::MDNode::get(*cUnit->context, arrayRef);
+ cUnit->irb->SetDexOffset(node);
+}
+
+// Attach method info as metadata to special intrinsic
+void setMethodInfo(CompilationUnit* cUnit)
+{
+ // We don't want dex offset on this
+ cUnit->irb->SetDexOffset(NULL);
+ greenland::IntrinsicHelper::IntrinsicId id;
+ id = greenland::IntrinsicHelper::MethodInfo;
+ llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Instruction* inst = cUnit->irb->CreateCall(intr);
+ llvm::SmallVector<llvm::Value*, 2> regInfo;
+ regInfo.push_back(cUnit->irb->getInt32(cUnit->numIns));
+ regInfo.push_back(cUnit->irb->getInt32(cUnit->numRegs));
+ regInfo.push_back(cUnit->irb->getInt32(cUnit->numOuts));
+ regInfo.push_back(cUnit->irb->getInt32(cUnit->numCompilerTemps));
+ regInfo.push_back(cUnit->irb->getInt32(cUnit->numSSARegs));
+ llvm::MDNode* regInfoNode = llvm::MDNode::get(*cUnit->context, regInfo);
+ inst->setMetadata("RegInfo", regInfoNode);
+ int promoSize = cUnit->numDalvikRegisters + cUnit->numCompilerTemps + 1;
+ llvm::SmallVector<llvm::Value*, 50> pmap;
+ for (int i = 0; i < promoSize; i++) {
+ PromotionMap* p = &cUnit->promotionMap[i];
+ int32_t mapData = ((p->firstInPair & 0xff) << 24) |
+ ((p->fpReg & 0xff) << 16) |
+ ((p->coreReg & 0xff) << 8) |
+ ((p->fpLocation & 0xf) << 4) |
+ (p->coreLocation & 0xf);
+ pmap.push_back(cUnit->irb->getInt32(mapData));
+ }
+ llvm::MDNode* mapNode = llvm::MDNode::get(*cUnit->context, pmap);
+ inst->setMetadata("PromotionMap", mapNode);
+ setDexOffset(cUnit, cUnit->currentDalvikOffset);
+}
+
/*
 * Convert the contents of one MIR basic block to bitcode.  Positions the
 * IR builder at the block's llvm counterpart, emits method-info metadata
 * for the entry block, then dispatches each MIR to the extended-opcode
 * or Dalvik-opcode converter.  Always returns false (dataflow dispatcher
 * convention for "no change").
 */
bool methodBlockBitcodeConversion(CompilationUnit* cUnit, BasicBlock* bb)
{
  llvm::BasicBlock* llvmBB = getLLVMBlock(cUnit, bb->id);
  cUnit->irb->SetInsertPoint(llvmBB);
  setDexOffset(cUnit, bb->startOffset);

  if (bb->blockType == kEntryBlock) {
    setMethodInfo(cUnit);
    //genEntrySequence(cUnit, bb);
  } else if (bb->blockType == kExitBlock) {
    /*
     * Because of the differences between how MIR/LIR and llvm handle exit
     * blocks, we won't explicitly convert them.  On the llvm-to-lir
     * path, they will need to be regenerated.
     */
    return false;
  }

  for (MIR* mir = bb->firstMIRInsn; mir; mir = mir->next) {

    // Keep the dex-offset metadata current for each emitted instruction.
    setDexOffset(cUnit, mir->offset);

    Instruction::Code dalvikOpcode = mir->dalvikInsn.opcode;
    Instruction::Format dalvikFormat = Instruction::FormatOf(dalvikOpcode);

    /* If we're compiling for the debugger, generate an update callout */
    if (cUnit->genDebugger) {
      UNIMPLEMENTED(FATAL) << "Need debug codegen";
      //genDebuggerUpdate(cUnit, mir->offset);
    }

    // Extended (compiler-internal) opcodes such as phi take a separate
    // path; they have no Dalvik encoding.
    if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
      convertExtendedMIR(cUnit, bb, mir, llvmBB);
      continue;
    }

    bool notHandled = convertMIRNode(cUnit, mir, bb, llvmBB,
                                     NULL /* labelList */);
    if (notHandled) {
      LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
                                   mir->offset, dalvikOpcode,
                                   Instruction::Name(dalvikOpcode),
                                   dalvikFormat);
    }
  }

  // A block that ends without a branch or return falls through; make the
  // fall-through edge explicit for llvm.
  if ((bb->taken == NULL) && (bb->fallThrough != NULL) && !bb->hasReturn) {
    cUnit->irb->CreateBr(getLLVMBlock(cUnit, bb->fallThrough->id));
  }

  return false;
}
+
+llvm::FunctionType* getFunctionType(CompilationUnit* cUnit) {
+
+ // Get return type
+ llvm::Type* ret_type = cUnit->irb->GetJType(cUnit->shorty[0],
+ greenland::kAccurate);
+
+ // Get argument type
+ std::vector<llvm::Type*> args_type;
+
+ // method object
+ args_type.push_back(cUnit->irb->GetJMethodTy());
+
+ // Do we have a "this"?
+ if ((cUnit->access_flags & kAccStatic) == 0) {
+ args_type.push_back(cUnit->irb->GetJObjectTy());
+ }
+
+ for (uint32_t i = 1; i < strlen(cUnit->shorty); ++i) {
+ args_type.push_back(cUnit->irb->GetJType(cUnit->shorty[i],
+ greenland::kAccurate));
+ }
+
+ return llvm::FunctionType::get(ret_type, args_type, false);
+}
+
+bool createFunction(CompilationUnit* cUnit) {
+ std::string func_name(PrettyMethod(cUnit->method_idx, *cUnit->dex_file,
+ /* with_signature */ false));
+ llvm::FunctionType* func_type = getFunctionType(cUnit);
+
+ if (func_type == NULL) {
+ return false;
+ }
+
+ cUnit->func = llvm::Function::Create(func_type,
+ llvm::Function::ExternalLinkage,
+ func_name, cUnit->module);
+
+ llvm::Function::arg_iterator arg_iter(cUnit->func->arg_begin());
+ llvm::Function::arg_iterator arg_end(cUnit->func->arg_end());
+
+ arg_iter->setName("method");
+ ++arg_iter;
+
+ int startSReg = cUnit->numRegs;
+
+ for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
+ arg_iter->setName(StringPrintf("v%i_0", startSReg));
+ startSReg += cUnit->regLocation[startSReg].wide ? 2 : 1;
+ }
+
+ return true;
+}
+
+bool createLLVMBasicBlock(CompilationUnit* cUnit, BasicBlock* bb)
+{
+ // Skip the exit block
+ if (bb->blockType == kExitBlock) {
+ cUnit->idToBlockMap.Put(bb->id, NULL);
+ } else {
+ int offset = bb->startOffset;
+ bool entryBlock = (bb->blockType == kEntryBlock);
+ llvm::BasicBlock* llvmBB =
+ llvm::BasicBlock::Create(*cUnit->context, entryBlock ? "entry" :
+ StringPrintf(labelFormat, offset, bb->id),
+ cUnit->func);
+ if (entryBlock) {
+ cUnit->entryBB = llvmBB;
+ cUnit->placeholderBB =
+ llvm::BasicBlock::Create(*cUnit->context, "placeholder",
+ cUnit->func);
+ }
+ cUnit->idToBlockMap.Put(bb->id, llvmBB);
+ }
+ return false;
+}
+
+
+/*
+ * Convert MIR to LLVM_IR
+ * o For each ssa name, create LLVM named value. Type these
+ * appropriately, and ignore high half of wide and double operands.
+ * o For each MIR basic block, create an LLVM basic block.
+ * o Iterate through the MIR a basic block at a time, setting arguments
+ * to recovered ssa name.
+ */
+void oatMethodMIR2Bitcode(CompilationUnit* cUnit)
+{
+ initIR(cUnit);
+ oatInitGrowableList(cUnit, &cUnit->llvmValues, cUnit->numSSARegs);
+
+ // Create the function
+ createFunction(cUnit);
+
+ // Create an LLVM basic block for each MIR block in dfs preorder
+ oatDataFlowAnalysisDispatcher(cUnit, createLLVMBasicBlock,
+ kPreOrderDFSTraversal, false /* isIterative */);
+ /*
+ * Create an llvm named value for each MIR SSA name. Note: we'll use
+ * placeholders for all non-argument values (because we haven't seen
+ * the definition yet).
+ */
+ cUnit->irb->SetInsertPoint(cUnit->placeholderBB);
+ llvm::Function::arg_iterator arg_iter(cUnit->func->arg_begin());
+ arg_iter++; /* Skip path method */
+ for (int i = 0; i < cUnit->numSSARegs; i++) {
+ llvm::Value* val;
+ llvm::Type* ty = llvmTypeFromLocRec(cUnit, cUnit->regLocation[i]);
+ if (i < cUnit->numRegs) {
+ // Skip non-argument _0 names - should never be a use
+ oatInsertGrowableList(cUnit, &cUnit->llvmValues, (intptr_t)0);
+ } else if (i >= (cUnit->numRegs + cUnit->numIns)) {
+ // Handle SSA defs, skipping Method* and compiler temps
+ if (SRegToVReg(cUnit, i) < 0) {
+ val = NULL;
+ } else {
+ val = cUnit->irb->CreateLoad(cUnit->irb->CreateAlloca(ty, 0));
+ val->setName(llvmSSAName(cUnit, i));
+ }
+ oatInsertGrowableList(cUnit, &cUnit->llvmValues, (intptr_t)val);
+ if (cUnit->regLocation[i].wide) {
+ // Skip high half of wide values
+ oatInsertGrowableList(cUnit, &cUnit->llvmValues, 0);
+ i++;
+ }
+ } else {
+ // Recover previously-created argument values
+ llvm::Value* argVal = arg_iter++;
+ oatInsertGrowableList(cUnit, &cUnit->llvmValues, (intptr_t)argVal);
+ }
+ }
+ cUnit->irb->CreateBr(cUnit->placeholderBB);
+
+ oatDataFlowAnalysisDispatcher(cUnit, methodBlockBitcodeConversion,
+ kPreOrderDFSTraversal, false /* Iterative */);
+
+ cUnit->placeholderBB->eraseFromParent();
+
+ llvm::verifyFunction(*cUnit->func, llvm::PrintMessageAction);
+
+ // Write bitcode to file
+ std::string errmsg;
+
+ llvm::OwningPtr<llvm::tool_output_file> out_file(
+ new llvm::tool_output_file("/tmp/foo.bc", errmsg,
+ llvm::raw_fd_ostream::F_Binary));
+
+ if (!errmsg.empty()) {
+ LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
+ }
+
+ llvm::WriteBitcodeToFile(cUnit->module, out_file->os());
+ out_file->keep();
+
+
+}
+
+RegLocation getLoc(CompilationUnit* cUnit, llvm::Value* val) {
+ RegLocation res;
+ SafeMap<llvm::Value*, RegLocation>::iterator it = cUnit->locMap.find(val);
+ if (it == cUnit->locMap.end()) {
+ const char* valName = val->getName().str().c_str();
+ DCHECK(valName != NULL);
+ DCHECK(strlen(valName) > 0);
+ if (valName[0] == 'v') {
+ int baseSReg = INVALID_SREG;
+ sscanf(valName, "v%d_", &baseSReg);
+ res = cUnit->regLocation[baseSReg];
+ cUnit->locMap.Put(val, res);
+ } else {
+ UNIMPLEMENTED(WARNING) << "Need to handle llvm temps";
+ DCHECK(valName[0] == 't');
+ }
+ } else {
+ res = it->second;
+ }
+ return res;
+}
+
+Instruction::Code getDalvikOpcode(OpKind op, bool isConst, bool isWide)
+{
+ Instruction::Code res = Instruction::NOP;
+ if (isWide) {
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_LONG; break;
+ case kOpSub: res = Instruction::SUB_LONG; break;
+ case kOpMul: res = Instruction::MUL_LONG; break;
+ case kOpDiv: res = Instruction::DIV_LONG; break;
+ case kOpRem: res = Instruction::REM_LONG; break;
+ case kOpAnd: res = Instruction::AND_LONG; break;
+ case kOpOr: res = Instruction::OR_LONG; break;
+ case kOpXor: res = Instruction::XOR_LONG; break;
+ case kOpLsl: res = Instruction::SHL_LONG; break;
+ case kOpLsr: res = Instruction::USHR_LONG; break;
+ case kOpAsr: res = Instruction::SHR_LONG; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ } else if (isConst){
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_INT_LIT16; break;
+ case kOpSub: res = Instruction::RSUB_INT_LIT8; break;
+ case kOpMul: res = Instruction::MUL_INT_LIT16; break;
+ case kOpDiv: res = Instruction::DIV_INT_LIT16; break;
+ case kOpRem: res = Instruction::REM_INT_LIT16; break;
+ case kOpAnd: res = Instruction::AND_INT_LIT16; break;
+ case kOpOr: res = Instruction::OR_INT_LIT16; break;
+ case kOpXor: res = Instruction::XOR_INT_LIT16; break;
+ case kOpLsl: res = Instruction::SHL_INT_LIT8; break;
+ case kOpLsr: res = Instruction::USHR_INT_LIT8; break;
+ case kOpAsr: res = Instruction::SHR_INT_LIT8; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ } else {
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_INT; break;
+ case kOpSub: res = Instruction::SUB_INT; break;
+ case kOpMul: res = Instruction::MUL_INT; break;
+ case kOpDiv: res = Instruction::DIV_INT; break;
+ case kOpRem: res = Instruction::REM_INT; break;
+ case kOpAnd: res = Instruction::AND_INT; break;
+ case kOpOr: res = Instruction::OR_INT; break;
+ case kOpXor: res = Instruction::XOR_INT; break;
+ case kOpLsl: res = Instruction::SHL_INT; break;
+ case kOpLsr: res = Instruction::USHR_INT; break;
+ case kOpAsr: res = Instruction::SHR_INT; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ }
+ return res;
+}
+
/*
 * Lower an llvm binary arithmetic/logical operation to LIR.  A constant
 * right-hand operand selects the Dalvik literal form; otherwise the
 * register-register form (wide or narrow, from the result location) is
 * generated.
 */
void cvtBinOp(CompilationUnit* cUnit, OpKind op, llvm::Instruction* inst)
{
  RegLocation rlDest = getLoc(cUnit, inst);
  llvm::Value* lhs = inst->getOperand(0);
  // Canonical form: a constant may only appear as the second operand.
  DCHECK(llvm::dyn_cast<llvm::ConstantInt>(lhs) == NULL);
  RegLocation rlSrc1 = getLoc(cUnit, inst->getOperand(0));
  llvm::Value* rhs = inst->getOperand(1);
  if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
    // NOTE(review): the literal path always requests the narrow
    // (isWide == false) opcode - looks like wide ops with constant
    // operands are assumed not to reach here; confirm.
    Instruction::Code dalvikOp = getDalvikOpcode(op, true, false);
    genArithOpIntLit(cUnit, dalvikOp, rlDest, rlSrc1, src2->getSExtValue());
  } else {
    Instruction::Code dalvikOp = getDalvikOpcode(op, false, rlDest.wide);
    RegLocation rlSrc2 = getLoc(cUnit, rhs);
    if (rlDest.wide) {
      genArithOpLong(cUnit, dalvikOp, rlDest, rlSrc1, rlSrc2);
    } else {
      genArithOpInt(cUnit, dalvikOp, rlDest, rlSrc1, rlSrc2);
    }
  }
}
+
+void cvtBr(CompilationUnit* cUnit, llvm::Instruction* inst)
+{
+ llvm::BranchInst* brInst = llvm::dyn_cast<llvm::BranchInst>(inst);
+ DCHECK(brInst != NULL);
+ DCHECK(brInst->isUnconditional()); // May change - but this is all we use now
+ llvm::BasicBlock* targetBB = brInst->getSuccessor(0);
+ opUnconditionalBranch(cUnit, cUnit->blockToLabelMap.Get(targetBB));
+}
+
/* Intentional no-op: phi nodes have already been processed elsewhere. */
void cvtPhi(CompilationUnit* cUnit, llvm::Instruction* inst)
{
  // Nop - these have already been processed
}
+
+void cvtRet(CompilationUnit* cUnit, llvm::Instruction* inst)
+{
+ llvm::ReturnInst* retInst = llvm::dyn_cast<llvm::ReturnInst>(inst);
+ llvm::Value* retVal = retInst->getReturnValue();
+ if (retVal != NULL) {
+ RegLocation rlSrc = getLoc(cUnit, retVal);
+ if (rlSrc.wide) {
+ storeValueWide(cUnit, oatGetReturnWide(cUnit, rlSrc.fp), rlSrc);
+ } else {
+ storeValue(cUnit, oatGetReturn(cUnit, rlSrc.fp), rlSrc);
+ }
+ }
+ genExitSequence(cUnit);
+}
+
+ConditionCode getCond(llvm::ICmpInst::Predicate llvmCond)
+{
+ ConditionCode res = kCondAl;
+ switch(llvmCond) {
+ case llvm::ICmpInst::ICMP_NE: res = kCondNe; break;
+ case llvm::ICmpInst::ICMP_SGT: res = kCondGt; break;
+ default: LOG(FATAL) << "Unexpected llvm condition";
+ }
+ return res;
+}
+
/* Stand-alone integer compare (not fused with a branch) - unimplemented. */
void cvtICmp(CompilationUnit* cUnit, llvm::Instruction* inst)
{
  // genCmpLong(cUnit, rlDest, rlSrc1, rlSrc2)
  UNIMPLEMENTED(FATAL);
}
+
/*
 * Lower a fused icmp + conditional-branch pair to LIR: compare the two
 * operands (register/register or register/immediate) and emit a
 * conditional branch to the taken block followed by an unconditional
 * branch to the fall-through block.  The caller pairs the icmp with the
 * immediately following BranchInst.
 */
void cvtICmpBr(CompilationUnit* cUnit, llvm::Instruction* inst,
               llvm::BranchInst* brInst)
{
  // Get targets
  llvm::BasicBlock* takenBB = brInst->getSuccessor(0);
  LIR* taken = cUnit->blockToLabelMap.Get(takenBB);
  llvm::BasicBlock* fallThroughBB = brInst->getSuccessor(1);
  LIR* fallThrough = cUnit->blockToLabelMap.Get(fallThroughBB);
  // Get comparison operands
  llvm::ICmpInst* iCmpInst = llvm::dyn_cast<llvm::ICmpInst>(inst);
  ConditionCode cond = getCond(iCmpInst->getPredicate());
  llvm::Value* lhs = iCmpInst->getOperand(0);
  // Not expecting a constant as 1st operand
  DCHECK(llvm::dyn_cast<llvm::ConstantInt>(lhs) == NULL);
  RegLocation rlSrc1 = getLoc(cUnit, inst->getOperand(0));
  rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
  llvm::Value* rhs = inst->getOperand(1);
#if defined(TARGET_MIPS)
  // Compare and branch in one shot
  (void)taken;
  (void)cond;
  (void)rhs;
  UNIMPLEMENTED(FATAL);
#else
  //Compare, then branch
  // TODO: handle fused CMP_LONG/IF_xxZ case
  if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
    // Immediate second operand: compare against the literal directly.
    opRegImm(cUnit, kOpCmp, rlSrc1.lowReg, src2->getSExtValue());
  } else {
    RegLocation rlSrc2 = getLoc(cUnit, rhs);
    rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
    opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
  }
  opCondBranch(cUnit, cond, taken);
#endif
  // Fallthrough
  opUnconditionalBranch(cUnit, fallThrough);
}
+
/* Lower a call to a regular (non-intrinsic) function - unimplemented. */
void cvtCall(CompilationUnit* cUnit, llvm::CallInst* callInst,
             llvm::Function* callee)
{
  UNIMPLEMENTED(FATAL);
}
+
+void setMethodInfo(CompilationUnit* cUnit, llvm::CallInst* callInst)
+{
+ UNIMPLEMENTED(WARNING) << "Net setMethodInfo";
+}
+
+void cvtCopy(CompilationUnit* cUnit, llvm::CallInst* callInst)
+{
+ DCHECK(callInst->getNumArgOperands() == 1);
+ RegLocation rlSrc = getLoc(cUnit, callInst->getArgOperand(0));
+ RegLocation rlDest = getLoc(cUnit, callInst);
+ if (rlSrc.wide) {
+ storeValueWide(cUnit, rlDest, rlSrc);
+ } else {
+ storeValue(cUnit, rlDest, rlSrc);
+ }
+}
+
+// Convert a dex_lang_const_* intrinsic into a load-constant sequence.
+// Note: Immediate arg is a ConstantInt regardless of result type (the
+// raw bits of float/double constants are carried in an integer).
+void cvtConst(CompilationUnit* cUnit, llvm::CallInst* callInst)
+{
+  DCHECK_EQ(callInst->getNumArgOperands(), 1U);
+  llvm::ConstantInt* src =
+      llvm::dyn_cast<llvm::ConstantInt>(callInst->getArgOperand(0));
+  // dyn_cast returns NULL on a type mismatch; fail loudly rather than
+  // dereferencing a NULL pointer below.
+  DCHECK(src != NULL);
+  uint64_t immval = src->getZExtValue();
+  RegLocation rlDest = getLoc(cUnit, callInst);
+  RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
+  if (rlDest.wide) {
+    loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
+                          (immval) & 0xffffffff, (immval >> 32) & 0xffffffff);
+    storeValueWide(cUnit, rlDest, rlResult);
+  } else {
+    loadConstantNoClobber(cUnit, rlResult.lowReg, immval & 0xffffffff);
+    storeValue(cUnit, rlDest, rlResult);
+  }
+}
+
+// Generate LIR for a single LLVM basic block: emit its label, reset the
+// register pool, then dispatch each instruction to the matching cvt*
+// handler.  Returns false (no early-abort path yet).
+bool methodBitcodeBlockCodeGen(CompilationUnit* cUnit, llvm::BasicBlock* bb)
+{
+  bool isEntry = (bb == &cUnit->func->getEntryBlock());
+  // Define the starting label
+  LIR* blockLabel = cUnit->blockToLabelMap.Get(bb);
+  // Extract the starting offset from the block's name
+  if (!isEntry) {
+    // Keep the std::string alive across the sscanf.  Taking c_str() of
+    // the temporary returned by str() (the previous code) left a dangling
+    // pointer once the full expression ended — undefined behavior.
+    std::string blockName = bb->getName().str();
+    int dummy;
+    sscanf(blockName.c_str(), labelFormat, &blockLabel->operands[0], &dummy);
+  }
+  // Set the label kind
+  blockLabel->opcode = kPseudoNormalBlockLabel;
+  // Insert the label
+  oatAppendLIR(cUnit, blockLabel);
+
+  // Free temp registers and reset redundant store tracking
+  oatResetRegPool(cUnit);
+  oatResetDefTracking(cUnit);
+
+  //TODO: restore oat incoming liveness optimization
+  oatClobberAllRegs(cUnit);
+
+  //LIR* headLIR = NULL;
+
+
+  if (isEntry) {
+    cUnit->currentDalvikOffset = 0;
+    genEntrySequence(cUnit);
+  }
+
+  // Visit all of the instructions in the block
+  for (llvm::BasicBlock::iterator it = bb->begin(), e = bb->end(); it != e;) {
+    llvm::Instruction* inst = it;
+    llvm::BasicBlock::iterator nextIt = ++it;
+    // Extract the Dalvik offset from the instruction
+    uint32_t opcode = inst->getOpcode();
+    llvm::MDNode* dexOffsetNode = inst->getMetadata("DexOff");
+    if (dexOffsetNode != NULL) {
+      llvm::ConstantInt* dexOffsetValue =
+          static_cast<llvm::ConstantInt*>(dexOffsetNode->getOperand(0));
+      cUnit->currentDalvikOffset = dexOffsetValue->getZExtValue();
+    }
+
+    switch(opcode) {
+
+      case llvm::Instruction::ICmp: {
+          llvm::Instruction* nextInst = nextIt;
+          llvm::BranchInst* brInst = llvm::dyn_cast<llvm::BranchInst>(nextInst);
+          if (brInst != NULL /* and... */) {
+            // Fuse the compare with the following branch; skip the branch
+            // instruction by advancing the iterator an extra step.
+            cvtICmpBr(cUnit, inst, brInst);
+            ++it;
+          } else {
+            cvtICmp(cUnit, inst);
+          }
+        }
+        break;
+
+      case llvm::Instruction::Call: {
+          llvm::CallInst* callInst = llvm::dyn_cast<llvm::CallInst>(inst);
+          llvm::Function* callee = callInst->getCalledFunction();
+          greenland::IntrinsicHelper::IntrinsicId id =
+              cUnit->intrinsic_helper->GetIntrinsicId(callee);
+          switch (id) {
+            case greenland::IntrinsicHelper::CopyInt:
+            case greenland::IntrinsicHelper::CopyObj:
+            case greenland::IntrinsicHelper::CopyFloat:
+            case greenland::IntrinsicHelper::CopyLong:
+            case greenland::IntrinsicHelper::CopyDouble:
+              cvtCopy(cUnit, callInst);
+              break;
+            case greenland::IntrinsicHelper::ConstInt:
+            case greenland::IntrinsicHelper::ConstObj:
+            case greenland::IntrinsicHelper::ConstLong:
+            case greenland::IntrinsicHelper::ConstFloat:
+            case greenland::IntrinsicHelper::ConstDouble:
+              cvtConst(cUnit, callInst);
+              break;
+            case greenland::IntrinsicHelper::MethodInfo:
+              setMethodInfo(cUnit, callInst);
+              break;
+            case greenland::IntrinsicHelper::CheckSuspend:
+              genSuspendTest(cUnit, 0 /* optFlags already applied */);
+              break;
+            case greenland::IntrinsicHelper::UnknownId:
+              cvtCall(cUnit, callInst, callee);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected intrinsic " << (int)id << ", "
+                         << cUnit->intrinsic_helper->GetName(id);
+          }
+        }
+        break;
+
+      case llvm::Instruction::Br: cvtBr(cUnit, inst); break;
+      case llvm::Instruction::Add: cvtBinOp(cUnit, kOpAdd, inst); break;
+      case llvm::Instruction::Sub: cvtBinOp(cUnit, kOpSub, inst); break;
+      case llvm::Instruction::Mul: cvtBinOp(cUnit, kOpMul, inst); break;
+      case llvm::Instruction::SDiv: cvtBinOp(cUnit, kOpDiv, inst); break;
+      case llvm::Instruction::SRem: cvtBinOp(cUnit, kOpRem, inst); break;
+      case llvm::Instruction::And: cvtBinOp(cUnit, kOpAnd, inst); break;
+      case llvm::Instruction::Or: cvtBinOp(cUnit, kOpOr, inst); break;
+      case llvm::Instruction::Xor: cvtBinOp(cUnit, kOpXor, inst); break;
+      case llvm::Instruction::Shl: cvtBinOp(cUnit, kOpLsl, inst); break;
+      case llvm::Instruction::LShr: cvtBinOp(cUnit, kOpLsr, inst); break;
+      case llvm::Instruction::AShr: cvtBinOp(cUnit, kOpAsr, inst); break;
+      case llvm::Instruction::PHI: cvtPhi(cUnit, inst); break;
+      case llvm::Instruction::Ret: cvtRet(cUnit, inst); break;
+
+      // Opcodes we expect to support eventually, but don't yet.
+      case llvm::Instruction::Invoke:
+      case llvm::Instruction::FAdd:
+      case llvm::Instruction::FSub:
+      case llvm::Instruction::FMul:
+      case llvm::Instruction::FDiv:
+      case llvm::Instruction::FRem:
+      case llvm::Instruction::Trunc:
+      case llvm::Instruction::ZExt:
+      case llvm::Instruction::SExt:
+      case llvm::Instruction::FPToUI:
+      case llvm::Instruction::FPToSI:
+      case llvm::Instruction::UIToFP:
+      case llvm::Instruction::SIToFP:
+      case llvm::Instruction::FPTrunc:
+      case llvm::Instruction::FPExt:
+      case llvm::Instruction::PtrToInt:
+      case llvm::Instruction::IntToPtr:
+      case llvm::Instruction::Switch:
+      case llvm::Instruction::FCmp:
+        UNIMPLEMENTED(FATAL) << "Unimplemented llvm opcode: " << opcode; break;
+
+      // Opcodes the MIR-to-bitcode converter should never have produced.
+      case llvm::Instruction::URem:
+      case llvm::Instruction::UDiv:
+      case llvm::Instruction::Resume:
+      case llvm::Instruction::Unreachable:
+      case llvm::Instruction::Alloca:
+      case llvm::Instruction::GetElementPtr:
+      case llvm::Instruction::Fence:
+      case llvm::Instruction::AtomicCmpXchg:
+      case llvm::Instruction::AtomicRMW:
+      case llvm::Instruction::BitCast:
+      case llvm::Instruction::VAArg:
+      case llvm::Instruction::Select:
+      case llvm::Instruction::UserOp1:
+      case llvm::Instruction::UserOp2:
+      case llvm::Instruction::ExtractElement:
+      case llvm::Instruction::InsertElement:
+      case llvm::Instruction::ShuffleVector:
+      case llvm::Instruction::ExtractValue:
+      case llvm::Instruction::InsertValue:
+      case llvm::Instruction::LandingPad:
+      case llvm::Instruction::IndirectBr:
+      case llvm::Instruction::Load:
+      case llvm::Instruction::Store:
+        LOG(FATAL) << "Unexpected llvm opcode: " << opcode; break;
+
+      default:
+        LOG(FATAL) << "Unknown llvm opcode: " << opcode; break;
+    }
+  }
+  return false;
+}
+
+/*
+ * Convert LLVM_IR to MIR:
+ * o Iterate through the LLVM_IR and construct a graph using
+ * standard MIR building blocks.
+ * o Perform a basic-block optimization pass to remove unnecessary
+ * store/load sequences.
+ * o Convert the LLVM Value operands into RegLocations where applicable.
+ * o Create ssaRep def/use operand arrays for each converted LLVM opcode
+ * o Perform register promotion
+ * o Iterate through the graph a basic block at a time, generating
+ * LIR.
+ * o Assemble LIR as usual.
+ * o Profit.
+ */
+void oatMethodBitcode2LIR(CompilationUnit* cUnit)
+{
+  // One LIR label is needed per LLVM basic block.
+  int numBasicBlocks = cUnit->func->getBasicBlockList().size();
+  cUnit->blockLabelList =
+      (void*)oatNew(cUnit, sizeof(LIR) * numBasicBlocks, true, kAllocLIR);
+  LIR* labelList = (LIR*)cUnit->blockLabelList;
+  // First pass: associate every basic block with its label slot.
+  int labelIdx = 0;
+  for (llvm::Function::iterator bbIt = cUnit->func->begin(),
+       bbEnd = cUnit->func->end(); bbIt != bbEnd; ++bbIt) {
+    cUnit->blockToLabelMap.Put(static_cast<llvm::BasicBlock*>(bbIt),
+                               &labelList[labelIdx++]);
+  }
+  // Second pass: walk the blocks in order, generating code for each.
+  for (llvm::Function::iterator bbIt = cUnit->func->begin(),
+       bbEnd = cUnit->func->end(); bbIt != bbEnd; ++bbIt) {
+    methodBitcodeBlockCodeGen(cUnit, static_cast<llvm::BasicBlock*>(bbIt));
+  }
+
+  // Materialize the out-of-line fragments referenced during codegen.
+  handleSuspendLaunchpads(cUnit);
+  handleThrowLaunchpads(cUnit);
+  handleIntrinsicLaunchpads(cUnit);
+
+  freeIR(cUnit);
+}
+
+
+} // namespace art
+
+#endif // ART_USE_QUICK_COMPILER
diff --git a/src/compiler/codegen/MethodCodegenDriver.cc b/src/compiler/codegen/MethodCodegenDriver.cc
index 9c51f0a..15dbedd 100644
--- a/src/compiler/codegen/MethodCodegenDriver.cc
+++ b/src/compiler/codegen/MethodCodegenDriver.cc
@@ -21,8 +21,9 @@
#define DISPLAY_MISSING_TARGETS (cUnit->enableDebug & \
(1 << kDebugDisplayMissingTargets))
-const RegLocation badLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0,
- INVALID_REG, INVALID_REG, INVALID_SREG};
+const RegLocation badLoc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+ INVALID_REG, INVALID_REG, INVALID_SREG,
+ INVALID_SREG};
/* Mark register usage state and return long retloc */
RegLocation oatGetReturnWide(CompilationUnit* cUnit, bool isDouble)
@@ -851,9 +852,9 @@
LIR* headLIR = NULL;
if (bb->blockType == kEntryBlock) {
- genEntrySequence(cUnit, bb);
+ genEntrySequence(cUnit);
} else if (bb->blockType == kExitBlock) {
- genExitSequence(cUnit, bb);
+ genExitSequence(cUnit);
}
for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
diff --git a/src/compiler/codegen/arm/ArchFactory.cc b/src/compiler/codegen/arm/ArchFactory.cc
index bc30335..c0d91a4 100644
--- a/src/compiler/codegen/arm/ArchFactory.cc
+++ b/src/compiler/codegen/arm/ArchFactory.cc
@@ -56,7 +56,7 @@
return rLR;
}
-void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
+void genEntrySequence(CompilationUnit* cUnit)
{
int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
/*
@@ -116,7 +116,7 @@
oatFreeTemp(cUnit, r3);
}
-void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
+void genExitSequence(CompilationUnit* cUnit)
{
int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
/*
diff --git a/src/compiler/codegen/arm/ArmLIR.h b/src/compiler/codegen/arm/ArmLIR.h
index 93e7878..4004fbb 100644
--- a/src/compiler/codegen/arm/ArmLIR.h
+++ b/src/compiler/codegen/arm/ArmLIR.h
@@ -125,10 +125,10 @@
#define rNone (-1)
/* RegisterLocation templates return values (r0, or r0/r1) */
-#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 1, r0, INVALID_REG,\
- INVALID_SREG}
-#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r0, r1, \
- INVALID_SREG}
+#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r0, INVALID_REG,\
+ INVALID_SREG, INVALID_SREG}
+#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1, \
+ INVALID_SREG, INVALID_SREG}
#define LOC_C_RETURN_FLOAT LOC_C_RETURN
#define LOC_C_RETURN_WIDE_DOUBLE LOC_C_RETURN_WIDE
diff --git a/src/compiler/codegen/arm/armv7-a-neon/Codegen.cc b/src/compiler/codegen/arm/armv7-a-neon/Codegen.cc
index f7132ab..660d095 100644
--- a/src/compiler/codegen/arm/armv7-a-neon/Codegen.cc
+++ b/src/compiler/codegen/arm/armv7-a-neon/Codegen.cc
@@ -45,6 +45,9 @@
/* Thumb2-specific register allocation */
#include "../Thumb2/Ralloc.cc"
+/* Bitcode conversion */
+#include "../../MethodBitcode.cc"
+
/* MIR2LIR dispatcher and architectural independent codegen routines */
#include "../../MethodCodegenDriver.cc"
diff --git a/src/compiler/codegen/arm/armv7-a/Codegen.cc b/src/compiler/codegen/arm/armv7-a/Codegen.cc
index 6f62ccf..ba1e7ab 100644
--- a/src/compiler/codegen/arm/armv7-a/Codegen.cc
+++ b/src/compiler/codegen/arm/armv7-a/Codegen.cc
@@ -45,6 +45,9 @@
/* Thumb2-specific register allocation */
#include "../Thumb2/Ralloc.cc"
+/* Bitcode conversion */
+#include "../../MethodBitcode.cc"
+
/* MIR2LIR dispatcher and architectural independent codegen routines */
#include "../../MethodCodegenDriver.cc"
diff --git a/src/compiler/codegen/mips/ArchFactory.cc b/src/compiler/codegen/mips/ArchFactory.cc
index 4fd127f..bf4c8a6 100644
--- a/src/compiler/codegen/mips/ArchFactory.cc
+++ b/src/compiler/codegen/mips/ArchFactory.cc
@@ -144,7 +144,7 @@
opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize);
}
-void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
+void genEntrySequence(CompilationUnit* cUnit)
{
int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
/*
@@ -198,7 +198,7 @@
oatFreeTemp(cUnit, rARG3);
}
-void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
+void genExitSequence(CompilationUnit* cUnit)
{
/*
* In the exit path, rRET0/rRET1 are live - make sure they aren't
diff --git a/src/compiler/codegen/mips/MipsLIR.h b/src/compiler/codegen/mips/MipsLIR.h
index c9f917b..4850205 100644
--- a/src/compiler/codegen/mips/MipsLIR.h
+++ b/src/compiler/codegen/mips/MipsLIR.h
@@ -145,16 +145,16 @@
#define r_FRESULT1 r_F1
/* RegisterLocation templates return values (r_V0, or r_V0/r_V1) */
-#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 1, r_V0, INVALID_REG, \
- INVALID_SREG}
+#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_V0, INVALID_REG, \
+ INVALID_SREG, INVALID_SREG}
#define LOC_C_RETURN_FLOAT LOC_C_RETURN
-#define LOC_C_RETURN_ALT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 1, r_F0, INVALID_REG, \
- INVALID_SREG}
-#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r_RESULT0, \
- r_RESULT1, INVALID_SREG}
+#define LOC_C_RETURN_ALT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_F0, \
+ INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r_RESULT0, \
+ r_RESULT1, INVALID_SREG, INVALID_SREG}
#define LOC_C_RETURN_WIDE_DOUBLE LOC_C_RETURN_WIDE
-#define LOC_C_RETURN_WIDE_ALT {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r_FRESULT0,\
- r_FRESULT1, INVALID_SREG}
+#define LOC_C_RETURN_WIDE_ALT {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r_FRESULT0,\
+ r_FRESULT1, INVALID_SREG, INVALID_SREG}
enum ResourceEncodingPos {
kGPReg0 = 0,
diff --git a/src/compiler/codegen/mips/mips/Codegen.cc b/src/compiler/codegen/mips/mips/Codegen.cc
index 71f43e5..60a7646 100644
--- a/src/compiler/codegen/mips/mips/Codegen.cc
+++ b/src/compiler/codegen/mips/mips/Codegen.cc
@@ -45,6 +45,9 @@
/* Mips32-specific register allocation */
#include "../Mips32/Ralloc.cc"
+/* Bitcode conversion */
+#include "../../MethodBitcode.cc"
+
/* MIR2LIR dispatcher and architectural independent codegen routines */
#include "../../MethodCodegenDriver.cc"
diff --git a/src/compiler/codegen/x86/ArchFactory.cc b/src/compiler/codegen/x86/ArchFactory.cc
index 9e6ef09..b3cebdc 100644
--- a/src/compiler/codegen/x86/ArchFactory.cc
+++ b/src/compiler/codegen/x86/ArchFactory.cc
@@ -34,7 +34,8 @@
// Compute (r1:r0) = (r1:r0) + (r2:r3)
opRegReg(cUnit, kOpAdd, r0, r2); // r0 = r0 + r2
opRegReg(cUnit, kOpAdc, r1, r3); // r1 = r1 + r3 + CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r0, r1, INVALID_SREG};
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
storeValueWide(cUnit, rlDest, rlResult);
return false;
}
@@ -49,7 +50,8 @@
// Compute (r1:r0) = (r1:r0) + (r2:r3)
opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r0, r1, INVALID_SREG};
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
storeValueWide(cUnit, rlDest, rlResult);
return false;
}
@@ -64,7 +66,8 @@
// Compute (r1:r0) = (r1:r0) + (r2:r3)
opRegReg(cUnit, kOpAnd, r0, r2); // r0 = r0 - r2
opRegReg(cUnit, kOpAnd, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r0, r1, INVALID_SREG};
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
storeValueWide(cUnit, rlDest, rlResult);
return false;
}
@@ -79,7 +82,8 @@
// Compute (r1:r0) = (r1:r0) + (r2:r3)
opRegReg(cUnit, kOpOr, r0, r2); // r0 = r0 - r2
opRegReg(cUnit, kOpOr, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r0, r1, INVALID_SREG};
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
storeValueWide(cUnit, rlDest, rlResult);
return false;
}
@@ -94,7 +98,8 @@
// Compute (r1:r0) = (r1:r0) + (r2:r3)
opRegReg(cUnit, kOpXor, r0, r2); // r0 = r0 - r2
opRegReg(cUnit, kOpXor, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r0, r1, INVALID_SREG};
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
storeValueWide(cUnit, rlDest, rlResult);
return false;
}
@@ -109,7 +114,8 @@
opRegReg(cUnit, kOpNeg, r0, r0); // r0 = -r0
opRegImm(cUnit, kOpAdc, r1, 0); // r1 = r1 + CF
opRegReg(cUnit, kOpNeg, r1, r1); // r1 = -r1
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, r0, r1, INVALID_SREG};
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
storeValueWide(cUnit, rlDest, rlResult);
return false;
}
@@ -157,7 +163,7 @@
newLIR2(cUnit, opcode, rDest, threadOffset);
}
-void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
+void genEntrySequence(CompilationUnit* cUnit)
{
/*
* On entry, rARG0, rARG1, rARG2 are live. Let the register
@@ -210,7 +216,7 @@
oatFreeTemp(cUnit, rARG2);
}
-void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb) {
+void genExitSequence(CompilationUnit* cUnit) {
/*
* In the exit path, rRET0/rRET1 are live - make sure they aren't
* allocated by the register utilities as temps.
diff --git a/src/compiler/codegen/x86/X86LIR.h b/src/compiler/codegen/x86/X86LIR.h
index 3ec1112..6e7dcee 100644
--- a/src/compiler/codegen/x86/X86LIR.h
+++ b/src/compiler/codegen/x86/X86LIR.h
@@ -137,11 +137,11 @@
#define rNone (-1)
/* RegisterLocation templates return values (rAX, rAX/rDX or XMM0) */
-// location, wide, defined, fp, core, ref, highWord, home, lowReg, highReg, sRegLow
-#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 1, rAX, INVALID_REG, INVALID_SREG}
-#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 1, rAX, rDX, INVALID_SREG}
-#define LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 1, 0, 0, 0, 1, fr0, INVALID_REG, INVALID_SREG}
-#define LOC_C_RETURN_WIDE_DOUBLE {kLocPhysReg, 1, 0, 1, 0, 0, 0, 1, fr0, fr1, INVALID_SREG}
+// location, wide, defined, const, fp, core, ref, highWord, home, lowReg, highReg, sRegLow, sRegHigh
+#define LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rAX, rDX, INVALID_SREG, INVALID_SREG}
+#define LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define LOC_C_RETURN_WIDE_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, fr0, fr1, INVALID_SREG, INVALID_SREG}
enum ResourceEncodingPos {
kGPReg0 = 0,
diff --git a/src/compiler/codegen/x86/x86/Codegen.cc b/src/compiler/codegen/x86/x86/Codegen.cc
index f8719d9..0552ce3 100644
--- a/src/compiler/codegen/x86/x86/Codegen.cc
+++ b/src/compiler/codegen/x86/x86/Codegen.cc
@@ -44,6 +44,9 @@
/* X86-specific register allocation */
#include "../X86/Ralloc.cc"
+/* Bitcode conversion */
+#include "../../MethodBitcode.cc"
+
/* MIR2LIR dispatcher and architectural independent codegen routines */
#include "../../MethodCodegenDriver.cc"
diff --git a/src/greenland/intrinsic_func_list.def b/src/greenland/intrinsic_func_list.def
index 48c1247..87ce43b 100644
--- a/src/greenland/intrinsic_func_list.def
+++ b/src/greenland/intrinsic_func_list.def
@@ -558,6 +558,13 @@
kVoidTy,
_EXPAND_ARG1(kJavaThreadTy))
+// void dex_lang_check_suspend() /* Expands to GetCurrentThread/TestSuspend */
+_EVAL_DEF_INTRINSICS_FUNC(CheckSuspend,
+ dex_lang_check_suspend,
+ kAttrNoThrow,
+ kVoidTy,
+ _EXPAND_ARG0())
+
//----------------------------------------------------------------------------
// Shadow Frame
//----------------------------------------------------------------------------
@@ -590,6 +597,92 @@
kVoidTy,
_EXPAND_ARG1(kInt32ConstantTy))
+//----------------------------------------------------------------------------
+// Const intrinsics to assist MIR to Greenland_ir conversion. Should not materialize
+// For simplicity, all use integer input
+//----------------------------------------------------------------------------
+// int const_int(int)
+_EVAL_DEF_INTRINSICS_FUNC(ConstInt,
+ dex_lang_const_int,
+ kAttrReadOnly | kAttrNoThrow,
+ kInt32Ty,
+ _EXPAND_ARG1(kInt32Ty))
+
+// object const_obj(int)
+_EVAL_DEF_INTRINSICS_FUNC(ConstObj,
+ dex_lang_const_obj,
+ kAttrReadOnly | kAttrNoThrow,
+ kJavaObjectTy,
+ _EXPAND_ARG1(kInt32Ty))
+
+// int const_long(long)
+_EVAL_DEF_INTRINSICS_FUNC(ConstLong,
+ dex_lang_const_long,
+ kAttrReadOnly | kAttrNoThrow,
+ kInt64Ty,
+ _EXPAND_ARG1(kInt64Ty))
+
+// float const_float(int)
+_EVAL_DEF_INTRINSICS_FUNC(ConstFloat,
+ dex_lang_const_Float,
+ kAttrReadOnly | kAttrNoThrow,
+ kFloatTy,
+ _EXPAND_ARG1(kInt32Ty))
+
+// double const_double(long)
+_EVAL_DEF_INTRINSICS_FUNC(ConstDouble,
+ dex_lang_const_Double,
+ kAttrReadOnly | kAttrNoThrow,
+ kDoubleTy,
+ _EXPAND_ARG1(kInt64Ty))
+
+
+//----------------------------------------------------------------------------
+// Copy intrinsics to assist MIR to Greenland_ir conversion. Should not materialize
+//----------------------------------------------------------------------------
+
+// void method_info(void)
+_EVAL_DEF_INTRINSICS_FUNC(MethodInfo,
+ dex_lang_method_info,
+ kAttrReadOnly | kAttrNoThrow,
+ kVoidTy,
+ _EXPAND_ARG0())
+
+// int copy_int(int)
+_EVAL_DEF_INTRINSICS_FUNC(CopyInt,
+ dex_lang_copy_int,
+ kAttrReadOnly | kAttrNoThrow,
+ kInt32Ty,
+ _EXPAND_ARG1(kInt32Ty))
+
+// int copy_obj(obj)
+_EVAL_DEF_INTRINSICS_FUNC(CopyObj,
+ dex_lang_copy_obj,
+ kAttrReadOnly | kAttrNoThrow,
+ kJavaObjectTy,
+ _EXPAND_ARG1(kJavaObjectTy))
+
+// int copy_long(long)
+_EVAL_DEF_INTRINSICS_FUNC(CopyLong,
+ dex_lang_copy_long,
+ kAttrReadOnly | kAttrNoThrow,
+ kInt64Ty,
+ _EXPAND_ARG1(kInt64Ty))
+
+// int copy_float(float)
+_EVAL_DEF_INTRINSICS_FUNC(CopyFloat,
+ dex_lang_copy_Float,
+ kAttrReadOnly | kAttrNoThrow,
+ kFloatTy,
+ _EXPAND_ARG1(kFloatTy))
+
+// int copy_double(double)
+_EVAL_DEF_INTRINSICS_FUNC(CopyDouble,
+ dex_lang_copy_Double,
+ kAttrReadOnly | kAttrNoThrow,
+ kDoubleTy,
+ _EXPAND_ARG1(kDoubleTy))
+
// Clean up all internal used macros
#undef _EXPAND_ARG0
diff --git a/src/greenland/intrinsic_helper.cc b/src/greenland/intrinsic_helper.cc
index 07836bb..b1837fb 100644
--- a/src/greenland/intrinsic_helper.cc
+++ b/src/greenland/intrinsic_helper.cc
@@ -65,6 +65,12 @@
case IntrinsicHelper::kInt64ConstantTy: {
return irb.getInt64Ty();
}
+ case IntrinsicHelper::kFloatTy: {
+ return irb.getFloatTy();
+ }
+ case IntrinsicHelper::kDoubleTy: {
+ return irb.getDoubleTy();
+ }
case IntrinsicHelper::kNone:
case IntrinsicHelper::kVarArgTy:
default: {
diff --git a/src/greenland/intrinsic_helper.h b/src/greenland/intrinsic_helper.h
index a5c2097..7e84cc9 100644
--- a/src/greenland/intrinsic_helper.h
+++ b/src/greenland/intrinsic_helper.h
@@ -75,6 +75,9 @@
kInt32ConstantTy,
kInt64ConstantTy,
+ kFloatTy,
+ kDoubleTy,
+
kVarArgTy,
};
@@ -100,7 +103,7 @@
}
static const char* GetName(IntrinsicId id) {
- return GetInfo(id).name_;
+ return (id <= MaxIntrinsicId) ? GetInfo(id).name_ : "InvalidIntrinsic";
}
static unsigned GetAttr(IntrinsicId id) {
diff --git a/src/greenland/ir_builder.h b/src/greenland/ir_builder.h
index a46e4b3..baa0ae7 100644
--- a/src/greenland/ir_builder.h
+++ b/src/greenland/ir_builder.h
@@ -23,6 +23,8 @@
#include "logging.h"
#include <llvm/Support/IRBuilder.h>
+#include <llvm/Support/NoFolder.h>
+#include <llvm/Metadata.h>
namespace llvm {
class Module;
@@ -31,7 +33,30 @@
namespace art {
namespace greenland {
-typedef llvm::IRBuilder<> LLVMIRBuilder;
+#if defined(ART_USE_QUICK_COMPILER)
+class InserterWithDexOffset
+ : public llvm::IRBuilderDefaultInserter<true> {
+ public:
+ InserterWithDexOffset() : node_(NULL) {}
+ void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
+ llvm::BasicBlock *BB,
+ llvm::BasicBlock::iterator InsertPt) const {
+ llvm::IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
+ if (node_ != NULL) {
+ I->setMetadata("DexOff", node_);
+ }
+ }
+ void SetDexOffset(llvm::MDNode* node) {
+ node_ = node;
+ }
+ private:
+ llvm::MDNode* node_;
+};
+
+typedef llvm::IRBuilder<true, llvm::NoFolder, InserterWithDexOffset> LLVMIRBuilder;
+#else
+typedef llvm::IRBuilder<true> LLVMIRBuilder;
+#endif
class IRBuilder : public LLVMIRBuilder {
public: