Diffstat (limited to 'src')
-rw-r--r--  src/card_table.cc  37
-rw-r--r--  src/card_table.h  37
-rw-r--r--  src/class_linker.cc  5
-rw-r--r--  src/compiler/Compiler.h  1
-rw-r--r--  src/compiler/CompilerIR.h  14
-rw-r--r--  src/compiler/Dataflow.cc  194
-rw-r--r--  src/compiler/Frontend.cc  125
-rw-r--r--  src/compiler/Ralloc.cc  26
-rw-r--r--  src/compiler/Utility.cc  11
-rw-r--r--  src/compiler/codegen/GenInvoke.cc  4
-rw-r--r--  src/compiler/codegen/LocalOptimizations.cc  9
-rw-r--r--  src/compiler/codegen/MethodBitcode.cc  122
-rw-r--r--  src/compiler/codegen/MethodCodegenDriver.cc  26
-rw-r--r--  src/compiler/codegen/mips/FP/MipsFP.cc  6
-rw-r--r--  src/compiler/codegen/mips/Mips32/Gen.cc  5
-rw-r--r--  src/compiler/codegen/x86/Assemble.cc  8
-rw-r--r--  src/compiler/codegen/x86/FP/X86FP.cc  103
-rw-r--r--  src/compiler/codegen/x86/X86/Gen.cc  91
-rw-r--r--  src/compiler/codegen/x86/X86LIR.h  2
-rw-r--r--  src/compiler_llvm/compiler_llvm.cc  10
-rw-r--r--  src/disassembler_x86.cc  1
-rw-r--r--  src/globals.h  1
-rw-r--r--  src/greenland/intrinsic_func_list.def  22
-rw-r--r--  src/heap.cc  289
-rw-r--r--  src/heap.h  34
-rw-r--r--  src/heap_bitmap.cc  21
-rw-r--r--  src/heap_bitmap.h  11
-rw-r--r--  src/macros.h  2
-rw-r--r--  src/mark_sweep.cc  159
-rw-r--r--  src/mark_sweep.h  32
-rw-r--r--  src/mem_map.cc  7
-rw-r--r--  src/mem_map.h  5
-rw-r--r--  src/mod_union_table.cc  325
-rw-r--r--  src/mod_union_table.h  155
-rw-r--r--  src/native/dalvik_system_Zygote.cc  39
-rw-r--r--  src/oat/runtime/support_invoke.cc  7
-rw-r--r--  src/runtime.cc  17
-rw-r--r--  src/runtime.h  11
-rw-r--r--  src/space.cc  59
-rw-r--r--  src/space.h  46
-rw-r--r--  src/space_bitmap.cc  72
-rw-r--r--  src/space_bitmap.h  91
-rw-r--r--  src/space_bitmap_test.cc  86
-rw-r--r--  src/space_test.cc  71
-rw-r--r--  src/verifier/method_verifier.cc  2
-rw-r--r--  src/verifier/reg_type.cc  86
-rw-r--r--  src/verifier/reg_type.h  54
-rw-r--r--  src/verifier/reg_type_cache.cc  54
-rw-r--r--  src/verifier/reg_type_cache.h  4
-rw-r--r--  src/verifier/register_line.cc  14
-rw-r--r--  src/verifier/register_line.h  14
51 files changed, 1767 insertions, 860 deletions
diff --git a/src/card_table.cc b/src/card_table.cc
index 758a88957c..6c127b6f2b 100644
--- a/src/card_table.cc
+++ b/src/card_table.cc
@@ -88,16 +88,11 @@ CardTable::CardTable(MemMap* mem_map, byte* biased_begin, size_t offset)
ANNOTATE_BENIGN_RACE_SIZED(begin, (end - begin), "writes to GC card table");
}
-void CardTable::ClearNonImageSpaceCards(Heap* heap) {
+void CardTable::ClearSpaceCards(Space* space) {
// TODO: clear just the range of the table that has been modified
- const std::vector<Space*>& spaces = heap->GetSpaces();
- for (size_t i = 0; i < spaces.size(); ++i) {
- if (!spaces[i]->IsImageSpace()) {
- byte* card_start = CardFromAddr(spaces[i]->Begin());
- byte* card_end = CardFromAddr(spaces[i]->End());
- memset(reinterpret_cast<void*>(card_start), GC_CARD_CLEAN, card_end - card_start);
- }
- }
+ byte* card_start = CardFromAddr(space->Begin());
+ byte* card_end = CardFromAddr(space->End()); // Make sure to round up.
+ memset(reinterpret_cast<void*>(card_start), GC_CARD_CLEAN, card_end - card_start);
}
void CardTable::ClearCardTable() {
@@ -117,30 +112,6 @@ void CardTable::CheckAddrIsInCardTable(const byte* addr) const {
}
}
-void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, Callback* visitor, void* arg) const {
- DCHECK(bitmap->HasAddress(scan_begin));
- DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan.
- byte* card_cur = CardFromAddr(scan_begin);
- byte* card_end = CardFromAddr(scan_end);
- while (card_cur < card_end) {
- while (card_cur < card_end && *card_cur == GC_CARD_CLEAN) {
- card_cur++;
- }
- byte* run_start = card_cur;
-
- while (card_cur < card_end && *card_cur == GC_CARD_DIRTY) {
- card_cur++;
- }
- byte* run_end = card_cur;
-
- if (run_start != run_end) {
- bitmap->VisitRange(reinterpret_cast<uintptr_t>(AddrFromCard(run_start)),
- reinterpret_cast<uintptr_t>(AddrFromCard(run_end)),
- visitor, arg);
- }
- }
-}
-
void CardTable::VerifyCardTable() {
UNIMPLEMENTED(WARNING) << "Card table verification";
}
diff --git a/src/card_table.h b/src/card_table.h
index ea46cfe981..d065bed8e8 100644
--- a/src/card_table.h
+++ b/src/card_table.h
@@ -20,11 +20,13 @@
#include "globals.h"
#include "logging.h"
#include "mem_map.h"
+#include "space_bitmap.h"
#include "UniquePtr.h"
namespace art {
class Heap;
+class Space;
class SpaceBitmap;
class Object;
@@ -70,9 +72,31 @@ class CardTable {
return biased_begin_;
}
- // For every dirty card between begin and end invoke the visitor with the specified argument
- typedef void Callback(Object* obj, void* arg);
- void Scan(SpaceBitmap* bitmap, byte* begin, byte* end, Callback* visitor, void* arg) const;
+ // For every dirty card between begin and end invoke the visitor with the specified argument.
+ template <typename Visitor>
+ void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor) const {
+ DCHECK(bitmap->HasAddress(scan_begin));
+ DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan.
+ byte* card_cur = CardFromAddr(scan_begin);
+ byte* card_end = CardFromAddr(scan_end);
+ while (card_cur < card_end) {
+ while (card_cur < card_end && *card_cur == GC_CARD_CLEAN) {
+ card_cur++;
+ }
+ byte* run_start = card_cur;
+
+ while (card_cur < card_end && *card_cur == GC_CARD_DIRTY) {
+ card_cur++;
+ }
+ byte* run_end = card_cur;
+
+ if (run_start != run_end) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(run_start));
+ uintptr_t end = reinterpret_cast<uintptr_t>(AddrFromCard(run_end));
+ bitmap->VisitMarkedRange(start, end, visitor);
+ }
+ }
+ }
// Assertion used to check the given address is covered by the card table
void CheckAddrIsInCardTable(const byte* addr) const;
@@ -81,7 +105,7 @@ class CardTable {
void ClearCardTable();
// Resets all of the bytes in the card table which do not map to the image space.
- void ClearNonImageSpaceCards(Heap* heap);
+ void ClearSpaceCards(Space* space);
// Returns the first address in the heap which maps to this card.
void* AddrFromCard(const byte *card_addr) const {
@@ -92,8 +116,6 @@ class CardTable {
uintptr_t offset = card_addr - biased_begin_;
return reinterpret_cast<void*>(offset << GC_CARD_SHIFT);
}
- private:
- CardTable(MemMap* begin, byte* biased_begin, size_t offset);
// Returns the address of the relevant byte in the card table, given an address on the heap.
byte* CardFromAddr(const void *addr) const {
@@ -104,6 +126,9 @@ class CardTable {
return card_addr;
}
+ private:
+ CardTable(MemMap* begin, byte* biased_begin, size_t offset);
+
// Returns true iff the card table address is within the bounds of the card table.
bool IsValidCard(const byte* card_addr) const {
byte* begin = mem_map_->Begin() + offset_;
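[Editor's sketch, not part of the commit: one possible caller of the new templated CardTable::Scan(), replacing the old Callback/void* pair with a function object. The visitor name and counting logic are illustrative assumptions, and it assumes SpaceBitmap::VisitMarkedRange() invokes the visitor once per marked Object*.]

// Sketch only: assumes the usual art headers are available.
namespace art {

struct CountDirtyObjectsVisitor {
  explicit CountDirtyObjectsVisitor(size_t* count) : count_(count) {}
  void operator()(const Object* /*obj*/) const {
    ++*count_;  // e.g. tally objects reachable through dirty cards
  }
  size_t* count_;
};

inline size_t CountObjectsOnDirtyCards(const CardTable& card_table,
                                       SpaceBitmap* bitmap,
                                       byte* begin, byte* end) {
  size_t count = 0;
  CountDirtyObjectsVisitor visitor(&count);
  card_table.Scan(bitmap, begin, end, visitor);  // visitor is inlined at each call site
  return count;
}

}  // namespace art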
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 5bd69e8c5e..df14a4105e 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -2045,6 +2045,11 @@ void ClassLinker::VerifyClass(Class* klass) {
LOG(FATAL) << "Verification failed hard on class " << PrettyDescriptor(klass)
<< " at compile time, but succeeded at runtime! The verifier must be broken.";
}
+ if (!preverified && verifier_failure != verifier::MethodVerifier::kNoFailure) {
+ LOG(WARNING) << "Soft verification failure in class " << PrettyDescriptor(klass)
+ << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
+ << " because: " << error_msg;
+ }
DCHECK(!Thread::Current()->IsExceptionPending());
CHECK(verifier_failure == verifier::MethodVerifier::kNoFailure ||
Runtime::Current()->IsCompiler());
diff --git a/src/compiler/Compiler.h b/src/compiler/Compiler.h
index af472b9cff..088768b2c9 100644
--- a/src/compiler/Compiler.h
+++ b/src/compiler/Compiler.h
@@ -129,7 +129,6 @@ enum debugControlVector {
kDebugCountOpcodes,
#if defined(ART_USE_QUICK_COMPILER)
kDebugDumpBitcodeFile,
- kDebugVerifyBitcode,
#endif
};
diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h
index 43cfa2fca5..1ecf61aef5 100644
--- a/src/compiler/CompilerIR.h
+++ b/src/compiler/CompilerIR.h
@@ -196,10 +196,9 @@ enum ExtendedMIROpcode {
kMirOpFusedCmpgDouble,
kMirOpFusedCmpLong,
kMirOpNop,
- kMirOpNullCheck,
- kMirOpRangeCheck,
- kMirOpDivZeroCheck,
- kMirOpCheck,
+ kMirOpNullNRangeUpCheck,
+ kMirOpNullNRangeDownCheck,
+ kMirOpLowerBound,
kMirOpLast,
};
@@ -246,10 +245,12 @@ struct MIR {
int optimizationFlags;
int seqNum;
union {
+ // Used by the inlined insn from the callee to find the mother method
+ const Method* calleeMethod;
+ // Used by the inlined invoke to find the class and method pointers
+ CallsiteInfo* callsiteInfo;
// Used to quickly locate all Phi opcodes
MIR* phiNext;
- // Establish link between two halves of throwing instructions
- MIR* throwInsn;
} meta;
};
@@ -277,6 +278,7 @@ struct BasicBlock {
uint16_t nestingDepth;
const Method* containingMethod; // For blocks from the callee
BBType blockType;
+ bool needFallThroughBranch; // For blocks ended due to length limit
bool isFallThroughFromInvoke; // True means the block needs alignment
MIR* firstMIRInsn;
MIR* lastMIRInsn;
diff --git a/src/compiler/Dataflow.cc b/src/compiler/Dataflow.cc
index 38d18ace36..33ef0addad 100644
--- a/src/compiler/Dataflow.cc
+++ b/src/compiler/Dataflow.cc
@@ -822,16 +822,13 @@ const int oatDataFlowAttributes[kMirOpLast] = {
// 107 MIR_NOP
DF_NOP,
- // 108 MIR_NULL_CHECK
+ // 108 MIR_NULL_RANGE_UP_CHECK
0,
- // 109 MIR_RANGE_CHECK
+ // 109 MIR_NULL_RANGE_DOWN_CHECK
0,
- // 110 MIR_DIV_ZERO_CHECK
- 0,
-
- // 111 MIR_CHECK
+ // 110 MIR_LOWER_BOUND
0,
};
@@ -858,29 +855,27 @@ int getSSAUseCount(CompilationUnit* cUnit, int sReg)
char* oatGetDalvikDisassembly(CompilationUnit* cUnit,
const DecodedInstruction& insn, const char* note)
{
- std::string str;
- int opcode = insn.opcode;
+ char buffer[256];
+ Instruction::Code opcode = insn.opcode;
int dfAttributes = oatDataFlowAttributes[opcode];
int flags;
char* ret;
- if (opcode >= kMirOpFirst) {
- if (opcode == kMirOpPhi) {
- str.append("PHI");
- } else if (opcode == kMirOpCheck) {
- str.append("Check");
+ buffer[0] = 0;
+ if ((int)opcode >= (int)kMirOpFirst) {
+ if ((int)opcode == (int)kMirOpPhi) {
+ strcpy(buffer, "PHI");
} else {
- str.append(StringPrintf("Opcode %#x", opcode));
+ sprintf(buffer, "Opcode %#x", opcode);
}
flags = 0;
} else {
- str.append(Instruction::Name(insn.opcode));
- flags = Instruction::Flags(insn.opcode);
+ strcpy(buffer, Instruction::Name(opcode));
+ flags = Instruction::Flags(opcode);
}
- if (note) {
- str.append(note);
- }
+ if (note)
+ strcat(buffer, note);
/* For branches, decode the instructions to print out the branch targets */
if (flags & Instruction::kBranch) {
@@ -888,11 +883,11 @@ char* oatGetDalvikDisassembly(CompilationUnit* cUnit,
int offset = 0;
switch (dalvikFormat) {
case Instruction::k21t:
- str.append(StringPrintf(" v%d,", insn.vA));
+ snprintf(buffer + strlen(buffer), 256, " v%d,", insn.vA);
offset = (int) insn.vB;
break;
case Instruction::k22t:
- str.append(StringPrintf(" v%d, v%d,", insn.vA, insn.vB));
+ snprintf(buffer + strlen(buffer), 256, " v%d, v%d,", insn.vA, insn.vB);
offset = (int) insn.vC;
break;
case Instruction::k10t:
@@ -904,43 +899,45 @@ char* oatGetDalvikDisassembly(CompilationUnit* cUnit,
LOG(FATAL) << "Unexpected branch format " << (int)dalvikFormat
<< " / opcode " << (int)opcode;
}
- str.append(StringPrintf(" (%c%x)",
- offset > 0 ? '+' : '-',
- offset > 0 ? offset : -offset));
+ snprintf(buffer + strlen(buffer), 256, " (%c%x)",
+ offset > 0 ? '+' : '-',
+ offset > 0 ? offset : -offset);
} else if (dfAttributes & DF_FORMAT_35C) {
unsigned int i;
for (i = 0; i < insn.vA; i++) {
- if (i != 0) str.append(",");
- str.append(StringPrintf(" v%d", insn.arg[i]));
+ if (i != 0) strcat(buffer, ",");
+ snprintf(buffer + strlen(buffer), 256, " v%d", insn.arg[i]);
}
}
else if (dfAttributes & DF_FORMAT_3RC) {
- str.append(StringPrintf(" v%d..v%d", insn.vC, insn.vC + insn.vA - 1));
+ snprintf(buffer + strlen(buffer), 256,
+ " v%d..v%d", insn.vC, insn.vC + insn.vA - 1);
} else {
if (dfAttributes & DF_A_IS_REG) {
- str.append(StringPrintf(" v%d", insn.vA));
+ snprintf(buffer + strlen(buffer), 256, " v%d", insn.vA);
}
if (dfAttributes & DF_B_IS_REG) {
- str.append(StringPrintf(", v%d", insn.vB));
+ snprintf(buffer + strlen(buffer), 256, ", v%d", insn.vB);
} else if ((int)opcode < (int)kMirOpFirst) {
- str.append(StringPrintf(", (#%d)", insn.vB));
+ snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn.vB);
}
if (dfAttributes & DF_C_IS_REG) {
- str.append(StringPrintf(", v%d", insn.vC));
+ snprintf(buffer + strlen(buffer), 256, ", v%d", insn.vC);
} else if ((int)opcode < (int)kMirOpFirst) {
- str.append(StringPrintf(", (#%d)", insn.vC));
+ snprintf(buffer + strlen(buffer), 256, ", (#%d)", insn.vC);
}
}
- int length = str.length() + 1;
+ int length = strlen(buffer) + 1;
ret = (char*)oatNew(cUnit, length, false, kAllocDFInfo);
- strncpy(ret, str.c_str(), length);
+ memcpy(ret, buffer, length);
return ret;
}
-std::string getSSAName(const CompilationUnit* cUnit, int ssaReg)
+char* getSSAName(const CompilationUnit* cUnit, int ssaReg, char* name)
{
- return StringPrintf("v%d_%d", SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg));
+ sprintf(name, "v%d_%d", SRegToVReg(cUnit, ssaReg),
+ SRegToSubscript(cUnit, ssaReg));
+ return name;
}
/*
@@ -948,38 +945,32 @@ std::string getSSAName(const CompilationUnit* cUnit, int ssaReg)
*/
char* oatFullDisassembler(CompilationUnit* cUnit, const MIR* mir)
{
- std::string str;
+ char buffer[256];
+ char operand0[32], operand1[32];
const DecodedInstruction* insn = &mir->dalvikInsn;
- int opcode = insn->opcode;
+ Instruction::Code opcode = insn->opcode;
int dfAttributes = oatDataFlowAttributes[opcode];
char* ret;
int length;
- if (opcode >= kMirOpFirst) {
- if (opcode == kMirOpPhi) {
- int* incoming = (int*)mir->dalvikInsn.vB;
- str.append(StringPrintf("PHI %s = (%s",
- getSSAName(cUnit, mir->ssaRep->defs[0]).c_str(),
- getSSAName(cUnit, mir->ssaRep->uses[0]).c_str()));
- str.append(StringPrintf(":%d",incoming[0]));
+ buffer[0] = 0;
+ if (static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst)) {
+ if (static_cast<int>(opcode) == static_cast<int>(kMirOpPhi)) {
+ snprintf(buffer, 256, "PHI %s = (%s",
+ getSSAName(cUnit, mir->ssaRep->defs[0], operand0),
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand1));
int i;
for (i = 1; i < mir->ssaRep->numUses; i++) {
- str.append(StringPrintf(", %s:%d",
- getSSAName(cUnit, mir->ssaRep->uses[i]).c_str(),
- incoming[i]));
+ snprintf(buffer + strlen(buffer), 256, ", %s",
+ getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
}
- str.append(")");
- } else if (opcode == kMirOpCheck) {
- str.append("Check ");
- str.append(Instruction::Name(mir->meta.throwInsn->dalvikInsn.opcode));
- } else if (opcode == kMirOpNop) {
- str.append("MirNop");
+ snprintf(buffer + strlen(buffer), 256, ")");
} else {
- str.append(StringPrintf("Opcode %#x", opcode));
+ sprintf(buffer, "Opcode %#x", opcode);
}
goto done;
} else {
- str.append(Instruction::Name(insn->opcode));
+ strcpy(buffer, Instruction::Name(opcode));
}
/* For branches, decode the instructions to print out the branch targets */
@@ -988,14 +979,14 @@ char* oatFullDisassembler(CompilationUnit* cUnit, const MIR* mir)
int delta = 0;
switch (dalvikFormat) {
case Instruction::k21t:
- str.append(StringPrintf(" %s, ",
- getSSAName(cUnit, mir->ssaRep->uses[0]).c_str()));
+ snprintf(buffer + strlen(buffer), 256, " %s, ",
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
delta = (int) insn->vB;
break;
case Instruction::k22t:
- str.append(StringPrintf(" %s, %s, ",
- getSSAName(cUnit, mir->ssaRep->uses[0]).c_str(),
- getSSAName(cUnit, mir->ssaRep->uses[1]).c_str()));
+ snprintf(buffer + strlen(buffer), 256, " %s, %s, ",
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand0),
+ getSSAName(cUnit, mir->ssaRep->uses[1], operand1));
delta = (int) insn->vC;
break;
case Instruction::k10t:
@@ -1006,53 +997,54 @@ char* oatFullDisassembler(CompilationUnit* cUnit, const MIR* mir)
default:
LOG(FATAL) << "Unexpected branch format: " << (int)dalvikFormat;
}
- str.append(StringPrintf(" %04x", mir->offset + delta));
+ snprintf(buffer + strlen(buffer), 256, " %04x",
+ mir->offset + delta);
} else if (dfAttributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) {
unsigned int i;
for (i = 0; i < insn->vA; i++) {
- if (i != 0) str.append(",");
- str.append(" ");
- str.append(getSSAName(cUnit, mir->ssaRep->uses[i]));
+ if (i != 0) strcat(buffer, ",");
+ snprintf(buffer + strlen(buffer), 256, " %s",
+ getSSAName(cUnit, mir->ssaRep->uses[i], operand0));
}
} else {
int udIdx;
if (mir->ssaRep->numDefs) {
for (udIdx = 0; udIdx < mir->ssaRep->numDefs; udIdx++) {
- str.append(" ");
- str.append(getSSAName(cUnit, mir->ssaRep->defs[udIdx]));
+ snprintf(buffer + strlen(buffer), 256, " %s",
+ getSSAName(cUnit, mir->ssaRep->defs[udIdx], operand0));
}
- str.append(",");
+ strcat(buffer, ",");
}
if (mir->ssaRep->numUses) {
/* No leading ',' for the first use */
- str.append(" ");
- str.append(getSSAName(cUnit, mir->ssaRep->uses[0]));
+ snprintf(buffer + strlen(buffer), 256, " %s",
+ getSSAName(cUnit, mir->ssaRep->uses[0], operand0));
for (udIdx = 1; udIdx < mir->ssaRep->numUses; udIdx++) {
- str.append(", ");
- str.append(getSSAName(cUnit, mir->ssaRep->uses[udIdx]));
+ snprintf(buffer + strlen(buffer), 256, ", %s",
+ getSSAName(cUnit, mir->ssaRep->uses[udIdx], operand0));
}
}
if (static_cast<int>(opcode) < static_cast<int>(kMirOpFirst)) {
- Instruction::Format dalvikFormat = Instruction::FormatOf(insn->opcode);
+ Instruction::Format dalvikFormat = Instruction::FormatOf(opcode);
switch (dalvikFormat) {
case Instruction::k11n: // op vA, #+B
case Instruction::k21s: // op vAA, #+BBBB
case Instruction::k21h: // op vAA, #+BBBB00000[00000000]
case Instruction::k31i: // op vAA, #+BBBBBBBB
case Instruction::k51l: // op vAA, #+BBBBBBBBBBBBBBBB
- str.append(StringPrintf(" #%#x", insn->vB));
+ snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vB);
break;
case Instruction::k21c: // op vAA, thing@BBBB
case Instruction::k31c: // op vAA, thing@BBBBBBBB
- str.append(StringPrintf(" @%#x", insn->vB));
+ snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vB);
break;
case Instruction::k22b: // op vAA, vBB, #+CC
case Instruction::k22s: // op vA, vB, #+CCCC
- str.append(StringPrintf(" #%#x", insn->vC));
+ snprintf(buffer + strlen(buffer), 256, " #%#x", insn->vC);
break;
case Instruction::k22c: // op vA, vB, thing@CCCC
- str.append(StringPrintf(" @%#x", insn->vC));
+ snprintf(buffer + strlen(buffer), 256, " @%#x", insn->vC);
break;
/* No need for special printing */
default:
@@ -1062,38 +1054,44 @@ char* oatFullDisassembler(CompilationUnit* cUnit, const MIR* mir)
}
done:
- length = str.length() + 1;
+ length = strlen(buffer) + 1;
ret = (char*) oatNew(cUnit, length, false, kAllocDFInfo);
- strncpy(ret, str.c_str(), length);
+ memcpy(ret, buffer, length);
return ret;
}
char* oatGetSSAString(CompilationUnit* cUnit, SSARepresentation* ssaRep)
{
- std::string str;
+ char buffer[256];
char* ret;
int i;
+ buffer[0] = 0;
for (i = 0; i < ssaRep->numDefs; i++) {
int ssaReg = ssaRep->defs[i];
- str.append(StringPrintf("s%d(v%d_%d) ", ssaReg,
- SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg)));
+ sprintf(buffer + strlen(buffer), "s%d(v%d_%d) ", ssaReg,
+ SRegToVReg(cUnit, ssaReg), SRegToSubscript(cUnit, ssaReg));
}
if (ssaRep->numDefs) {
- str.append("<- ");
+ strcat(buffer, "<- ");
}
for (i = 0; i < ssaRep->numUses; i++) {
+ int len = strlen(buffer);
int ssaReg = ssaRep->uses[i];
- str.append(StringPrintf("s%d(v%d_%d) ", ssaReg, SRegToVReg(cUnit, ssaReg),
- SRegToSubscript(cUnit, ssaReg)));
+
+ if (snprintf(buffer + len, 250 - len, "s%d(v%d_%d) ", ssaReg,
+ SRegToVReg(cUnit, ssaReg),
+ SRegToSubscript(cUnit, ssaReg))) {
+ strcat(buffer, "...");
+ break;
+ }
}
- int length = str.length() + 1;
+ int length = strlen(buffer) + 1;
ret = (char*)oatNew(cUnit, length, false, kAllocDFInfo);
- strncpy(ret, str.c_str(), length);
+ memcpy(ret, buffer, length);
return ret;
}
@@ -1185,10 +1183,8 @@ int addNewSReg(CompilationUnit* cUnit, int vReg)
int ssaReg = cUnit->numSSARegs++;
oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, vReg);
oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, subscript);
- std::string ssaName = getSSAName(cUnit, ssaReg);
- char* name = (char*)oatNew(cUnit, ssaName.length() + 1, false, kAllocDFInfo);
- strncpy(name, ssaName.c_str(), ssaName.length() + 1);
- oatInsertGrowableList(cUnit, cUnit->ssaStrings, (intptr_t)name);
+ char* name = (char*)oatNew(cUnit, SSA_NAME_MAX, true, kAllocDFInfo);
+ oatInsertGrowableList(cUnit, cUnit->ssaStrings, (intptr_t)getSSAName(cUnit, ssaReg, name));
DCHECK_EQ(cUnit->ssaBaseVRegs->numUsed, cUnit->ssaSubscripts->numUsed);
return ssaReg;
}
@@ -1496,10 +1492,8 @@ void oatInitializeSSAConversion(CompilationUnit* cUnit)
for (i = 0; i < numDalvikReg; i++) {
oatInsertGrowableList(cUnit, cUnit->ssaBaseVRegs, i);
oatInsertGrowableList(cUnit, cUnit->ssaSubscripts, 0);
- std::string ssaName = getSSAName(cUnit, i);
- char* name = (char*)oatNew(cUnit, ssaName.length() + 1, true, kAllocDFInfo);
- strncpy(name, ssaName.c_str(), ssaName.length() + 1);
- oatInsertGrowableList(cUnit, cUnit->ssaStrings, (intptr_t)name);
+ char* name = (char*)oatNew(cUnit, SSA_NAME_MAX, true, kAllocDFInfo);
+ oatInsertGrowableList(cUnit, cUnit->ssaStrings, (intptr_t)getSSAName(cUnit, i, name));
}
/*
@@ -1693,14 +1687,14 @@ MIR* oatFindMoveResult(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
BasicBlock* tbb = bb;
mir = advanceMIR(cUnit, &tbb, mir, NULL, false);
while (mir != NULL) {
- int opcode = mir->dalvikInsn.opcode;
if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
break;
}
// Keep going if pseudo op, otherwise terminate
- if (opcode < kNumPackedOpcodes) {
+ if (mir->dalvikInsn.opcode <
+ static_cast<Instruction::Code>(kNumPackedOpcodes)) {
mir = NULL;
} else {
mir = advanceMIR(cUnit, &tbb, mir, NULL, false);
@@ -1849,7 +1843,6 @@ bool basicBlockOpt(CompilationUnit* cUnit, BasicBlock* bb)
squashDupRangeChecks(cUnit, &tbb, mir, arrSreg, idxSreg);
}
break;
-#if defined(TARGET_ARM)
case Instruction::CMPL_FLOAT:
case Instruction::CMPL_DOUBLE:
case Instruction::CMPG_FLOAT:
@@ -1919,7 +1912,6 @@ bool basicBlockOpt(CompilationUnit* cUnit, BasicBlock* bb)
}
}
break;
-#endif
default:
break;
}
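[Editor's sketch, not part of the commit: a standalone illustration of the fixed-buffer append pattern the disassembly helpers above switch to. The 256-byte size matches the local buffers above; the helper name is an assumption. Passing the space remaining after the current contents, rather than the full buffer size, keeps each snprintf bounded.]

#include <stdio.h>
#include <string.h>

static const size_t kDisasmBufSize = 256;  // matches the local buffers above

// Append one operand to a fixed-size disassembly buffer without overflowing it.
static void AppendOperand(char* buffer, int vreg) {
  size_t used = strlen(buffer);
  if (used < kDisasmBufSize) {
    snprintf(buffer + used, kDisasmBufSize - used, " v%d", vreg);
  }
}

int main() {
  char buffer[kDisasmBufSize];
  buffer[0] = 0;
  strcat(buffer, "ADD_INT");
  AppendOperand(buffer, 0);
  AppendOperand(buffer, 1);
  printf("%s\n", buffer);  // prints: ADD_INT v0 v1
  return 0;
}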
diff --git a/src/compiler/Frontend.cc b/src/compiler/Frontend.cc
index 6b45f0d2cd..7585b77d02 100644
--- a/src/compiler/Frontend.cc
+++ b/src/compiler/Frontend.cc
@@ -54,7 +54,6 @@ static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes
//(1 << kDebugCountOpcodes) |
#if defined(ART_USE_QUICK_COMPILER)
//(1 << kDebugDumpBitcodeFile) |
- //(1 << kDebugVerifyBitcode) |
#endif
0;
@@ -155,8 +154,10 @@ BasicBlock *splitBlock(CompilationUnit* cUnit, unsigned int codeOffset,
}
/* Handle the fallthrough path */
+ bottomBlock->needFallThroughBranch = origBlock->needFallThroughBranch;
bottomBlock->fallThrough = origBlock->fallThrough;
origBlock->fallThrough = bottomBlock;
+ origBlock->needFallThroughBranch = true;
oatInsertGrowableList(cUnit, bottomBlock->predecessors,
(intptr_t)origBlock);
if (bottomBlock->fallThrough) {
@@ -288,12 +289,12 @@ void oatDumpCFG(CompilationUnit* cUnit, const char* dirPrefix)
blockIdx);
if (bb == NULL) break;
if (bb->blockType == kEntryBlock) {
- fprintf(file, " entry_%d [shape=Mdiamond];\n", bb->id);
+ fprintf(file, " entry [shape=Mdiamond];\n");
} else if (bb->blockType == kExitBlock) {
- fprintf(file, " exit_%d [shape=Mdiamond];\n", bb->id);
+ fprintf(file, " exit [shape=Mdiamond];\n");
} else if (bb->blockType == kDalvikByteCode) {
- fprintf(file, " block%04x_%d [shape=record,label = \"{ \\\n",
- bb->startOffset, bb->id);
+ fprintf(file, " block%04x [shape=record,label = \"{ \\\n",
+ bb->startOffset);
const MIR *mir;
fprintf(file, " {block id %d\\l}%s\\\n", bb->id,
bb->firstMIRInsn ? " | " : " ");
@@ -326,8 +327,8 @@ void oatDumpCFG(CompilationUnit* cUnit, const char* dirPrefix)
}
if (bb->successorBlockList.blockListType != kNotUsed) {
- fprintf(file, " succ%04x_%d [shape=%s,label = \"{ \\\n",
- bb->startOffset, bb->id,
+ fprintf(file, " succ%04x [shape=%s,label = \"{ \\\n",
+ bb->startOffset,
(bb->successorBlockList.blockListType == kCatch) ?
"Mrecord" : "record");
GrowableListIterator iterator;
@@ -355,8 +356,8 @@ void oatDumpCFG(CompilationUnit* cUnit, const char* dirPrefix)
fprintf(file, " }\"];\n\n");
oatGetBlockName(bb, blockName1);
- fprintf(file, " %s:s -> succ%04x_%d:n [style=dashed]\n",
- blockName1, bb->startOffset, bb->id);
+ fprintf(file, " %s:s -> succ%04x:n [style=dashed]\n",
+ blockName1, bb->startOffset);
if (bb->successorBlockList.blockListType == kPackedSwitch ||
bb->successorBlockList.blockListType == kSparseSwitch) {
@@ -373,8 +374,8 @@ void oatDumpCFG(CompilationUnit* cUnit, const char* dirPrefix)
BasicBlock *destBlock = successorBlockInfo->block;
oatGetBlockName(destBlock, blockName2);
- fprintf(file, " succ%04x_%d:f%d:e -> %s:n\n", bb->startOffset,
- bb->id, succId++, blockName2);
+ fprintf(file, " succ%04x:f%d:e -> %s:n\n", bb->startOffset,
+ succId++, blockName2);
}
}
}
@@ -643,20 +644,18 @@ void processCanSwitch(CompilationUnit* cUnit, BasicBlock* curBlock,
}
/* Process instructions with the kThrow flag */
-BasicBlock* processCanThrow(CompilationUnit* cUnit, BasicBlock* curBlock,
- MIR* insn, int curOffset, int width, int flags,
- ArenaBitVector* tryBlockAddr, const u2* codePtr,
- const u2* codeEnd)
+void processCanThrow(CompilationUnit* cUnit, BasicBlock* curBlock, MIR* insn,
+ int curOffset, int width, int flags,
+ ArenaBitVector* tryBlockAddr, const u2* codePtr,
+ const u2* codeEnd)
{
const DexFile::CodeItem* code_item = cUnit->code_item;
- bool inTryBlock = oatIsBitSet(tryBlockAddr, curOffset);
/* In try block */
- if (inTryBlock) {
+ if (oatIsBitSet(tryBlockAddr, curOffset)) {
CatchHandlerIterator iterator(*code_item, curOffset);
if (curBlock->successorBlockList.blockListType != kNotUsed) {
- LOG(INFO) << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
LOG(FATAL) << "Successor block list already in use: "
<< (int)curBlock->successorBlockList.blockListType;
}
@@ -689,46 +688,37 @@ BasicBlock* processCanThrow(CompilationUnit* cUnit, BasicBlock* curBlock,
oatInsertGrowableList(cUnit, ehBlock->predecessors, (intptr_t)curBlock);
}
- if (insn->dalvikInsn.opcode == Instruction::THROW){
- if ((codePtr < codeEnd) && contentIsInsn(codePtr)) {
- // Force creation of new block following THROW via side-effect
- findBlock(cUnit, curOffset + width, /* split */ false,
- /* create */ true, /* immedPredBlockP */ NULL);
- }
- if (!inTryBlock) {
- // Don't split a THROW that can't rethrow - we're done.
- return curBlock;
- }
- }
-
/*
- * Split the potentially-throwing instruction into two parts.
- * The first half will be a pseudo-op that captures the exception
- * edges and terminates the basic block. It always falls through.
- * Then, create a new basic block that begins with the throwing instruction
- * (minus exceptions). Note: this new basic block must NOT be entered into
- * the blockMap. If the potentially-throwing instruction is the target of a
- * future branch, we need to find the check psuedo half. The new
- * basic block containing the work portion of the instruction should
- * only be entered via fallthrough from the block containing the
- * pseudo exception edge MIR. Note also that this new block is
- * not automatically terminated after the work portion, and may
- * contain following instructions.
+ * Force the current block to terminate.
+ *
+ * Data may be present before codeEnd, so we need to parse it to know
+ * whether it is code or data.
*/
- BasicBlock *newBlock = oatNewBB(cUnit, kDalvikByteCode, cUnit->numBlocks++);
- oatInsertGrowableList(cUnit, &cUnit->blockList, (intptr_t)newBlock);
- newBlock->startOffset = insn->offset;
- curBlock->fallThrough = newBlock;
- oatInsertGrowableList(cUnit, newBlock->predecessors, (intptr_t)curBlock);
- MIR* newInsn = (MIR*)oatNew(cUnit, sizeof(MIR), true, kAllocMIR);
- *newInsn = *insn;
- insn->dalvikInsn.opcode =
- static_cast<Instruction::Code>(kMirOpCheck);
- // Associate the two halves
- insn->meta.throwInsn = newInsn;
- newInsn->meta.throwInsn = insn;
- oatAppendMIR(newBlock, newInsn);
- return newBlock;
+ if (codePtr < codeEnd) {
+ /* Create a fallthrough block for real instructions (incl. NOP) */
+ if (contentIsInsn(codePtr)) {
+ BasicBlock *fallthroughBlock = findBlock(cUnit,
+ curOffset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immedPredBlockP */
+ NULL);
+ /*
+ * THROW is an unconditional branch. NOTE:
+ * THROW_VERIFICATION_ERROR is also an unconditional
+ * branch, but we shouldn't treat it as such until we have
+ * a dead code elimination pass (which won't be important
+ * until inlining w/ constant propagation is implemented.
+ */
+ if (insn->dalvikInsn.opcode != Instruction::THROW) {
+ curBlock->fallThrough = fallthroughBlock;
+ oatInsertGrowableList(cUnit, fallthroughBlock->predecessors,
+ (intptr_t)curBlock);
+ }
+ }
+ }
}
void oatInit(CompilationUnit* cUnit, const Compiler& compiler) {
@@ -773,11 +763,15 @@ CompiledMethod* oatCompileMethod(Compiler& compiler,
cUnit->numRegs = code_item->registers_size_ - cUnit->numIns;
cUnit->numOuts = code_item->outs_size_;
#if defined(ART_USE_QUICK_COMPILER)
- DCHECK((cUnit->instructionSet == kThumb2) ||
- (cUnit->instructionSet == kX86) ||
- (cUnit->instructionSet == kMips));
- if (cUnit->instructionSet == kThumb2) {
- // TODO: remove this once x86 is tested
+ // TODO: fix bug and remove this workaround
+ std::string methodName = PrettyMethod(method_idx, dex_file);
+ if ((methodName.find("gdata2.AndroidGDataClient.createAndExecuteMethod")
+ != std::string::npos) || (methodName.find("hG.a") != std::string::npos)
+ || (methodName.find("hT.a(hV, java.lang.String, java.lang.String, java")
+ != std::string::npos) || (methodName.find("AndroidHttpTransport.exchange")
+ != std::string::npos)) {
+ LOG(INFO) << "Skipping bitcode generation for " << methodName;
+ } else {
cUnit->genBitcode = true;
}
#endif
@@ -797,17 +791,12 @@ CompiledMethod* oatCompileMethod(Compiler& compiler,
}
#if defined(ART_USE_QUICK_COMPILER)
if (cUnit->genBitcode) {
- //cUnit->enableDebug |= (1 << kDebugVerifyBitcode);
//cUnit->printMe = true;
//cUnit->enableDebug |= (1 << kDebugDumpBitcodeFile);
// Disable non-safe optimizations for now
cUnit->disableOpt |= ~(1 << kSafeOptimizations);
}
#endif
- if (cUnit->instructionSet == kX86) {
- // Disable some optimizations on X86 for now
- cUnit->disableOpt |= (1 << kLoadStoreElimination);
- }
/* Are we generating code for the debugger? */
if (compiler.IsDebuggingSupported()) {
cUnit->genDebugger = true;
@@ -969,8 +958,8 @@ CompiledMethod* oatCompileMethod(Compiler& compiler,
}
}
} else if (flags & Instruction::kThrow) {
- curBlock = processCanThrow(cUnit.get(), curBlock, insn, curOffset,
- width, flags, tryBlockAddr, codePtr, codeEnd);
+ processCanThrow(cUnit.get(), curBlock, insn, curOffset, width, flags,
+ tryBlockAddr, codePtr, codeEnd);
} else if (flags & Instruction::kSwitch) {
processCanSwitch(cUnit.get(), curBlock, insn, curOffset, width, flags);
}
diff --git a/src/compiler/Ralloc.cc b/src/compiler/Ralloc.cc
index f4e735a5b5..500b1b2d1b 100644
--- a/src/compiler/Ralloc.cc
+++ b/src/compiler/Ralloc.cc
@@ -89,6 +89,20 @@ bool remapNames(CompilationUnit* cUnit, BasicBlock* bb)
return false;
}
+// Try to find the next move result which might have an FP target
+SSARepresentation* findFPMoveResult(MIR* mir)
+{
+ SSARepresentation* res = NULL;
+ for (; mir; mir = mir->next) {
+ if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
+ (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
+ res = mir->ssaRep;
+ break;
+ }
+ }
+ return res;
+}
+
/*
* Infer types and sizes. We don't need to track change on sizes,
* as it doesn't propagate. We're guaranteed at least one pass through
@@ -222,12 +236,14 @@ bool inferTypeAndSize(CompilationUnit* cUnit, BasicBlock* bb)
const char* shorty = oatGetShortyFromTargetIdx(cUnit, target_idx);
// Handle result type if floating point
if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
- MIR* moveResultMIR = oatFindMoveResult(cUnit, bb, mir);
+ // Find move-result that consumes this result
+ SSARepresentation* tgtRep = findFPMoveResult(mir->next);
+ // Might be in next basic block
+ if (!tgtRep) {
+ tgtRep = findFPMoveResult(bb->fallThrough->firstMIRInsn);
+ }
// Result might not be used at all, so no move-result
- if (moveResultMIR && (moveResultMIR->dalvikInsn.opcode !=
- Instruction::MOVE_RESULT_OBJECT)) {
- SSARepresentation* tgtRep = moveResultMIR->ssaRep;
- DCHECK(tgtRep != NULL);
+ if (tgtRep) {
tgtRep->fpDef[0] = true;
changed |= setFp(cUnit, tgtRep->defs[0], true);
if (shorty[0] == 'D') {
diff --git a/src/compiler/Utility.cc b/src/compiler/Utility.cc
index c865718536..571208fe45 100644
--- a/src/compiler/Utility.cc
+++ b/src/compiler/Utility.cc
@@ -687,20 +687,19 @@ void oatGetBlockName(BasicBlock* bb, char* name)
{
switch (bb->blockType) {
case kEntryBlock:
- snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
+ snprintf(name, BLOCK_NAME_LEN, "entry");
break;
case kExitBlock:
- snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
+ snprintf(name, BLOCK_NAME_LEN, "exit");
break;
case kDalvikByteCode:
- snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->startOffset, bb->id);
+ snprintf(name, BLOCK_NAME_LEN, "block%04x", bb->startOffset);
break;
case kExceptionHandling:
- snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->startOffset,
- bb->id);
+ snprintf(name, BLOCK_NAME_LEN, "exception%04x", bb->startOffset);
break;
default:
- snprintf(name, BLOCK_NAME_LEN, "??_%d", bb->id);
+ snprintf(name, BLOCK_NAME_LEN, "??");
break;
}
}
diff --git a/src/compiler/codegen/GenInvoke.cc b/src/compiler/codegen/GenInvoke.cc
index e6714aa807..9f1d58e304 100644
--- a/src/compiler/codegen/GenInvoke.cc
+++ b/src/compiler/codegen/GenInvoke.cc
@@ -752,7 +752,7 @@ bool genInlinedAbsLong(CompilationUnit *cUnit, CallInfo* info)
bool genInlinedFloatCvt(CompilationUnit *cUnit, CallInfo* info)
{
-#if defined(TARGET_ARM)
+#if defined(TARGET_ARM) || defined(TARGET_X86)
RegLocation rlSrc = info->args[0];
RegLocation rlDest = inlineTarget(cUnit, info);
storeValue(cUnit, rlDest, rlSrc);
@@ -764,7 +764,7 @@ bool genInlinedFloatCvt(CompilationUnit *cUnit, CallInfo* info)
bool genInlinedDoubleCvt(CompilationUnit *cUnit, CallInfo* info)
{
-#if defined(TARGET_ARM)
+#if defined(TARGET_ARM) || defined(TARGET_X86)
RegLocation rlSrc = info->args[0];
RegLocation rlDest = inlineTargetWide(cUnit, info);
storeValueWide(cUnit, rlDest, rlSrc);
diff --git a/src/compiler/codegen/LocalOptimizations.cc b/src/compiler/codegen/LocalOptimizations.cc
index faab3e0046..2fc7ae0ccd 100644
--- a/src/compiler/codegen/LocalOptimizations.cc
+++ b/src/compiler/codegen/LocalOptimizations.cc
@@ -226,6 +226,15 @@ void applyLoadStoreElimination(CompilationUnit* cUnit, LIR* headLIR,
}
if (stopHere == true) {
+#if defined(TARGET_X86)
+ // Prevent stores from being sunk between ops that generate ccodes and
+ // ops that use them.
+ int flags = EncodingMap[checkLIR->opcode].flags;
+ if (sinkDistance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
+ checkLIR = PREV_LIR(checkLIR);
+ sinkDistance--;
+ }
+#endif
DEBUG_OPT(dumpDependentInsnPair(thisLIR, checkLIR, "REG CLOBBERED"));
/* Only sink store instructions */
if (sinkDistance && !isThisLIRLoad) {
diff --git a/src/compiler/codegen/MethodBitcode.cc b/src/compiler/codegen/MethodBitcode.cc
index 8e9f15fa76..a8da1cde44 100644
--- a/src/compiler/codegen/MethodBitcode.cc
+++ b/src/compiler/codegen/MethodBitcode.cc
@@ -51,7 +51,7 @@ void defineValue(CompilationUnit* cUnit, llvm::Value* val, int sReg)
llvm::Value* placeholder = getLLVMValue(cUnit, sReg);
if (placeholder == NULL) {
// This can happen on instruction rewrite on verification failure
- LOG(WARNING) << "Null placeholder";
+ LOG(WARNING) << "Null placeholder - invalid CFG";
return;
}
placeholder->replaceAllUsesWith(val);
@@ -307,6 +307,7 @@ void convertThrow(CompilationUnit* cUnit, RegLocation rlSrc)
llvm::Function* func = cUnit->intrinsic_helper->GetIntrinsicFunction(
greenland::IntrinsicHelper::Throw);
cUnit->irb->CreateCall(func, src);
+ cUnit->irb->CreateUnreachable();
}
void convertMonitorEnterExit(CompilationUnit* cUnit, int optFlags,
@@ -803,7 +804,6 @@ bool convertMIRNode(CompilationUnit* cUnit, MIR* mir, BasicBlock* bb,
bool res = false; // Assume success
RegLocation rlSrc[3];
RegLocation rlDest = badLoc;
- RegLocation rlResult = badLoc;
Instruction::Code opcode = mir->dalvikInsn.opcode;
uint32_t vA = mir->dalvikInsn.vA;
uint32_t vB = mir->dalvikInsn.vB;
@@ -812,15 +812,6 @@ bool convertMIRNode(CompilationUnit* cUnit, MIR* mir, BasicBlock* bb,
bool objectDefinition = false;
- if (cUnit->printMe) {
- if ((int)opcode < kMirOpFirst) {
- LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x"
- << std::hex << (int)opcode;
- } else {
- LOG(INFO) << ".. opcode 0x" << std::hex << (int)opcode;
- }
- }
-
/* Prep Src and Dest locations */
int nextSreg = 0;
int nextLoc = 0;
@@ -1301,24 +1292,10 @@ bool convertMIRNode(CompilationUnit* cUnit, MIR* mir, BasicBlock* bb,
case Instruction::THROW:
convertThrow(cUnit, rlSrc[0]);
- /*
- * If this throw is standalone, terminate.
- * If it might rethrow, force termination
- * of the following block.
- */
- if (bb->fallThrough == NULL) {
- cUnit->irb->CreateUnreachable();
- } else {
- bb->fallThrough->fallThrough = NULL;
- bb->fallThrough->taken = NULL;
- }
break;
case Instruction::THROW_VERIFICATION_ERROR:
convertThrowVerificationError(cUnit, vA, vB);
- UNIMPLEMENTED(WARNING) << "Need dead code elimination pass"
- << " - disabling bitcode verification";
- cUnit->enableDebug &= ~(1 << kDebugVerifyBitcode);
break;
case Instruction::MOVE_RESULT_WIDE:
@@ -1329,8 +1306,7 @@ bool convertMIRNode(CompilationUnit* cUnit, MIR* mir, BasicBlock* bb,
* Instruction rewriting on verification failure can eliminate
* the invoke that feeds this move0result. It won't ever be reached,
* so we can ignore it.
- * TODO: verify that previous instruction is THROW_VERIFICATION_ERROR,
- * or better, add dead-code elimination.
+ * TODO: verify that previous instruction if THROW_VERIFICATION_ERROR
*/
UNIMPLEMENTED(WARNING) << "Need to verify previous inst was rewritten";
#else
@@ -1673,13 +1649,6 @@ void convertExtendedMIR(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
UNIMPLEMENTED(WARNING) << "unimp kMirOpPhi";
break;
}
- case kMirOpNop:
- if ((mir == bb->lastMIRInsn) && (bb->taken == NULL) &&
- (bb->fallThrough == NULL)) {
- cUnit->irb->CreateUnreachable();
- }
- break;
-
#if defined(TARGET_ARM)
case kMirOpFusedCmplFloat:
UNIMPLEMENTED(WARNING) << "unimp kMirOpFusedCmpFloat";
@@ -1751,16 +1720,6 @@ bool methodBlockBitcodeConversion(CompilationUnit* cUnit, BasicBlock* bb)
cUnit->irb->SetInsertPoint(llvmBB);
setDexOffset(cUnit, bb->startOffset);
- if (cUnit->printMe) {
- LOG(INFO) << "................................";
- LOG(INFO) << "Block id " << bb->id;
- if (llvmBB != NULL) {
- LOG(INFO) << "label " << llvmBB->getName().str().c_str();
- } else {
- LOG(INFO) << "llvmBB is NULL";
- }
- }
-
if (bb->blockType == kEntryBlock) {
setMethodInfo(cUnit);
bool *canBeRef = (bool*) oatNew(cUnit, sizeof(bool) *
@@ -1800,6 +1759,8 @@ bool methodBlockBitcodeConversion(CompilationUnit* cUnit, BasicBlock* bb)
/*
* Because we're deferring null checking, delete the associated empty
* exception block.
+ * TODO: add new block type for exception blocks that we generate
+ * greenland code for.
*/
llvmBB->eraseFromParent();
return false;
@@ -1809,9 +1770,8 @@ bool methodBlockBitcodeConversion(CompilationUnit* cUnit, BasicBlock* bb)
setDexOffset(cUnit, mir->offset);
- int opcode = mir->dalvikInsn.opcode;
- Instruction::Format dalvikFormat =
- Instruction::FormatOf(mir->dalvikInsn.opcode);
+ Instruction::Code dalvikOpcode = mir->dalvikInsn.opcode;
+ Instruction::Format dalvikFormat = Instruction::FormatOf(dalvikOpcode);
/* If we're compiling for the debugger, generate an update callout */
if (cUnit->genDebugger) {
@@ -1819,43 +1779,7 @@ bool methodBlockBitcodeConversion(CompilationUnit* cUnit, BasicBlock* bb)
//genDebuggerUpdate(cUnit, mir->offset);
}
- if (opcode == kMirOpCheck) {
- // Combine check and work halves of throwing instruction.
- MIR* workHalf = mir->meta.throwInsn;
- mir->dalvikInsn.opcode = workHalf->dalvikInsn.opcode;
- opcode = mir->dalvikInsn.opcode;
- SSARepresentation* ssaRep = workHalf->ssaRep;
- workHalf->ssaRep = mir->ssaRep;
- mir->ssaRep = ssaRep;
- workHalf->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- if (bb->successorBlockList.blockListType == kCatch) {
- llvm::Function* intr = cUnit->intrinsic_helper->GetIntrinsicFunction(
- greenland::IntrinsicHelper::CatchTargets);
- llvm::Value* switchKey =
- cUnit->irb->CreateCall(intr, cUnit->irb->getInt32(mir->offset));
- GrowableListIterator iter;
- oatGrowableListIteratorInit(&bb->successorBlockList.blocks, &iter);
- // New basic block to use for work half
- llvm::BasicBlock* workBB =
- llvm::BasicBlock::Create(*cUnit->context, "", cUnit->func);
- llvm::SwitchInst* sw =
- cUnit->irb->CreateSwitch(switchKey, workBB,
- bb->successorBlockList.blocks.numUsed);
- while (true) {
- SuccessorBlockInfo *successorBlockInfo =
- (SuccessorBlockInfo *) oatGrowableListIteratorNext(&iter);
- if (successorBlockInfo == NULL) break;
- llvm::BasicBlock *target =
- getLLVMBlock(cUnit, successorBlockInfo->block->id);
- int typeIndex = successorBlockInfo->key;
- sw->addCase(cUnit->irb->getInt32(typeIndex), target);
- }
- llvmBB = workBB;
- cUnit->irb->SetInsertPoint(llvmBB);
- }
- }
-
- if (opcode >= kMirOpFirst) {
+ if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
convertExtendedMIR(cUnit, bb, mir, llvmBB);
continue;
}
@@ -1863,9 +1787,8 @@ bool methodBlockBitcodeConversion(CompilationUnit* cUnit, BasicBlock* bb)
bool notHandled = convertMIRNode(cUnit, mir, bb, llvmBB,
NULL /* labelList */);
if (notHandled) {
- Instruction::Code dalvikOpcode = static_cast<Instruction::Code>(opcode);
LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
- mir->offset, opcode,
+ mir->offset, dalvikOpcode,
Instruction::Name(dalvikOpcode),
dalvikFormat);
}
@@ -2063,14 +1986,7 @@ void oatMethodMIR2Bitcode(CompilationUnit* cUnit)
cUnit->irb->SetInsertPoint(cUnit->entryBB);
cUnit->irb->CreateBr(cUnit->entryTargetBB);
- if (cUnit->enableDebug & (1 << kDebugVerifyBitcode)) {
- if (llvm::verifyFunction(*cUnit->func, llvm::PrintMessageAction)) {
- LOG(INFO) << "Bitcode verification FAILED for "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file)
- << " of size " << cUnit->insnsSize;
- cUnit->enableDebug |= (1 << kDebugDumpBitcodeFile);
- }
- }
+ //llvm::verifyFunction(*cUnit->func, llvm::PrintMessageAction);
if (cUnit->enableDebug & (1 << kDebugDumpBitcodeFile)) {
// Write bitcode to file
@@ -3218,24 +3134,6 @@ bool methodBitcodeBlockCodeGen(CompilationUnit* cUnit, llvm::BasicBlock* bb)
cvtShiftOp(cUnit, Instruction::USHR_INT, callInst);
break;
- case greenland::IntrinsicHelper::CatchTargets: {
- llvm::SwitchInst* swInst =
- llvm::dyn_cast<llvm::SwitchInst>(nextIt);
- DCHECK(swInst != NULL);
- /*
- * Discard the edges and the following conditional branch.
- * Do a direct branch to the default target (which is the
- * "work" portion of the pair.
- * TODO: awful code layout - rework
- */
- llvm::BasicBlock* targetBB = swInst->getDefaultDest();
- DCHECK(targetBB != NULL);
- opUnconditionalBranch(cUnit,
- cUnit->blockToLabelMap.Get(targetBB));
- ++it;
- }
- break;
-
default:
LOG(FATAL) << "Unexpected intrinsic " << (int)id << ", "
<< cUnit->intrinsic_helper->GetName(id);
diff --git a/src/compiler/codegen/MethodCodegenDriver.cc b/src/compiler/codegen/MethodCodegenDriver.cc
index 3c5fb23259..b93cbd94b1 100644
--- a/src/compiler/codegen/MethodCodegenDriver.cc
+++ b/src/compiler/codegen/MethodCodegenDriver.cc
@@ -179,10 +179,6 @@ CallInfo* oatNewCallInfo(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
{
CallInfo* info = (CallInfo*)oatNew(cUnit, sizeof(CallInfo), true,
kAllocMisc);
-//FIXME: Disable fusing for x86
-#if defined(TARGET_X86)
- info->result.location = kLocInvalid;
-#else
MIR* moveResultMIR = oatFindMoveResult(cUnit, bb, mir);
if (moveResultMIR == NULL) {
info->result.location = kLocInvalid;
@@ -190,7 +186,6 @@ CallInfo* oatNewCallInfo(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
info->result = oatGetRawDest(cUnit, moveResultMIR);
moveResultMIR->dalvikInsn.opcode = Instruction::NOP;
}
-#endif
info->numArgWords = mir->ssaRep->numUses;
info->args = (info->numArgWords == 0) ? NULL : (RegLocation*)
oatNew(cUnit, sizeof(RegLocation) * info->numArgWords, false, kAllocMisc);
@@ -819,10 +814,9 @@ const char* extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
"kMirFusedCmpgDouble",
"kMirFusedCmpLong",
"kMirNop",
- "kMirOpNullCheck",
- "kMirOpRangeCheck",
- "kMirOpDivZeroCheck",
- "kMirOpCheck",
+ "kMirOpNullNRangeUpCheck",
+ "kMirOpNullNRangeDownCheck",
+ "kMirOpLowerBound",
};
/* Extended MIR instructions like PHI */
@@ -853,7 +847,6 @@ void handleExtendedMethodMIR(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
storeValue(cUnit, rlDest, rlSrc);
break;
}
-#if defined(TARGET_ARM)
case kMirOpFusedCmplFloat:
genFusedFPCmpBranch(cUnit, bb, mir, false /*gt bias*/, false /*double*/);
break;
@@ -869,7 +862,6 @@ void handleExtendedMethodMIR(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
case kMirOpFusedCmpLong:
genFusedLongCmpBranch(cUnit, bb, mir);
break;
-#endif
default:
break;
}
@@ -959,17 +951,7 @@ bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
newLIR1(cUnit, kPseudoSSARep, (int) ssaString);
}
- if ((int)dalvikOpcode == (int)kMirOpCheck) {
- // Combine check and work halves of throwing instruction.
- MIR* workHalf = mir->meta.throwInsn;
- mir->dalvikInsn.opcode = workHalf->dalvikInsn.opcode;
- SSARepresentation* ssaRep = workHalf->ssaRep;
- workHalf->ssaRep = mir->ssaRep;
- mir->ssaRep = ssaRep;
- workHalf->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- }
-
- if ((int)dalvikOpcode >= (int)kMirOpFirst) {
+ if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
handleExtendedMethodMIR(cUnit, bb, mir);
continue;
}
diff --git a/src/compiler/codegen/mips/FP/MipsFP.cc b/src/compiler/codegen/mips/FP/MipsFP.cc
index a57d34a6d0..2bf26e448b 100644
--- a/src/compiler/codegen/mips/FP/MipsFP.cc
+++ b/src/compiler/codegen/mips/FP/MipsFP.cc
@@ -210,4 +210,10 @@ static bool genCmpFP(CompilationUnit *cUnit, Instruction::Code opcode, RegLocati
return false;
}
+void genFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
+ bool gtBias, bool isDouble)
+{
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
+}
+
} // namespace art
diff --git a/src/compiler/codegen/mips/Mips32/Gen.cc b/src/compiler/codegen/mips/Mips32/Gen.cc
index 77129851f5..c3048366b8 100644
--- a/src/compiler/codegen/mips/Mips32/Gen.cc
+++ b/src/compiler/codegen/mips/Mips32/Gen.cc
@@ -517,4 +517,9 @@ void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
#endif
}
+void genFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
+{
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
+}
+
} // namespace art
diff --git a/src/compiler/codegen/x86/Assemble.cc b/src/compiler/codegen/x86/Assemble.cc
index a245660d7e..0c5d3cf481 100644
--- a/src/compiler/codegen/x86/Assemble.cc
+++ b/src/compiler/codegen/x86/Assemble.cc
@@ -298,6 +298,14 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
EXT_0F_ENCODING_MAP(Imul16, 0x66, 0xAF, REG_DEF0 | SETS_CCODES),
EXT_0F_ENCODING_MAP(Imul32, 0x00, 0xAF, REG_DEF0 | SETS_CCODES),
+
+ { kX86CmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "!0r,!1r" },
+ { kX86CmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1d],!2r" },
+ { kX86CmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+ { kX86LockCmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "!0r,!1r" },
+ { kX86LockCmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1d],!2r" },
+ { kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+
EXT_0F_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0),
EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
EXT_0F_ENCODING_MAP(Movsx8, 0x00, 0xBE, REG_DEF0),
diff --git a/src/compiler/codegen/x86/FP/X86FP.cc b/src/compiler/codegen/x86/FP/X86FP.cc
index 8cd32b45fd..be628db39e 100644
--- a/src/compiler/codegen/x86/FP/X86FP.cc
+++ b/src/compiler/codegen/x86/FP/X86FP.cc
@@ -21,7 +21,6 @@ static bool genArithOpFloat(CompilationUnit *cUnit, Instruction::Code opcode,
RegLocation rlSrc2) {
X86OpCode op = kX86Nop;
RegLocation rlResult;
- int tempReg;
/*
* Don't attempt to optimize register usage since these opcodes call out to
@@ -45,19 +44,9 @@ static bool genArithOpFloat(CompilationUnit *cUnit, Instruction::Code opcode,
op = kX86MulssRR;
break;
case Instruction::NEG_FLOAT:
- // TODO: Make this an XorpsRM where the memory location holds 0x80000000
- rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- tempReg = oatAllocTemp(cUnit);
- loadConstant(cUnit, tempReg, 0x80000000);
- newLIR2(cUnit, kX86MovdxrRR, rlResult.lowReg, tempReg);
- newLIR2(cUnit, kX86XorpsRR, rlResult.lowReg, rlSrc1.lowReg);
- storeValue(cUnit, rlDest, rlResult);
- return false;
case Instruction::REM_FLOAT_2ADDR:
- case Instruction::REM_FLOAT: {
+ case Instruction::REM_FLOAT:
return genArithOpFloatPortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
- }
default:
return true;
}
@@ -67,7 +56,7 @@ static bool genArithOpFloat(CompilationUnit *cUnit, Instruction::Code opcode,
int rDest = rlResult.lowReg;
int rSrc1 = rlSrc1.lowReg;
int rSrc2 = rlSrc2.lowReg;
- if (rSrc2 == rDest) {
+ if (rDest == rSrc2) {
rSrc2 = oatAllocTempFloat(cUnit);
opRegCopy(cUnit, rSrc2, rDest);
}
@@ -83,7 +72,6 @@ static bool genArithOpDouble(CompilationUnit *cUnit, Instruction::Code opcode,
RegLocation rlSrc2) {
X86OpCode op = kX86Nop;
RegLocation rlResult;
- int tempReg;
switch (opcode) {
case Instruction::ADD_DOUBLE_2ADDR:
@@ -103,20 +91,9 @@ static bool genArithOpDouble(CompilationUnit *cUnit, Instruction::Code opcode,
op = kX86MulsdRR;
break;
case Instruction::NEG_DOUBLE:
- // TODO: Make this an XorpdRM where the memory location holds 0x8000000000000000
- rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kFPReg, true);
- tempReg = oatAllocTemp(cUnit);
- loadConstant(cUnit, tempReg, 0x80000000);
- newLIR2(cUnit, kX86MovdxrRR, rlResult.lowReg, tempReg);
- newLIR2(cUnit, kX86PsllqRI, rlResult.lowReg, 32);
- newLIR2(cUnit, kX86XorpsRR, rlResult.lowReg, rlSrc1.lowReg);
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
case Instruction::REM_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE: {
+ case Instruction::REM_DOUBLE:
return genArithOpDoublePortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
- }
default:
return true;
}
@@ -207,9 +184,7 @@ static bool genConversion(CompilationUnit *cUnit, Instruction::Code opcode,
}
case Instruction::LONG_TO_DOUBLE:
case Instruction::LONG_TO_FLOAT:
- // These can be implemented inline by using memory as a 64-bit source.
- // However, this can't be done easily if the register has been promoted.
- UNIMPLEMENTED(WARNING) << "inline l2[df] " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
case Instruction::FLOAT_TO_LONG:
case Instruction::DOUBLE_TO_LONG:
return genConversionPortable(cUnit, opcode, rlDest, rlSrc);
@@ -286,4 +261,74 @@ static bool genCmpFP(CompilationUnit *cUnit, Instruction::Code code, RegLocation
return false;
}
+void genFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
+ bool gtBias, bool isDouble) {
+ LIR* labelList = cUnit->blockLabelList;
+ LIR* taken = &labelList[bb->taken->id];
+ LIR* notTaken = &labelList[bb->fallThrough->id];
+ LIR* branch = NULL;
+ RegLocation rlSrc1;
+ RegLocation rlSrc2;
+ if (isDouble) {
+ rlSrc1 = oatGetSrcWide(cUnit, mir, 0);
+ rlSrc2 = oatGetSrcWide(cUnit, mir, 2);
+ rlSrc1 = loadValueWide(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValueWide(cUnit, rlSrc2, kFPReg);
+ newLIR2(cUnit, kX86UcomisdRR, S2D(rlSrc1.lowReg, rlSrc1.highReg),
+ S2D(rlSrc2.lowReg, rlSrc2.highReg));
+ } else {
+ rlSrc1 = oatGetSrc(cUnit, mir, 0);
+ rlSrc2 = oatGetSrc(cUnit, mir, 1);
+ rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
+ newLIR2(cUnit, kX86UcomissRR, rlSrc1.lowReg, rlSrc2.lowReg);
+ }
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+ switch (ccode) {
+ case kCondEq:
+ if (gtBias) {
+ branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ branch->target = notTaken;
+ }
+ break;
+ case kCondNe:
+ if (!gtBias) {
+ branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ branch->target = taken;
+ }
+ break;
+ case kCondLt:
+ if (gtBias) {
+ branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ branch->target = notTaken;
+ }
+ ccode = kCondCs;
+ break;
+ case kCondLe:
+ if (gtBias) {
+ branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ branch->target = notTaken;
+ }
+ ccode = kCondLs;
+ break;
+ case kCondGt:
+ if (gtBias) {
+ branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ branch->target = taken;
+ }
+ ccode = kCondHi;
+ break;
+ case kCondGe:
+ if (gtBias) {
+ branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondPE);
+ branch->target = taken;
+ }
+ ccode = kCondCc;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
+ }
+ opCondBranch(cUnit, ccode, taken);
+}
+
} // namespace art
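[Editor's sketch, not part of the commit: a host-side model of the NaN handling the fused compare-and-branch above encodes. ucomiss/ucomisd raise the parity flag for an unordered compare, so the extra kX86CondPE branch routes NaN operands according to the cmpl/cmpg bias of the original Dalvik comparison. The function below covers only the if-lt case and is an illustrative assumption, not generated code.]

#include <cmath>

// Should an if-lt fused with a float compare be taken?
// gt_bias is true for CMPG_FLOAT/CMPG_DOUBLE (NaN compares as +1),
// false for CMPL_FLOAT/CMPL_DOUBLE (NaN compares as -1).
bool FusedCmpLtTaken(float a, float b, bool gt_bias) {
  if (std::isnan(a) || std::isnan(b)) {
    // Unordered result: ucomiss sets PF, and the kX86CondPE branch above
    // jumps to notTaken for gt bias (result +1) or taken for lt bias (-1).
    return !gt_bias;
  }
  // Ordered result: kCondCs after ucomiss, i.e. carry set when a < b.
  return a < b;
}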
diff --git a/src/compiler/codegen/x86/X86/Gen.cc b/src/compiler/codegen/x86/X86/Gen.cc
index adad05b3f3..4bfc5310a6 100644
--- a/src/compiler/codegen/x86/X86/Gen.cc
+++ b/src/compiler/codegen/x86/X86/Gen.cc
@@ -181,60 +181,63 @@ void genFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset,
void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- UNIMPLEMENTED(WARNING) << "genNegFloat "
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- newLIR0(cUnit, kX86Bkpt);
-#if 0
RegLocation rlResult;
rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
opRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
storeValue(cUnit, rlDest, rlResult);
-#endif
}
void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- UNIMPLEMENTED(WARNING) << "genNegDouble"
- << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- newLIR0(cUnit, kX86Bkpt);
-#if 0
RegLocation rlResult;
rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
storeValueWide(cUnit, rlDest, rlResult);
-#endif
}
LIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg, int optFlags);
void callRuntimeHelperReg(CompilationUnit* cUnit, int helperOffset, int arg0);
-/*
- * TODO: implement fast path to short-circuit thin-lock case
- */
void genMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
{
oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ loadValueDirectFixed(cUnit, rlSrc, rCX); // Get obj
oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, optFlags);
- // Go expensive route - artLockObjectFromCode(self, obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARG0);
+ genNullCheck(cUnit, rlSrc.sRegLow, rCX, optFlags);
+ // If lock is unheld, try to grab it quickly with compare and exchange
+ // TODO: copy and clear hash state?
+ newLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ newLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ newLIR2(cUnit, kX86Xor32RR, rAX, rAX);
+ newLIR3(cUnit, kX86LockCmpxchgMR, rCX, Object::MonitorOffset().Int32Value(), rDX);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondEq);
+ // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX);
+ branch->target = newLIR0(cUnit, kPseudoTargetLabel);
}
-/*
- * TODO: implement fast path to short-circuit thin-lock case
- */
void genMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
{
oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ loadValueDirectFixed(cUnit, rlSrc, rAX); // Get obj
oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, optFlags);
- // Go expensive route - UnlockObjectFromCode(obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARG0);
+ genNullCheck(cUnit, rlSrc.sRegLow, rAX, optFlags);
+ // If lock is held by the current thread, clear it to quickly release it
+ // TODO: clear hash state?
+ newLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ newLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ newLIR3(cUnit, kX86Mov32RM, rCX, rAX, Object::MonitorOffset().Int32Value());
+ opRegReg(cUnit, kOpSub, rCX, rDX);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondNe);
+ newLIR3(cUnit, kX86Mov32MR, rAX, Object::MonitorOffset().Int32Value(), rCX);
+ LIR* branch2 = newLIR1(cUnit, kX86Jmp8, 0);
+ branch->target = newLIR0(cUnit, kPseudoTargetLabel);
+ // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX);
+ branch2->target = newLIR0(cUnit, kPseudoTargetLabel);
}
/*
@@ -377,4 +380,44 @@ void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
}
}
+void genFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir) {
+ LIR* labelList = cUnit->blockLabelList;
+ LIR* taken = &labelList[bb->taken->id];
+ RegLocation rlSrc1 = oatGetSrcWide(cUnit, mir, 0);
+ RegLocation rlSrc2 = oatGetSrcWide(cUnit, mir, 2);
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+ // Swap operands and condition code to prevent use of the zero flag: after the sub/sbc pair
+ // below, ZF only reflects the high word of the 64-bit difference, so only the sign/overflow
+ // based conditions (lt/ge) are reliable.
+ if (ccode == kCondLe || ccode == kCondGt) {
+ // Compute (r3:r2) = (r3:r2) - (r1:r0)
+ opRegReg(cUnit, kOpSub, r2, r0); // r2 = r2 - r0
+ opRegReg(cUnit, kOpSbc, r3, r1); // r3 = r3 - r1 - CF
+ } else {
+ // Compute (r1:r0) = (r1:r0) - (r3:r2)
+ opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
+ opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ }
+ switch (ccode) {
+ case kCondEq:
+ case kCondNe:
+ opRegReg(cUnit, kOpOr, r0, r1); // r0 = r0 | r1
+ break;
+ case kCondLe:
+ ccode = kCondGe;
+ break;
+ case kCondGt:
+ ccode = kCondLt;
+ break;
+ case kCondLt:
+ case kCondGe:
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
+ }
+ opCondBranch(cUnit, ccode, taken);
+}
+
} // namespace art
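Note: the thin-lock fast paths emitted above amount to a single compare-and-exchange on the object's monitor word for enter, and an owner check plus a plain store for exit. Below is a rough standalone model that ignores the lock count and hash state the TODOs mention; LW_LOCK_OWNER_SHIFT is given a placeholder value here (the runtime's lock-word definition supplies the real one), and these helpers are a sketch of the emitted code's behaviour, not runtime entry points.

    #include <atomic>
    #include <cstdint>

    static const int LW_LOCK_OWNER_SHIFT = 3;  // placeholder; the runtime headers define the real value

    // genMonitorEnter fast path: install this thread as owner only if the lock
    // word is still zero (unlocked, no hash, recursion count == 0).
    bool ThinLockEnterFast(std::atomic<uint32_t>* monitor, uint32_t thin_lock_id) {
      uint32_t expected = 0;
      uint32_t desired = thin_lock_id << LW_LOCK_OWNER_SHIFT;
      // The emitted lock cmpxchg; on failure the compiled code falls back to
      // the pLockObjectFromCode entry point (artLockObjectFromCode).
      return monitor->compare_exchange_strong(expected, desired);
    }

    // genMonitorExit fast path: if the word is exactly "owned by me, count 0",
    // clear it; anything else (recursion, fat lock, wrong owner) goes to
    // pUnlockObjectFromCode.
    bool ThinLockExitFast(std::atomic<uint32_t>* monitor, uint32_t thin_lock_id) {
      uint32_t owner_word = thin_lock_id << LW_LOCK_OWNER_SHIFT;
      if (monitor->load() != owner_word) {
        return false;
      }
      monitor->store(0u);
      return true;
    }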
diff --git a/src/compiler/codegen/x86/X86LIR.h b/src/compiler/codegen/x86/X86LIR.h
index 5bf4dd9cf3..72c8c03dea 100644
--- a/src/compiler/codegen/x86/X86LIR.h
+++ b/src/compiler/codegen/x86/X86LIR.h
@@ -445,6 +445,8 @@ enum X86OpCode {
kX86Mfence, // memory barrier
Binary0fOpCode(kX86Imul16), // 16bit multiply
Binary0fOpCode(kX86Imul32), // 32bit multiply
+ kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,// compare and exchange
+ kX86LockCmpxchgRR, kX86LockCmpxchgMR, kX86LockCmpxchgAR,// locked compare and exchange
Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
diff --git a/src/compiler_llvm/compiler_llvm.cc b/src/compiler_llvm/compiler_llvm.cc
index db4ad0a28d..635fde39bf 100644
--- a/src/compiler_llvm/compiler_llvm.cc
+++ b/src/compiler_llvm/compiler_llvm.cc
@@ -54,12 +54,18 @@ void InitializeLLVM() {
// Initialize LLVM target-specific options.
art::compiler_llvm::InitialBackendOptions();
- // Initialize LLVM target, MC subsystem, asm printer, and asm parser
+ // Initialize LLVM target, MC subsystem, asm printer, and asm parser.
+#if defined(ART_TARGET)
+ // Don't initialize all targets on device. Just initialize the device's native target
+ llvm::InitializeNativeTarget();
+ llvm::InitializeNativeTargetAsmPrinter();
+ llvm::InitializeNativeTargetAsmParser();
+#else
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
llvm::InitializeAllAsmPrinters();
llvm::InitializeAllAsmParsers();
- // TODO: Maybe we don't have to initialize "all" targets.
+#endif
// Initialize LLVM optimization passes
llvm::PassRegistry &registry = *llvm::PassRegistry::getPassRegistry();
diff --git a/src/disassembler_x86.cc b/src/disassembler_x86.cc
index f70289fe03..646e9782c6 100644
--- a/src/disassembler_x86.cc
+++ b/src/disassembler_x86.cc
@@ -511,6 +511,7 @@ DISASSEMBLER_ENTRY(cmp,
no_ops = true;
}
break;
+ case 0xB1: opcode << "cmpxchg"; has_modrm = true; store = true; break;
case 0xB6: opcode << "movzxb"; has_modrm = true; load = true; break;
case 0xB7: opcode << "movzxw"; has_modrm = true; load = true; break;
case 0xBE: opcode << "movsxb"; has_modrm = true; load = true; break;
diff --git a/src/globals.h b/src/globals.h
index 0efa7eb2ac..1eeaca26aa 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -36,6 +36,7 @@ const int kPointerSize = sizeof(void*);
const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerWord = kWordSize * kBitsPerByte;
+const int kWordHighBitMask = 1 << (kBitsPerWord - 1);
// Required stack alignment
const int kStackAlignment = 16;
diff --git a/src/greenland/intrinsic_func_list.def b/src/greenland/intrinsic_func_list.def
index a0f64d6f12..0ebebb25a3 100644
--- a/src/greenland/intrinsic_func_list.def
+++ b/src/greenland/intrinsic_func_list.def
@@ -66,23 +66,11 @@ _EVAL_DEF_INTRINSICS_FUNC(GetCurrentThread,
// Exception
//----------------------------------------------------------------------------
-// Should not expand - introduces the catch targets for a potentially
-// throwing instruction. The result is a switch key and this
-// instruction will be followed by a switch statement. The catch
-// targets will be enumerated as cases of the switch, with the fallthrough
-// designating the block containing the potentially throwing instruction.
-// bool dex_lang_catch_targets(int dex_pc)
-_EVAL_DEF_INTRINSICS_FUNC(CatchTargets,
- dex_lang_catch_targets,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32ConstantTy))
-
// JavaObject* dex_lang_get_current_exception()
_EVAL_DEF_INTRINSICS_FUNC(GetException,
dex_lang_get_current_exception,
kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
+ kJavaObjectTy,
_EXPAND_ARG0())
// bool dex_lang_is_exception_pending()
@@ -1186,28 +1174,28 @@ _EVAL_DEF_INTRINSICS_FUNC(ConstInt,
kInt32Ty,
_EXPAND_ARG1(kInt32Ty))
-// JavaObject* const_obj(int)
+// int const_obj(int)
_EVAL_DEF_INTRINSICS_FUNC(ConstObj,
dex_lang_const_obj,
kAttrReadOnly | kAttrNoThrow,
kJavaObjectTy,
_EXPAND_ARG1(kInt32Ty))
-// long const_long(long)
+// int const_long(long)
_EVAL_DEF_INTRINSICS_FUNC(ConstLong,
dex_lang_const_long,
kAttrReadOnly | kAttrNoThrow,
kInt64Ty,
_EXPAND_ARG1(kInt64Ty))
-// float const_float(int)
+// int const_float(int)
_EVAL_DEF_INTRINSICS_FUNC(ConstFloat,
dex_lang_const_Float,
kAttrReadOnly | kAttrNoThrow,
kFloatTy,
_EXPAND_ARG1(kInt32Ty))
-// double const_double(long)
+// int const_double(long)
_EVAL_DEF_INTRINSICS_FUNC(ConstDouble,
dex_lang_const_Double,
kAttrReadOnly | kAttrNoThrow,
diff --git a/src/heap.cc b/src/heap.cc
index 005609b38f..626adf9728 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -24,6 +24,7 @@
#include "card_table.h"
#include "debugger.h"
+#include "heap_bitmap.h"
#include "image.h"
#include "mark_sweep.h"
#include "mod_union_table.h"
@@ -141,6 +142,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
card_table_(NULL),
card_marking_disabled_(false),
is_gc_running_(false),
+ concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
concurrent_start_size_(128 * KB),
concurrent_min_free_(256 * KB),
try_running_gc_(false),
@@ -153,6 +155,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
reference_queueNext_offset_(0),
reference_pendingNext_offset_(0),
finalizer_reference_zombie_offset_(0),
+ have_zygote_space_(false),
target_utilization_(0.5),
verify_objects_(false) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -164,8 +167,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
Space* first_space = NULL;
Space* last_space = NULL;
- live_bitmap_.reset(new HeapBitmap);
- mark_bitmap_.reset(new HeapBitmap);
+ live_bitmap_.reset(new HeapBitmap(this));
+ mark_bitmap_.reset(new HeapBitmap(this));
// Requested begin for the alloc space, to follow the mapped image and oat files
byte* requested_begin = NULL;
@@ -210,10 +213,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
UniquePtr<AllocSpace> alloc_space(Space::CreateAllocSpace(
"alloc space", initial_size, growth_limit, capacity, requested_begin));
alloc_space_ = alloc_space.release();
+ CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
AddSpace(alloc_space_);
- if (alloc_space_ == NULL) {
- LOG(FATAL) << "Failed to create alloc space";
- }
UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
byte* heap_begin = first_space->Begin();
@@ -228,44 +229,48 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
}
// Allocate the card table.
- UniquePtr<CardTable> card_table(CardTable::Create(heap_begin, heap_capacity));
- if (card_table.get() == NULL) {
- LOG(FATAL) << "Failed to create card table";
- }
+ card_table_.reset(CardTable::Create(heap_begin, heap_capacity));
+ CHECK(card_table_.get() != NULL) << "Failed to create card table";
- // Allocate the mod-union table
- ModUnionTableReferenceCache* mod_union_table = new ModUnionTableReferenceCache(this);
- mod_union_table->Init();
- mod_union_table_ = mod_union_table;
+ mod_union_table_.reset(new ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache>(this));
+ CHECK(mod_union_table_.get() != NULL) << "Failed to create mod-union table";
- card_table_ = card_table.release();
+ zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
+ CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";
num_bytes_allocated_ = 0;
num_objects_allocated_ = 0;
- mark_stack_ = MarkStack::Create();
+ mark_stack_.reset(MarkStack::Create());
// It's still too early to take a lock because there are no threads yet,
// but we can create the heap lock now. We don't create it earlier to
// make it clear that you can't use locks during heap initialization.
- lock_ = new Mutex("Heap lock", kHeapLock);
- condition_ = new ConditionVariable("Heap condition variable");
-
- concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+ lock_.reset(new Mutex("Heap lock", kHeapLock));
+ condition_.reset(new ConditionVariable("Heap condition variable"));
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() exiting";
}
}
+// Sort spaces based on begin address
+class SpaceSorter {
+ public:
+ bool operator () (const Space* a, const Space* b) const {
+ return a->Begin() < b->Begin();
+ }
+};
+
void Heap::AddSpace(Space* space) {
+ DCHECK(space != NULL);
DCHECK(space->GetLiveBitmap() != NULL);
live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap());
-
DCHECK(space->GetMarkBitmap() != NULL);
mark_bitmap_->AddSpaceBitmap(space->GetMarkBitmap());
-
spaces_.push_back(space);
+ // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
+ std::sort(spaces_.begin(), spaces_.end(), SpaceSorter());
}
Heap::~Heap() {
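Note: AddSpace() now keeps spaces_ sorted by start address, which is what lets address-ordered scans (the CMS "finger") and space lookups treat the heap as one monotone range. A minimal illustration of the lookup this enables; Space, Object and byte are the runtime types used throughout this patch, and the helper itself is not part of the change.

    #include <vector>

    Space* FindSpaceSorted(const std::vector<Space*>& spaces, const Object* obj) {
      const byte* addr = reinterpret_cast<const byte*>(obj);
      for (std::vector<Space*>::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
        if (addr < (*it)->Begin()) {
          break;                       // sorted order: no later space can contain addr
        }
        if (addr < (*it)->End()) {
          return *it;                  // (*it)->Begin() <= addr < (*it)->End()
        }
      }
      return NULL;
    }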
@@ -275,11 +280,6 @@ Heap::~Heap() {
// all daemon threads are suspended, and we also know that the threads list have been deleted, so
// those threads can't resume. We're the only running thread, and we can do whatever we like...
STLDeleteElements(&spaces_);
- delete card_table_;
- delete mod_union_table_;
- delete mark_stack_;
- delete condition_;
- delete lock_;
}
Space* Heap::FindSpaceFromObject(const Object* obj) const {
@@ -345,6 +345,10 @@ Object* Heap::AllocObject(Class* c, size_t byte_count) {
RequestConcurrentGC();
}
VerifyObject(obj);
+
+ // Additional verification to ensure that we did not allocate into a zygote space.
+ DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());
+
return obj;
}
total_bytes_free = GetFreeMemory();
@@ -389,46 +393,60 @@ bool Heap::IsLiveObjectLocked(const Object* obj) {
#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
- if (this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
+ if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
Thread::Current() == NULL ||
Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
return;
}
- ScopedHeapLock heap_lock;
- Heap::VerifyObjectLocked(obj);
+ {
+ ScopedHeapLock heap_lock;
+ Heap::VerifyObjectLocked(obj);
+ }
}
#endif
+void Heap::DumpSpaces() {
+ // TODO: C++0x auto
+ for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ LOG(INFO) << **it;
+ }
+}
+
void Heap::VerifyObjectLocked(const Object* obj) {
lock_->AssertHeld();
- if (obj != NULL) {
- if (!IsAligned<kObjectAlignment>(obj)) {
- LOG(FATAL) << "Object isn't aligned: " << obj;
- } else if (!GetLiveBitmap()->Test(obj)) {
- LOG(FATAL) << "Object is dead: " << obj;
+ if (!IsAligned<kObjectAlignment>(obj)) {
+ LOG(FATAL) << "Object isn't aligned: " << obj;
+ } else if (!GetLiveBitmap()->Test(obj)) {
+ Space* space = FindSpaceFromObject(obj);
+ if (space == NULL) {
+ DumpSpaces();
+ LOG(FATAL) << "Object " << obj << " is not contained in any space";
}
- // Ignore early dawn of the universe verifications
- if (num_objects_allocated_ > 10) {
- const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
- Object::ClassOffset().Int32Value();
- const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
- if (c == NULL) {
- LOG(FATAL) << "Null class in object: " << obj;
- } else if (!IsAligned<kObjectAlignment>(c)) {
- LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
- } else if (!GetLiveBitmap()->Test(c)) {
- LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
- }
- // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
- // Note: we don't use the accessors here as they have internal sanity checks
- // that we don't want to run
- raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
- const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
- raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
- const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
- CHECK_EQ(c_c, c_c_c);
+ LOG(FATAL) << "Object is dead: " << obj << " in space " << *space;
+ }
+#if !VERIFY_OBJECT_FAST
+ // Ignore early dawn of the universe verifications
+ if (num_objects_allocated_ > 10) {
+ const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
+ Object::ClassOffset().Int32Value();
+ const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
+ if (c == NULL) {
+ LOG(FATAL) << "Null class in object: " << obj;
+ } else if (!IsAligned<kObjectAlignment>(c)) {
+ LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
+ } else if (!GetLiveBitmap()->Test(c)) {
+ LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
}
+ // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
+ // Note: we don't use the accessors here as they have internal sanity checks
+ // that we don't want to run
+ raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
+ const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
+ raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
+ const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
+ CHECK_EQ(c_c, c_c_c);
}
+#endif
}
void Heap::VerificationCallback(Object* obj, void* arg) {
@@ -498,19 +516,6 @@ Object* Heap::AllocateLocked(size_t size) {
return obj;
}
- // TODO: C++0x auto
- for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
- if ((*cur)->IsAllocSpace() && *cur != alloc_space_) {
- AllocSpace* space = (*cur)->AsAllocSpace();
- Object* obj = AllocateLocked(space, size);
- if (obj != NULL) {
- RecordAllocationLocked(space, obj);
- // Switch to this alloc space since the old one did not have enough storage.
- alloc_space_ = space;
- return obj;
- }
- }
- }
return NULL;
}
@@ -526,7 +531,7 @@ Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) {
if (alloc_size > space->Capacity()) {
// On failure collect soft references
WaitForConcurrentGcToComplete();
- CollectGarbageInternal(false, true);
+ CollectGarbageInternal(false, false, true);
return NULL;
}
@@ -553,9 +558,19 @@ Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) {
++Runtime::Current()->GetStats()->gc_for_alloc_count;
++Thread::Current()->GetStats()->gc_for_alloc_count;
}
- // We don't need a WaitForConcurrentGcToComplete here since we checked
- // is_gc_running_ earlier and we are in a heap lock.
- CollectGarbageInternal(false, false);
+
+ if (have_zygote_space_) {
+ // We don't need a WaitForConcurrentGcToComplete here since we checked is_gc_running_ earlier
+ // and we are in a heap lock. Try partial GC first.
+ CollectGarbageInternal(true, false, false);
+ ptr = space->AllocWithoutGrowth(alloc_size);
+ if (ptr != NULL) {
+ return ptr;
+ }
+ }
+
+ // Partial GC didn't free enough memory, try a full GC.
+ CollectGarbageInternal(false, false, false);
ptr = space->AllocWithoutGrowth(alloc_size);
if (ptr != NULL) {
return ptr;
@@ -581,7 +596,7 @@ Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) {
// OLD-TODO: wait for the finalizers from the previous GC to finish
VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) << " allocation";
// We don't need a WaitForConcurrentGcToComplete here either.
- CollectGarbageInternal(false, true);
+ CollectGarbageInternal(false, false, true);
ptr = space->AllocWithGrowth(alloc_size);
if (ptr != NULL) {
return ptr;
@@ -654,14 +669,43 @@ void Heap::CollectGarbage(bool clear_soft_references) {
// If we just waited for a GC to complete then we do not need to do another
// GC unless we clear soft references.
if (!WaitForConcurrentGcToComplete() || clear_soft_references) {
- CollectGarbageInternal(false, clear_soft_references);
+ CollectGarbageInternal(have_zygote_space_, true, clear_soft_references);
}
}
-void Heap::CollectGarbageInternal(bool concurrent, bool clear_soft_references) {
+void Heap::PreZygoteFork() {
+ ScopedHeapLock heap_lock;
+
+ // Try to see if we have any Zygote spaces.
+ if (have_zygote_space_) {
+ return;
+ }
+
+ VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(GetBytesAllocated());
+
+ // Replace the first alloc space we find with a zygote space.
+ // TODO: C++0x auto
+ for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ if ((*it)->IsAllocSpace()) {
+ AllocSpace* zygote_space = (*it)->AsAllocSpace();
+
+ // Turn the current alloc space into a zygote space and obtain the new alloc space, which is
+ // composed of the remaining available heap memory.
+ alloc_space_ = zygote_space->CreateZygoteSpace();
+
+ // Change the GC retention policy of the zygote space to only collect when full.
+ zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);
+ AddSpace(alloc_space_);
+ have_zygote_space_ = true;
+ break;
+ }
+ }
+}
+
+void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_soft_references) {
lock_->AssertHeld();
- CHECK(!is_gc_running_);
+ CHECK(!is_gc_running_) << "Attempted recursive GC";
is_gc_running_ = true;
TimingLogger timings("CollectGarbageInternal");
@@ -674,19 +718,41 @@ void Heap::CollectGarbageInternal(bool concurrent, bool clear_soft_references) {
size_t initial_size = num_bytes_allocated_;
Object* cleared_references = NULL;
{
- MarkSweep mark_sweep(mark_stack_);
+ MarkSweep mark_sweep(mark_stack_.get());
timings.AddSplit("ctor");
mark_sweep.Init();
timings.AddSplit("Init");
- if (concurrent) {
- card_table_->ClearNonImageSpaceCards(this);
- timings.AddSplit("ClearNonImageSpaceCards");
- }
+ // Make sure that the tables have the correct pointer for the mark sweep.
+ mod_union_table_->Init(&mark_sweep);
+ zygote_mod_union_table_->Init(&mark_sweep);
// Clear image space cards and keep track of cards we cleared in the mod-union table.
- mod_union_table_->ClearCards();
+ for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ Space* space = *it;
+ if (space->IsImageSpace()) {
+ mod_union_table_->ClearCards(*it);
+ } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
+ zygote_mod_union_table_->ClearCards(space);
+ } else if (concurrent) {
+ card_table_->ClearSpaceCards(space);
+ }
+ }
+ timings.AddSplit("ClearCards");
+
+#if VERIFY_MOD_UNION
+ mod_union_table_->Verify();
+ zygote_mod_union_table_->Verify();
+#endif
+
+ if (partial_gc) {
+ // Copy the mark bits over from the live bits; do this as early as possible or else we can
+ // accidentally un-mark roots.
+ // Needed for scanning dirty objects.
+ mark_sweep.CopyMarkBits();
+ timings.AddSplit("CopyMarkBits");
+ }
mark_sweep.MarkRoots();
timings.AddSplit("MarkRoots");
@@ -703,17 +769,26 @@ void Heap::CollectGarbageInternal(bool concurrent, bool clear_soft_references) {
timings.AddSplit("RootEnd");
}
+ // Update zygote mod union table.
+ if (partial_gc) {
+ zygote_mod_union_table_->Update();
+ timings.AddSplit("UpdateZygoteModUnionTable");
+
+ zygote_mod_union_table_->MarkReferences();
+ timings.AddSplit("ZygoteMarkReferences");
+ }
+
// Processes the cards we cleared earlier and adds their objects into the mod-union table.
- mod_union_table_->Update(&mark_sweep);
+ mod_union_table_->Update();
timings.AddSplit("UpdateModUnionTable");
// Scans all objects in the mod-union table.
- mod_union_table_->MarkReferences(&mark_sweep);
+ mod_union_table_->MarkReferences();
timings.AddSplit("MarkImageToAllocSpaceReferences");
// Recursively mark all the non-image bits set in the mark bitmap.
- mark_sweep.RecursiveMark();
- timings.AddSplit("RecursiveMark");
+ mark_sweep.RecursiveMark(partial_gc);
+ timings.AddSplit(partial_gc ? "PartialMark" : "RecursiveMark");
if (concurrent) {
dirty_begin = NanoTime();
@@ -739,7 +814,8 @@ void Heap::CollectGarbageInternal(bool concurrent, bool clear_soft_references) {
// instead, resulting in no new allocated objects being incorrectly freed by sweep.
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
Space* space = *it;
- if (space->IsAllocSpace()) {
+ // We never allocate into zygote spaces.
+ if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
space->AsAllocSpace()->SwapBitmaps();
@@ -756,7 +832,7 @@ void Heap::CollectGarbageInternal(bool concurrent, bool clear_soft_references) {
Unlock();
}
- mark_sweep.Sweep();
+ mark_sweep.Sweep(partial_gc);
timings.AddSplit("Sweep");
cleared_references = mark_sweep.GetClearedReferences();
@@ -791,18 +867,20 @@ void Heap::CollectGarbageInternal(bool concurrent, bool clear_soft_references) {
// If the GC was slow, then print timings in the log.
if (concurrent) {
- uint64_t pause_roots_time = (root_end - t0) / 1000 * 1000;
- uint64_t pause_dirty_time = (dirty_end - dirty_begin) / 1000 * 1000;
- if (pause_roots_time > MsToNs(5) || pause_dirty_time > MsToNs(5)) {
- LOG(INFO) << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, "
+ uint64_t pause_roots = (root_end - t0) / 1000 * 1000;
+ uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
+ if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
+ LOG(INFO) << (partial_gc ? "Partial " : "")
+ << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, "
<< PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", "
- << "paused " << PrettyDuration(pause_roots_time) << "+" << PrettyDuration(pause_dirty_time)
+ << "paused " << PrettyDuration(pause_roots) << "+" << PrettyDuration(pause_dirty)
<< ", total " << PrettyDuration(duration_ns);
}
} else {
if (duration_ns > MsToNs(50)) {
uint64_t markSweepTime = (dirty_end - t0) / 1000 * 1000;
- LOG(INFO) << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, "
+ LOG(INFO) << (partial_gc ? "Partial " : "")
+ << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, "
<< PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", "
<< "paused " << PrettyDuration(markSweepTime)
<< ", total " << PrettyDuration(duration_ns);
@@ -854,20 +932,15 @@ size_t Heap::GetPercentFree() {
}
void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
- // TODO: C++0x auto
- for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
- if ((*cur)->IsAllocSpace()) {
- AllocSpace* alloc_space = (*cur)->AsAllocSpace();
- // TODO: Behavior for multiple alloc spaces?
- size_t alloc_space_capacity = alloc_space->Capacity();
- if (max_allowed_footprint > alloc_space_capacity) {
- VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint)
- << " to " << PrettySize(alloc_space_capacity);
- max_allowed_footprint = alloc_space_capacity;
- }
- alloc_space->SetFootprintLimit(max_allowed_footprint);
- }
+ AllocSpace* alloc_space = alloc_space_;
+ // TODO: Behavior for multiple alloc spaces?
+ size_t alloc_space_capacity = alloc_space->Capacity();
+ if (max_allowed_footprint > alloc_space_capacity) {
+ VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint)
+ << " to " << PrettySize(alloc_space_capacity);
+ max_allowed_footprint = alloc_space_capacity;
}
+ alloc_space->SetFootprintLimit(max_allowed_footprint);
}
// kHeapIdealFree is the ideal maximum free size, when we grow the heap for utilization.
@@ -1049,7 +1122,9 @@ void Heap::ConcurrentGC() {
CHECK(!is_gc_running_);
// Current thread needs to be runnable or else we can't suspend all threads.
ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
- CollectGarbageInternal(true, false);
+ if (!WaitForConcurrentGcToComplete()) {
+ CollectGarbageInternal(have_zygote_space_, true, false);
+ }
}
void Heap::Trim(AllocSpace* alloc_space) {
diff --git a/src/heap.h b/src/heap.h
index e383665873..e908248f93 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -31,6 +31,9 @@
#define VERIFY_OBJECT_ENABLED 0
+// Fast verification means we do not verify the classes of objects.
+#define VERIFY_OBJECT_FAST 1
+
namespace art {
class AllocSpace;
@@ -39,7 +42,6 @@ class HeapBitmap;
class ImageSpace;
class MarkStack;
class ModUnionTable;
-class ModUnionTableBitmap;
class Object;
class Space;
class SpaceTest;
@@ -195,7 +197,7 @@ class LOCKABLE Heap {
}
CardTable* GetCardTable() {
- return card_table_;
+ return card_table_.get();
}
void DisableCardMarking() {
@@ -236,10 +238,13 @@ class LOCKABLE Heap {
return mark_bitmap_.get();
}
- // Assumes there is only one image space.
+ void PreZygoteFork();
+
// DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
+ // Assumes there is only one image space.
ImageSpace* GetImageSpace();
AllocSpace* GetAllocSpace();
+ void DumpSpaces();
private:
// Allocates uninitialized storage.
@@ -258,7 +263,7 @@ class LOCKABLE Heap {
void RecordAllocationLocked(AllocSpace* space, const Object* object);
// TODO: can we teach GCC to understand the weird locking in here?
- void CollectGarbageInternal(bool concurrent, bool clear_soft_references) NO_THREAD_SAFETY_ANALYSIS;
+ void CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_soft_references) NO_THREAD_SAFETY_ANALYSIS;
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
@@ -275,19 +280,22 @@ class LOCKABLE Heap {
static void VerificationCallback(Object* obj, void* arg);
- Mutex* lock_;
- ConditionVariable* condition_;
+ UniquePtr<Mutex> lock_;
+ UniquePtr<ConditionVariable> condition_;
Spaces spaces_;
// The alloc space which we are currently allocating into.
AllocSpace* alloc_space_;
- // TODO: Reduce memory usage, this bitmap currently takes 1 bit per 8 bytes
- // of image space.
- ModUnionTable* mod_union_table_;
+ // The mod-union table remembers all of the references from the image space to the alloc /
+ // zygote spaces.
+ UniquePtr<ModUnionTable> mod_union_table_;
+
+ // This table holds all of the references from the zygote space to the alloc space.
+ UniquePtr<ModUnionTable> zygote_mod_union_table_;
- CardTable* card_table_;
+ UniquePtr<CardTable> card_table_;
// Used by the image writer to disable card marking on copied objects
// TODO: remove
@@ -313,7 +321,7 @@ class LOCKABLE Heap {
bool requesting_gc_;
// Mark stack that we reuse to avoid re-allocating the mark stack
- MarkStack* mark_stack_;
+ UniquePtr<MarkStack> mark_stack_;
// Number of bytes allocated. Adjusted after each allocation and free.
size_t num_bytes_allocated_;
@@ -339,6 +347,9 @@ class LOCKABLE Heap {
// offset of java.lang.ref.FinalizerReference.zombie
MemberOffset finalizer_reference_zombie_offset_;
+ // If we have a zygote space.
+ bool have_zygote_space_;
+
// Target ideal heap utilization ratio
float target_utilization_;
@@ -347,6 +358,7 @@ class LOCKABLE Heap {
friend class ScopedHeapLock;
FRIEND_TEST(SpaceTest, AllocAndFree);
FRIEND_TEST(SpaceTest, AllocAndFreeList);
+ FRIEND_TEST(SpaceTest, ZygoteSpace);
friend class SpaceTest;
DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
diff --git a/src/heap_bitmap.cc b/src/heap_bitmap.cc
index 7d81a5d623..50a037b5f5 100644
--- a/src/heap_bitmap.cc
+++ b/src/heap_bitmap.cc
@@ -1,16 +1,31 @@
#include "heap_bitmap.h"
+#include "space.h"
namespace art {
void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
// TODO: C++0x auto
- for (Bitmaps::iterator cur = bitmaps_.begin(); cur != bitmaps_.end(); ++cur) {
- if (*cur == old_bitmap) {
- *cur = new_bitmap;
+ for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+ if (*it == old_bitmap) {
+ *it = new_bitmap;
return;
}
}
LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
}
+void HeapBitmap::AddSpaceBitmap(SpaceBitmap* bitmap) {
+ DCHECK(bitmap != NULL);
+
+ // Check for interval overlap.
+ for (Bitmaps::const_iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+ SpaceBitmap* cur_bitmap = *it;
+ if (bitmap->HeapBegin() < cur_bitmap->HeapBegin() + cur_bitmap->HeapSize() &&
+ bitmap->HeapBegin() + bitmap->HeapSize() > cur_bitmap->HeapBegin()) {
+ LOG(FATAL) << "Overlapping space bitmaps added to heap bitmap!";
+ }
+ }
+ bitmaps_.push_back(bitmap);
+}
+
} // namespace art
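Note: the interval check added to AddSpaceBitmap() is the standard half-open overlap test; stated on its own as a tiny standalone sketch (not code from this change):

    #include <cstddef>
    #include <stdint.h>

    // [a, a + n) and [b, b + m) overlap iff each interval starts before the other ends.
    bool Overlaps(uintptr_t a, size_t n, uintptr_t b, size_t m) {
      return a < b + m && b < a + n;
    }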
diff --git a/src/heap_bitmap.h b/src/heap_bitmap.h
index 29a7b1f396..433319913a 100644
--- a/src/heap_bitmap.h
+++ b/src/heap_bitmap.h
@@ -69,11 +69,16 @@ namespace art {
// Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap);
- private:
- void AddSpaceBitmap(SpaceBitmap* space) {
- bitmaps_.push_back(space);
+ HeapBitmap(Heap* heap) : heap_(heap) {
+
}
+ private:
+
+ const Heap* const heap_;
+
+ void AddSpaceBitmap(SpaceBitmap* bitmap);
+
typedef std::vector<SpaceBitmap*> Bitmaps;
Bitmaps bitmaps_;
diff --git a/src/macros.h b/src/macros.h
index 28b9f7aba6..f7146ad3dc 100644
--- a/src/macros.h
+++ b/src/macros.h
@@ -140,6 +140,8 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
_rc; })
#endif
+template<typename T> void UNUSED(const T&) {}
+
#if defined(__SUPPORT_TS_ANNOTATION__)
#define ACQUIRED_AFTER(...) __attribute__ ((acquired_after(__VA_ARGS__)))
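Note: a hypothetical call site for the new UNUSED() helper, which consumes a value so that intentionally ignored parameters do not trigger unused-parameter warnings:

    static void VisitRootCallback(const Object* root, void* arg) {
      UNUSED(arg);  // the callback signature requires arg; this visitor ignores it
      // ... visit root ...
    }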
diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc
index 5155e30afe..df394db90a 100644
--- a/src/mark_sweep.cc
+++ b/src/mark_sweep.cc
@@ -59,7 +59,7 @@ void MarkSweep::Init() {
const Spaces& spaces = heap_->GetSpaces();
// TODO: C++0x auto
for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if ((*cur)->IsAllocSpace()) {
+ if ((*cur)->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
current_mark_bitmap_ = (*cur)->GetMarkBitmap();
break;
}
@@ -126,14 +126,6 @@ void MarkSweep::MarkRoots() {
Runtime::Current()->VisitRoots(MarkObjectVisitor, this);
}
-void MarkSweep::ScanImageRootVisitor(Object* root, void* arg) {
- DCHECK(root != NULL);
- DCHECK(arg != NULL);
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- // We do not need to mark since live == marked for image spaces.
- mark_sweep->ScanObject(root);
-}
-
class CheckObjectVisitor {
public:
CheckObjectVisitor(MarkSweep* const mark_sweep)
@@ -163,28 +155,45 @@ void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
mark_sweep->CheckObject(root);
}
-// Marks all objects that are in images and have been touched by the mutator
-void MarkSweep::ScanDirtyImageRoots() {
+void MarkSweep::CopyMarkBits() {
const std::vector<Space*>& spaces = heap_->GetSpaces();
- CardTable* card_table = heap_->GetCardTable();
for (size_t i = 0; i < spaces.size(); ++i) {
- if (spaces[i]->IsImageSpace()) {
- byte* begin = spaces[i]->Begin();
- byte* end = spaces[i]->End();
- card_table->Scan(spaces[i]->GetLiveBitmap(), begin, end, ScanImageRootVisitor, this);
+ Space* space = spaces[i];
+ if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ DCHECK_EQ(live_bitmap->Size(), mark_bitmap->Size());
+ std::copy(live_bitmap->Begin(), live_bitmap->Begin() + live_bitmap->Size() / kWordSize, mark_bitmap->Begin());
}
}
}
-void MarkSweep::CheckBitmapCallback(Object* obj, void* finger, void* arg) {
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->finger_ = reinterpret_cast<Object*>(finger);
- mark_sweep->CheckObject(obj);
-}
+class ScanImageRootVisitor {
+ public:
+ ScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
-void MarkSweep::CheckBitmapNoFingerCallback(Object* obj, void* arg) {
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->CheckObject(obj);
+ }
+
+ void operator ()(const Object* root) const {
+ DCHECK(root != NULL);
+ mark_sweep_->ScanObject(root);
+ }
+
+ private:
+ MarkSweep* const mark_sweep_;
+};
+
+// Marks all objects that are in images and have been touched by the mutator
+void MarkSweep::ScanDirtyImageRoots() {
+ const std::vector<Space*>& spaces = heap_->GetSpaces();
+ CardTable* card_table = heap_->GetCardTable();
+ ScanImageRootVisitor image_root_visitor(this);
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ Space* space = spaces[i];
+ if (space->IsImageSpace()) {
+ card_table->Scan(space->GetLiveBitmap(), space->Begin(), space->End(), image_root_visitor);
+ }
+ }
}
void MarkSweep::ScanBitmapCallback(Object* obj, void* finger, void* arg) {
@@ -201,37 +210,53 @@ void MarkSweep::ScanDirtyCardCallback(Object* obj, void* arg) {
void MarkSweep::ScanGrayObjects() {
const std::vector<Space*>& spaces = heap_->GetSpaces();
CardTable* card_table = heap_->GetCardTable();
+ ScanImageRootVisitor image_root_visitor(this);
for (size_t i = 0; i < spaces.size(); ++i) {
byte* begin = spaces[i]->Begin();
byte* end = spaces[i]->End();
// Image spaces are handled properly since live == marked for them.
- card_table->Scan(spaces[i]->GetMarkBitmap(), begin, end, ScanImageRootVisitor, this);
+ card_table->Scan(spaces[i]->GetMarkBitmap(), begin, end, image_root_visitor);
}
}
+class CheckBitmapVisitor {
+ public:
+ CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
+
+ }
+
+ void operator ()(const Object* obj) const {
+ DCHECK(obj != NULL);
+ mark_sweep_->CheckObject(obj);
+ }
+
+ private:
+ MarkSweep* mark_sweep_;
+};
+
void MarkSweep::VerifyImageRoots() {
// Verify roots ensures that all the references inside the image space point
// objects which are either in the image space or marked objects in the alloc
// space
#ifndef NDEBUG
- void* arg = reinterpret_cast<void*>(this);
- const std::vector<Space*>& spaces = heap_->GetSpaces();
- for (size_t i = 0; i < spaces.size(); ++i) {
- if (spaces[i]->IsImageSpace()) {
- uintptr_t begin = reinterpret_cast<uintptr_t>(spaces[i]->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(spaces[i]->End());
- SpaceBitmap* live_bitmap = spaces[i]->GetLiveBitmap();
+ CheckBitmapVisitor visitor(this);
+ const Spaces& spaces = heap_->GetSpaces();
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ const Space* space = *it;
+ if (space->IsImageSpace()) {
+ uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
DCHECK(live_bitmap != NULL);
- live_bitmap->ScanWalk(begin, end, CheckBitmapCallback, arg);
+ live_bitmap->VisitMarkedRange(begin, end, visitor);
}
}
- finger_ = reinterpret_cast<Object*>(~0);
#endif
}
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
-void MarkSweep::RecursiveMark() {
+void MarkSweep::RecursiveMark(bool partial) {
// RecursiveMark will build the lists of known instances of the Reference classes.
// See DelayReferenceReferent for details.
CHECK(soft_reference_list_ == NULL);
@@ -241,12 +266,17 @@ void MarkSweep::RecursiveMark() {
CHECK(cleared_reference_list_ == NULL);
void* arg = reinterpret_cast<void*>(this);
- const std::vector<Space*>& spaces = heap_->GetSpaces();
+ const Spaces& spaces = heap_->GetSpaces();
+
for (size_t i = 0; i < spaces.size(); ++i) {
- if (spaces[i]->IsAllocSpace()) {
- uintptr_t begin = reinterpret_cast<uintptr_t>(spaces[i]->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(spaces[i]->End());
- current_mark_bitmap_ = spaces[i]->GetMarkBitmap();
+ Space* space = spaces[i];
+ if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT ||
+ (!partial && space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT)
+ ) {
+ uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+
+ current_mark_bitmap_ = space->GetMarkBitmap();
current_mark_bitmap_->ScanWalk(begin, end, &ScanBitmapCallback, arg);
}
}
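Note: RecursiveMark() above and Sweep() further down select spaces with the same retention-policy test. Factored out as a sketch for readability (the GCRP_* values are the ones this change introduces; the helper itself does not exist in the patch):

    // A partial GC only touches GCRP_ALWAYS_COLLECT spaces (the regular alloc
    // space); a full GC also covers GCRP_FULL_COLLECT (zygote) spaces; image
    // spaces are GCRP_NEVER_COLLECT and are never marked or swept this way.
    static bool ShouldMarkAndSweep(Space* space, bool partial) {
      return space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT ||
             (!partial && space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT);
    }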
@@ -305,7 +335,6 @@ void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
for (size_t i = 0; i < num_ptrs; ++i) {
Object* obj = static_cast<Object*>(ptrs[i]);
freed_bytes += space->AllocationSize(obj);
- heap->GetLiveBitmap()->Clear(obj);
}
// AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
space->FreeList(num_ptrs, ptrs);
@@ -313,14 +342,25 @@ void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
for (size_t i = 0; i < num_ptrs; ++i) {
Object* obj = static_cast<Object*>(ptrs[i]);
freed_bytes += space->AllocationSize(obj);
- heap->GetLiveBitmap()->Clear(obj);
space->Free(obj);
}
}
heap->RecordFreeLocked(freed_objects, freed_bytes);
}
-void MarkSweep::Sweep() {
+void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+ ScopedHeapLock lock;
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ Heap* heap = context->heap;
+ // We don't free any actual memory to avoid dirtying the shared zygote pages.
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ Object* obj = static_cast<Object*>(ptrs[i]);
+ heap->GetLiveBitmap()->Clear(obj);
+ heap->GetCardTable()->MarkCard(obj);
+ }
+}
+
+void MarkSweep::Sweep(bool partial) {
SweepSystemWeaks();
DCHECK(mark_stack_->IsEmpty());
@@ -329,15 +369,25 @@ void MarkSweep::Sweep() {
SweepCallbackContext scc;
scc.heap = heap_;
for (size_t i = 0; i < spaces.size(); ++i) {
- if (!spaces[i]->IsImageSpace()) {
- uintptr_t begin = reinterpret_cast<uintptr_t>(spaces[i]->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(spaces[i]->End());
- scc.space = spaces[i]->AsAllocSpace();
- // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
- SpaceBitmap* live_bitmap = scc.space->GetMarkBitmap();
- SpaceBitmap* mark_bitmap = scc.space->GetLiveBitmap();
- SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
- &MarkSweep::SweepCallback, reinterpret_cast<void*>(&scc));
+ Space* space = spaces[i];
+ if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT ||
+ (!partial && space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT)) {
+ uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+ scc.space = space->AsAllocSpace();
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
+ // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
+ SpaceBitmap::SweepWalk(
+ *mark_bitmap, *live_bitmap, begin, end, &SweepCallback, reinterpret_cast<void*>(&scc));
+ } else {
+ // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free actual memory.
+ SpaceBitmap::SweepWalk(
+ *live_bitmap, *mark_bitmap, begin, end, &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+ }
}
}
}
@@ -500,7 +550,7 @@ inline void MarkSweep::ScanOther(const Object* obj) {
// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
-inline void MarkSweep::ScanObject(const Object* obj) {
+void MarkSweep::ScanObject(const Object* obj) {
DCHECK(obj != NULL);
DCHECK(obj->GetClass() != NULL);
DCHECK(heap_->GetMarkBitmap()->Test(obj));
@@ -517,6 +567,7 @@ inline void MarkSweep::ScanObject(const Object* obj) {
void MarkSweep::ProcessMarkStack() {
while (!mark_stack_->IsEmpty()) {
const Object* obj = mark_stack_->Pop();
+ DCHECK(obj != NULL);
ScanObject(obj);
}
}
@@ -649,7 +700,7 @@ MarkSweep::~MarkSweep() {
const Spaces& spaces = heap_->GetSpaces();
// TODO: C++0x auto
for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if ((*cur)->IsAllocSpace()) {
+ if ((*cur)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
(*cur)->GetMarkBitmap()->Clear();
}
}
diff --git a/src/mark_sweep.h b/src/mark_sweep.h
index 5f275a4624..108da877e2 100644
--- a/src/mark_sweep.h
+++ b/src/mark_sweep.h
@@ -57,7 +57,10 @@ class MarkSweep {
}
// Builds a mark stack and recursively mark until it empties.
- void RecursiveMark();
+ void RecursiveMark(bool partial);
+
+ // Copies mark bits from live bitmap of zygote space to mark bitmap for partial GCs.
+ void CopyMarkBits();
// Builds a mark stack with objects on dirty cards and recursively mark
// until it empties.
@@ -66,6 +69,10 @@ class MarkSweep {
// Remarks the root set after completing the concurrent mark.
void ReMarkRoots();
+ Heap* GetHeap() {
+ return heap_;
+ }
+
void ProcessReferences(bool clear_soft_references) {
ProcessReferences(&soft_reference_list_, clear_soft_references,
&weak_reference_list_,
@@ -74,12 +81,15 @@ class MarkSweep {
}
// Sweeps unmarked objects to complete the garbage collection.
- void Sweep();
+ void Sweep(bool partial);
Object* GetClearedReferences() {
return cleared_reference_list_;
}
+ // Blackens an object.
+ void ScanObject(const Object* obj);
+
private:
// Returns true if the object has its bit set in the mark bitmap.
bool IsMarked(const Object* object) const {
@@ -97,8 +107,6 @@ class MarkSweep {
static void ReMarkObjectVisitor(const Object* root, void* arg);
- static void ScanImageRootVisitor(Object* root, void* arg);
-
static void VerifyImageRootVisitor(Object* root, void* arg);
static void ScanDirtyCardCallback(Object* obj, void* arg);
@@ -111,16 +119,12 @@ class MarkSweep {
static void ScanBitmapCallback(Object* obj, void* finger, void* arg);
- static void CheckBitmapCallback(Object* obj, void* finger, void* arg);
-
- static void CheckBitmapNoFingerCallback(Object* obj, void* arg);
-
static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg);
- void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static);
+ // Special sweep for zygote that just marks objects / dirties cards.
+ static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg);
- // Blackens an object.
- void ScanObject(const Object* obj);
+ void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static);
void CheckObject(const Object* obj);
@@ -275,14 +279,20 @@ class MarkSweep {
size_t other_count_;
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
+ friend class CheckBitmapVisitor;
friend class CheckObjectVisitor;
+ friend class CheckReferenceVisitor;
friend class InternTableEntryIsUnmarked;
friend class MarkIfReachesAllocspaceVisitor;
+ friend class ModUnionCheckReferences;
friend class ModUnionClearCardVisitor;
friend class ModUnionReferenceVisitor;
friend class ModUnionVisitor;
friend class ModUnionTableBitmap;
friend class ModUnionTableReferenceCache;
+ friend class ModUnionScanImageRootVisitor;
+ friend class ScanBitmapVisitor;
+ friend class ScanImageRootVisitor;
DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};
diff --git a/src/mem_map.cc b/src/mem_map.cc
index 409e653ac1..9e867728ff 100644
--- a/src/mem_map.cc
+++ b/src/mem_map.cc
@@ -134,6 +134,13 @@ MemMap::MemMap(byte* begin, size_t size, void* base_begin, size_t base_size, int
CHECK_NE(base_size_, 0U);
};
+void MemMap::UnMapAtEnd(byte* new_end) {
+ DCHECK_GE(new_end, Begin());
+ DCHECK_LE(new_end, End());
+ size_t unmap_size = End() - new_end;
+ munmap(new_end, unmap_size);
+ size_ -= unmap_size;
+}
bool MemMap::Protect(int prot) {
if (base_begin_ == NULL && base_size_ == 0) {
diff --git a/src/mem_map.h b/src/mem_map.h
index f442570000..c7744bbf3b 100644
--- a/src/mem_map.h
+++ b/src/mem_map.h
@@ -75,11 +75,14 @@ class MemMap {
return begin_ + size_;
}
+ // Trim by unmapping pages at the end of the map.
+ void UnMapAtEnd(byte* new_end);
+
private:
MemMap(byte* begin, size_t size, void* base_begin, size_t base_size, int prot);
byte* const begin_; // Start of data.
- const size_t size_; // Length of data.
+ size_t size_; // Length of data.
void* const base_begin_; // Page-aligned base address.
const size_t base_size_; // Length of mapping.
diff --git a/src/mod_union_table.cc b/src/mod_union_table.cc
index 3b0de40e06..410bf62887 100644
--- a/src/mod_union_table.cc
+++ b/src/mod_union_table.cc
@@ -39,6 +39,7 @@ class MarkIfReachesAllocspaceVisitor {
for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
bitmap_->Set(obj);
+ break;
}
}
}
@@ -80,88 +81,86 @@ class ModUnionClearCardVisitor {
std::vector<byte*>* cleared_cards_;
};
-ModUnionTableBitmap::ModUnionTableBitmap(Heap* heap) : heap_(heap) {
+ModUnionTableBitmap::ModUnionTableBitmap(Heap* heap) : ModUnionTable(heap) {
// Prevent fragmentation of the heap which is caused by resizing of the vector.
// TODO: Make a new vector which uses madvise (basically same as a mark stack).
cleared_cards_.reserve(32);
+ const Spaces& spaces = mark_sweep_->GetHeap()->GetSpaces();
+ // Create one heap bitmap per image space.
+ // TODO: C++0x auto
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ Space* space = *it;
+ if (space->IsImageSpace()) {
+ // The mod-union table is only needed when we have an image space, since its purpose is to
+ // cache image roots.
+ UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("mod-union table bitmap", space->Begin(), space->Capacity()));
+ CHECK(bitmap.get() != NULL) << "Failed to create mod-union bitmap";
+ bitmaps_.Put(space, bitmap.release());
+ }
+ }
}
ModUnionTableBitmap::~ModUnionTableBitmap() {
STLDeleteValues(&bitmaps_);
}
-void ModUnionTableBitmap::Init() {
- const Spaces& spaces = heap_->GetSpaces();
-
- // Create one heap bitmap per image space.
- for (size_t i = 0; i < spaces.size(); ++i) {
- if (spaces[i]->IsImageSpace()) {
- // Allocate the mod-union table
- // The mod-union table is only needed when we have an image space since it's purpose is to cache image roots.
- UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("mod-union table bitmap", spaces[i]->Begin(), spaces[i]->Capacity()));
- if (bitmap.get() == NULL) {
- LOG(FATAL) << "Failed to create mod-union bitmap";
- }
-
- bitmaps_.Put(spaces[i], bitmap.release());
- }
- }
-}
-
-void ModUnionTableBitmap::ClearCards() {
- CardTable* card_table = heap_->GetCardTable();
- for (BitmapMap::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- const Space* space = it->first;
- ModUnionClearCardVisitor visitor(&cleared_cards_);
- // Clear dirty cards in the this image space and update the corresponding mod-union bits.
- card_table->VisitClear(space->Begin(), space->End(), visitor);
- }
+void ModUnionTableBitmap::ClearCards(Space* space) {
+ CardTable* card_table = mark_sweep_->heap_->GetCardTable();
+ ModUnionClearCardVisitor visitor(&cleared_cards_);
+ // Clear dirty cards in this image space and update the corresponding mod-union bits.
+ card_table->VisitClear(space->Begin(), space->End(), visitor);
}
-void ModUnionTableBitmap::Update(MarkSweep* mark_sweep) {
- CardTable* card_table = heap_->GetCardTable();
+void ModUnionTableBitmap::Update() {
+ CardTable* card_table = mark_sweep_->heap_->GetCardTable();
while (!cleared_cards_.empty()) {
byte* card = cleared_cards_.back();
cleared_cards_.pop_back();
- // Find out which bitmap the card maps to.
- SpaceBitmap* bitmap = 0;
- const Space* space = 0;
- for (BitmapMap::iterator cur = bitmaps_.begin(); cur != bitmaps_.end(); ++cur) {
- space = cur->first;
- if (space->Contains(reinterpret_cast<Object*>(card_table->AddrFromCard(card)))) {
- bitmap = cur->second;
- break;
- }
- }
- DCHECK(bitmap != NULL);
-
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card + 1));
+ uintptr_t end = start + GC_CARD_SIZE;
+ Space* space = heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start));
+ SpaceBitmap* bitmap = space->GetLiveBitmap();
- // Clear the mod-union bitmap range corresponding to this card so that we
- // don't have any objects marked which do not reach the alloc space.
+ // Clear the mod-union bitmap range corresponding to this card so that we don't have any
+ // objects marked which do not reach the alloc space.
bitmap->VisitRange(start, end, SpaceBitmap::ClearVisitor(bitmap));
- // At this point we need to update the mod-union bitmap to contain all the
- // objects which reach the alloc space.
- ModUnionVisitor visitor(mark_sweep, bitmap);
+ // At this point we need to update the mod-union bitmap to contain all the objects which reach
+ // the alloc space.
+ ModUnionVisitor visitor(mark_sweep_, bitmap);
space->GetLiveBitmap()->VisitMarkedRange(start, end, visitor);
}
}
-void ModUnionTableBitmap::MarkReferences(MarkSweep* mark_sweep) {
+class ModUnionScanImageRootVisitor {
+ public:
+ ModUnionScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
+
+ }
+
+ void operator ()(const Object* root) const {
+ DCHECK(root != NULL);
+ mark_sweep_->ScanObject(root);
+ }
+
+ private:
+ MarkSweep* const mark_sweep_;
+};
+
+void ModUnionTableBitmap::MarkReferences() {
// Some tests have no image space, and therefore no mod-union bitmap.
+ ModUnionScanImageRootVisitor image_root_scanner(GetMarkSweep());
for (BitmapMap::iterator cur = bitmaps_.begin(); cur != bitmaps_.end(); ++cur) {
const Space* space = cur->first;
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- cur->second->VisitRange(begin, end, MarkSweep::ScanImageRootVisitor, mark_sweep);
+ cur->second->VisitMarkedRange(begin, end, image_root_scanner);
}
}
-ModUnionTableReferenceCache::ModUnionTableReferenceCache(Heap* heap) : heap_(heap) {
+ModUnionTableReferenceCache::ModUnionTableReferenceCache(Heap* heap) : ModUnionTable(heap) {
cleared_cards_.reserve(32);
}
@@ -169,51 +168,43 @@ ModUnionTableReferenceCache::~ModUnionTableReferenceCache() {
}
-void ModUnionTableReferenceCache::Init() {
+void ModUnionTableReferenceCache::ClearCards(Space* space) {
+ CardTable* card_table = GetMarkSweep()->GetHeap()->GetCardTable();
+ ModUnionClearCardVisitor visitor(&cleared_cards_);
+ // Clear dirty cards in this space and update the corresponding mod-union bits.
+ card_table->VisitClear(space->Begin(), space->End(), visitor);
}
-void ModUnionTableReferenceCache::ClearCards() {
- const Spaces& spaces = heap_->GetSpaces();
- CardTable* card_table = heap_->GetCardTable();
-
- // Create one heap bitmap per image space.
- for (size_t i = 0; i < spaces.size(); ++i) {
- if (spaces[i]->IsImageSpace()) {
- ModUnionClearCardVisitor visitor(&cleared_cards_);
- // Clear dirty cards in the this image space and update the corresponding mod-union bits.
- card_table->VisitClear(spaces[i]->Begin(), spaces[i]->End(), visitor);
- }
- }
-}
-
-class AddIfReachesAllocSpaceVisitor {
+class AddToReferenceArrayVisitor {
public:
- explicit AddIfReachesAllocSpaceVisitor(
- MarkSweep* const mark_sweep,
+ explicit AddToReferenceArrayVisitor(
+ ModUnionTableReferenceCache* const mod_union_table,
ModUnionTableReferenceCache::ReferenceArray* references)
- : mark_sweep_(mark_sweep),
+ : mod_union_table_(mod_union_table),
references_(references) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const {
- if (mark_sweep_->heap_->GetAllocSpace()->Contains(ref)) {
+ void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const {
+ // Only add the reference if it fits our criteria.
+ if (mod_union_table_->AddReference(obj, ref)) {
references_->push_back(ref);
}
}
private:
- MarkSweep* const mark_sweep_;
- ModUnionTableReferenceCache::ReferenceArray* references_;
+ ModUnionTableReferenceCache* mod_union_table_;
+ ModUnionTable::ReferenceArray* references_;
};
class ModUnionReferenceVisitor {
public:
explicit ModUnionReferenceVisitor(
- MarkSweep* const mark_sweep,
+ ModUnionTableReferenceCache* const mod_union_table,
ModUnionTableReferenceCache::ReferenceArray* references)
- : mark_sweep_(mark_sweep),
+ : mod_union_table_(mod_union_table),
references_(references) {
}
@@ -221,50 +212,190 @@ class ModUnionReferenceVisitor {
DCHECK(obj != NULL);
// We don't have an early exit since we use the visitor pattern, an early
// exit should significantly speed this up.
- AddIfReachesAllocSpaceVisitor visitor(mark_sweep_, references_);
- mark_sweep_->VisitObjectReferences(obj, visitor);
+ AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
+ mod_union_table_->GetMarkSweep()->VisitObjectReferences(obj, visitor);
}
private:
- MarkSweep* const mark_sweep_;
- ModUnionTableReferenceCache::ReferenceArray* references_;
+ ModUnionTableReferenceCache* const mod_union_table_;
+ ModUnionTable::ReferenceArray* references_;
};
-void ModUnionTableReferenceCache::Update(MarkSweep* mark_sweep) {
- CardTable* card_table = heap_->GetCardTable();
- while (!cleared_cards_.empty()) {
- byte* card = cleared_cards_.back();
- cleared_cards_.pop_back();
- // Update the corresponding references for the card
- // TODO: C++0x auto
- ReferenceMap::iterator found = references_.find(card);
- if (found == references_.end()) {
- references_.Put(card, ReferenceArray());
- found = references_.find(card);
+class CheckReferenceVisitor {
+ public:
+ typedef std::set<const Object*> ReferenceSet;
+
+ explicit CheckReferenceVisitor(
+ ModUnionTableReferenceCache* const mod_union_table,
+ const ReferenceSet& references)
+ : mod_union_table_(mod_union_table),
+ references_(references) {
+ }
+
+ // Extra parameters are required since we use this same visitor signature for checking objects.
+ void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const {
+ Heap* heap = mod_union_table_->GetMarkSweep()->GetHeap();
+ if (mod_union_table_->AddReference(obj, ref) && references_.find(ref) == references_.end()) {
+ Space* from_space = heap->FindSpaceFromObject(obj);
+ Space* to_space = heap->FindSpaceFromObject(ref);
+ LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
+ << "References " << reinterpret_cast<const void*>(ref)
+ << "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
+ LOG(INFO) << "FromSpace " << from_space->GetName() << " type " << from_space->GetGcRetentionPolicy();
+ LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy();
+ mod_union_table_->GetHeap()->DumpSpaces();
+ LOG(FATAL) << "FATAL ERROR";
}
+ }
+
+ private:
+ ModUnionTableReferenceCache* const mod_union_table_;
+ const ReferenceSet& references_;
+};
+
+class ModUnionCheckReferences {
+ public:
+ typedef std::set<const Object*> ReferenceSet;
+
+  explicit ModUnionCheckReferences(
+ ModUnionTableReferenceCache* const mod_union_table,
+ const ReferenceSet& references)
+ : mod_union_table_(mod_union_table),
+ references_(references) {
+ }
+
+ void operator ()(Object* obj) const {
+ DCHECK(obj != NULL);
+ MarkSweep* mark_sweep = mod_union_table_->GetMarkSweep();
+ CheckReferenceVisitor visitor(mod_union_table_, references_);
+ mark_sweep->VisitObjectReferences(obj, visitor);
+ }
+
+ private:
+ ModUnionTableReferenceCache* const mod_union_table_;
+ const ReferenceSet& references_;
+};
+
+void ModUnionTableReferenceCache::Verify() {
+#if VERIFY_MOD_UNION
+ // Start by checking that everything in the mod union table is marked.
+ Heap* heap = GetMarkSweep()->GetHeap();
+ for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
+ for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+ DCHECK(heap->GetLiveBitmap()->Test(*it_ref));
+ }
+ }
+
+ // Check the references of each clean card which is also in the mod union table.
+ for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
+ const byte* card = &*it->first;
+ if (*card == GC_CARD_CLEAN) {
+ std::set<const Object*> reference_set;
+ for (ReferenceArray::const_iterator itr = it->second.begin(); itr != it->second.end();++itr) {
+ reference_set.insert(*itr);
+ }
+ ModUnionCheckReferences visitor(this, reference_set);
+ CardTable* card_table = heap->GetCardTable();
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + GC_CARD_SIZE;
+ SpaceBitmap* live_bitmap =
+ heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
+ live_bitmap->VisitMarkedRange(start, end, visitor);
+ }
+ }
+#endif
+}
+
+void ModUnionTableReferenceCache::Update() {
+ Heap* heap = GetMarkSweep()->GetHeap();
+ CardTable* card_table = heap->GetCardTable();
+
+ ReferenceArray cards_references;
+ ModUnionReferenceVisitor visitor(this, &cards_references);
+
+ for (size_t i = 0; i < cleared_cards_.size(); ++i) {
+ byte* card = cleared_cards_[i];
// Clear and re-compute alloc space references associated with this card.
- ReferenceArray& cards_references = found->second;
cards_references.clear();
- ModUnionReferenceVisitor visitor(mark_sweep, &cards_references);
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card + 1));
+ uintptr_t end = start + GC_CARD_SIZE;
SpaceBitmap* live_bitmap =
- heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
+ heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor);
+
+ // Update the corresponding references for the card.
+ // TODO: C++0x auto
+ ReferenceMap::iterator found = references_.find(card);
+ if (found == references_.end()) {
+ if (cards_references.empty()) {
+ // No reason to add empty array.
+ continue;
+ }
+ references_.Put(card, cards_references);
+ } else {
+ found->second = cards_references;
+ }
}
+ cleared_cards_.clear();
}
-void ModUnionTableReferenceCache::MarkReferences(MarkSweep* mark_sweep) {
+void ModUnionTableReferenceCache::MarkReferences() {
+ Heap* heap = GetMarkSweep()->GetHeap();
+ HeapBitmap* mark_bitmap = heap->GetMarkBitmap();
// TODO: C++0x auto
size_t count = 0;
for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
- mark_sweep->MarkObject(*it_ref);
+ mark_bitmap->Set(*it_ref);
++count;
}
}
- VLOG(heap) << "Marked " << count << " references in mod union table";
+ if (VLOG_IS_ON(heap)) {
+    VLOG(heap) << "Marked " << count << " references in mod union table";
+ }
+}
+
+ModUnionTableCardCache::ModUnionTableCardCache(Heap* heap) : ModUnionTable(heap) {
+}
+
+ModUnionTableCardCache::~ModUnionTableCardCache() {
+}
+
+class ModUnionClearCardSetVisitor {
+ public:
+ explicit ModUnionClearCardSetVisitor(std::set<byte*>* const cleared_cards)
+ : cleared_cards_(cleared_cards) {
+ }
+
+ void operator ()(byte* card) const {
+ cleared_cards_->insert(card);
+ }
+ private:
+ std::set<byte*>* const cleared_cards_;
+};
+
+void ModUnionTableCardCache::ClearCards(Space* space) {
+ CardTable* card_table = GetMarkSweep()->GetHeap()->GetCardTable();
+ ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in this space and update the corresponding mod-union bits.
+ card_table->VisitClear(space->Begin(), space->End(), visitor);
+}
+
+// Mark all references to the alloc space(s).
+void ModUnionTableCardCache::MarkReferences() {
+ CardTable* card_table = heap_->GetCardTable();
+ ModUnionScanImageRootVisitor visitor(GetMarkSweep());
+ for (ClearedCards::const_iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
+ byte* card = *it;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + GC_CARD_SIZE;
+ SpaceBitmap* live_bitmap =
+ heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
+ live_bitmap->VisitMarkedRange(start, end, visitor);
+ }
}
} // namespace art
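A minimal sketch of how the reworked, parameterless interface appears intended to be driven during a collection. The call order is inferred from this file only; the real sequencing lives in heap.cc and mark_sweep.cc, which are not part of this hunk, and the local names below are illustrative:

    ModUnionTableReferenceCache* table = ...;  // owned by the heap; accessor not shown in this diff
    table->Init(mark_sweep);         // hand the table the MarkSweep instance for this GC
    table->ClearCards(image_space);  // snapshot and clear the dirty cards of one space
    // marking proceeds...
    table->Update();                 // re-scan the cleared cards and cache their alloc-space references
    table->MarkReferences();         // mark every cached reference directly in the mark bitmap
    table->Verify();                 // compiled out unless VERIFY_MOD_UNION is set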
diff --git a/src/mod_union_table.h b/src/mod_union_table.h
index a9111ee982..f44004bd7e 100644
--- a/src/mod_union_table.h
+++ b/src/mod_union_table.h
@@ -17,7 +17,11 @@
#ifndef ART_SRC_MOD_UNION_TABLE_H_
#define ART_SRC_MOD_UNION_TABLE_H_
+#include "heap.h"
#include "safe_map.h"
+#include "space.h"
+
+#define VERIFY_MOD_UNION 0
namespace art {
@@ -25,78 +29,173 @@ class Heap;
class HeapBitmap;
class Space;
+// Base class for mod-union tables.
class ModUnionTable {
public:
- // Clear cards image space cards.
- virtual void ClearCards() = 0;
+ typedef std::vector<const Object*> ReferenceArray;
- // Update the mod-union table.
- virtual void Update(MarkSweep* mark_sweep) = 0;
+ ModUnionTable(Heap* heap) : heap_(heap), mark_sweep_(0) {
- // Mark all references to the alloc space(s).
- virtual void MarkReferences(MarkSweep* mark_sweep) = 0;
+ }
virtual ~ModUnionTable() {
}
+
+ // Clear cards which map to a memory range of a space.
+ virtual void ClearCards(Space* space) = 0;
+
+ // Update the mod-union table.
+ virtual void Update() = 0;
+
+ // Mark all references which are stored in the mod union table.
+ virtual void MarkReferences() = 0;
+
+  // Verification: sanity-check that we don't have clean cards which conflict with our cached
+  // data for those cards.
+ virtual void Verify() = 0;
+
+ // Should probably clean this up later.
+ void Init(MarkSweep* mark_sweep) {
+ mark_sweep_ = mark_sweep;
+ }
+
+ MarkSweep* GetMarkSweep() {
+ return mark_sweep_;
+ }
+
+ Heap* GetHeap() {
+ return heap_;
+ }
+
+ protected:
+ Heap* heap_;
+ MarkSweep* mark_sweep_;
};
// Bitmap implementation.
+// DEPRECATED: performs strictly worse than simply caching which cards were dirty.
class ModUnionTableBitmap : public ModUnionTable {
public:
ModUnionTableBitmap(Heap* heap);
virtual ~ModUnionTableBitmap();
- void Init();
-
- // Clear image space cards.
- void ClearCards();
+ // Clear space cards.
+ void ClearCards(Space* space);
// Update table based on cleared cards.
- void Update(MarkSweep* mark_sweep);
+ void Update();
// Mark all references to the alloc space(s).
- void MarkReferences(MarkSweep* mark_sweep);
- private:
+ void MarkReferences();
+
+ protected:
// Cleared card array, used to update the mod-union table.
std::vector<byte*> cleared_cards_;
// One bitmap per image space.
- // TODO: Add support for zygote spaces?
- typedef SafeMap<Space*, SpaceBitmap*> BitmapMap;
+ // TODO: Add support for Zygote spaces?
+ typedef SafeMap<Space*, SpaceBitmap*> BitmapMap;
BitmapMap bitmaps_;
-
- Heap* heap_;
};
-// Reference caching implementation. Caches references pointing to alloc space(s)
-// for each card.
+// Reference caching implementation. Caches references pointing to alloc space(s) for each card.
class ModUnionTableReferenceCache : public ModUnionTable {
public:
- typedef std::vector<const Object*> ReferenceArray;
typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
ModUnionTableReferenceCache(Heap* heap);
virtual ~ModUnionTableReferenceCache();
- void Init();
-
- // Clear image space cards.
- void ClearCards();
+ // Clear and store cards for a space.
+ void ClearCards(Space* space);
// Update table based on cleared cards.
- void Update(MarkSweep* mark_sweep);
+ void Update();
// Mark all references to the alloc space(s).
- void MarkReferences(MarkSweep* mark_sweep);
- private:
+ void MarkReferences();
+
+ // Verify the mod-union table.
+ void Verify();
+
+ // Function that tells whether or not to add a reference to the table.
+ virtual bool AddReference(const Object* obj, const Object* ref) = 0;
+
+ protected:
// Cleared card array, used to update the mod-union table.
std::vector<byte*> cleared_cards_;
// Maps from dirty cards to their corresponding alloc space references.
ReferenceMap references_;
+};
- Heap* heap_;
+// Card caching implementation. Keeps track only of which cards were cleared, and nothing else.
+class ModUnionTableCardCache : public ModUnionTable {
+ public:
+ typedef std::set<byte*> ClearedCards;
+ typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
+
+ ModUnionTableCardCache(Heap* heap);
+ virtual ~ModUnionTableCardCache();
+
+ // Clear and store cards for a space.
+ void ClearCards(Space* space);
+
+ // Nothing to update.
+ void Update() {}
+
+ // Mark all references to the alloc space(s).
+ void MarkReferences();
+
+ // Nothing to verify.
+ void Verify() {}
+
+ protected:
+ // Cleared card array, used to update the mod-union table.
+ ClearedCards cleared_cards_;
+};
+
+template <typename Implementation>
+class ModUnionTableToZygoteAllocspace : public Implementation {
+public:
+ ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
+ }
+
+ bool AddReference(const Object* /* obj */, const Object* ref) {
+ const Spaces& spaces = Implementation::GetMarkSweep()->GetHeap()->GetSpaces();
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->Contains(ref)) {
+ return (*it)->IsAllocSpace();
+ }
+ }
+ if (ref != NULL) {
+ Implementation::GetHeap()->DumpSpaces();
+ LOG(FATAL) << "Reference " << ref << " not in any space!";
+ }
+ return false;
+ }
+};
+
+template <typename Implementation>
+class ModUnionTableToAllocspace : public Implementation {
+public:
+ ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
+ }
+
+ bool AddReference(const Object* /* obj */, const Object* ref) {
+ const Spaces& spaces = Implementation::GetMarkSweep()->GetHeap()->GetSpaces();
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->Contains(ref)) {
+ return (*it)->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT;
+ }
+ }
+ if (ref != NULL) {
+ Implementation::GetHeap()->DumpSpaces();
+ LOG(FATAL) << "Reference " << ref << " not in any space!";
+ }
+ return false;
+ }
};
} // namespace art
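Because AddReference() is left pure virtual, ModUnionTableReferenceCache is only usable through one of the policy templates above. A hedged sketch of the kind of instantiation this header seems designed for; the real instantiation sites are in heap.cc, outside this diff, so the pairings shown are assumptions:

    // Image spaces: cache per-card references, but only those pointing into collectable spaces.
    ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache> image_mod_union_table(heap);
    // Zygote space: it is enough to remember which cards were dirtied.
    ModUnionTableCardCache zygote_mod_union_table(heap);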
diff --git a/src/native/dalvik_system_Zygote.cc b/src/native/dalvik_system_Zygote.cc
index 3a8aa48ec9..4fb8397491 100644
--- a/src/native/dalvik_system_Zygote.cc
+++ b/src/native/dalvik_system_Zygote.cc
@@ -35,6 +35,10 @@
#include <sys/prctl.h>
#endif
+#if defined(HAVE_SELINUX)
+#include <selinux/android.h>
+#endif
+
#if defined(__linux__)
#include <sys/personality.h>
#endif
@@ -223,12 +227,10 @@ static void SetCapabilities(int64_t permitted, int64_t effective) {
}
static void SetSchedulerPolicy() {
-#if 0 // SP_DEFAULT is not available in ics-mr1-plus-art.
errno = -set_sched_policy(0, SP_DEFAULT);
if (errno != 0) {
PLOG(FATAL) << "set_sched_policy(0, SP_DEFAULT) failed";
}
-#endif
}
#else
@@ -289,10 +291,11 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
// Utility routine to fork zygote and specialize the child process.
static pid_t ForkAndSpecializeCommon(JNIEnv* env, uid_t uid, gid_t gid, jintArray javaGids,
jint debug_flags, jobjectArray javaRlimits,
- jlong permittedCapabilities, jlong effectiveCapabilities) {
+ jlong permittedCapabilities, jlong effectiveCapabilities,
+ jstring java_se_info, jstring java_se_name, bool is_system_server) {
Runtime* runtime = Runtime::Current();
CHECK(runtime->IsZygote()) << "runtime instance not started with -Xzygote";
- if (false) { // TODO: do we need do anything special like !dvmGcPreZygoteFork()?
+ if (!runtime->PreZygoteFork()) {
LOG(FATAL) << "pre-fork heap failed";
}
@@ -340,6 +343,25 @@ static pid_t ForkAndSpecializeCommon(JNIEnv* env, uid_t uid, gid_t gid, jintArra
SetSchedulerPolicy();
+#if defined(HAVE_SELINUX) && defined(HAVE_ANDROID_OS)
+ {
+ ScopedUtfChars se_info(env, java_se_info);
+ CHECK(se_info != NULL);
+ ScopedUtfChars se_name(env, java_se_name);
+ CHECK(se_name != NULL);
+ rc = selinux_android_setcontext(uid, is_system_server, se_info, se_name);
+ if (rc == -1) {
+ PLOG(FATAL) << "selinux_android_setcontext(" << uid << ", "
+ << (is_system_server ? "true" : "false") << ", "
+ << "\"" << se_info << "\", \"" << se_name << "\") failed";
+ }
+ }
+#else
+ UNUSED(is_system_server);
+ UNUSED(java_se_info);
+ UNUSED(java_se_name);
+#endif
+
// Our system thread ID, etc, has changed so reset Thread state.
self->InitAfterFork();
@@ -354,8 +376,9 @@ static pid_t ForkAndSpecializeCommon(JNIEnv* env, uid_t uid, gid_t gid, jintArra
}
static jint Zygote_nativeForkAndSpecialize(JNIEnv* env, jclass, jint uid, jint gid, jintArray gids,
- jint debug_flags, jobjectArray rlimits) {
- return ForkAndSpecializeCommon(env, uid, gid, gids, debug_flags, rlimits, 0, 0);
+ jint debug_flags, jobjectArray rlimits,
+ jstring se_info, jstring se_name) {
+ return ForkAndSpecializeCommon(env, uid, gid, gids, debug_flags, rlimits, 0, 0, se_info, se_name, false);
}
static jint Zygote_nativeForkSystemServer(JNIEnv* env, jclass, uid_t uid, gid_t gid, jintArray gids,
@@ -363,7 +386,7 @@ static jint Zygote_nativeForkSystemServer(JNIEnv* env, jclass, uid_t uid, gid_t
jlong permittedCapabilities, jlong effectiveCapabilities) {
pid_t pid = ForkAndSpecializeCommon(env, uid, gid, gids,
debug_flags, rlimits,
- permittedCapabilities, effectiveCapabilities);
+ permittedCapabilities, effectiveCapabilities, NULL, NULL, true);
if (pid > 0) {
// The zygote process checks whether the child process has died or not.
LOG(INFO) << "System server process " << pid << " has been created";
@@ -382,7 +405,7 @@ static jint Zygote_nativeForkSystemServer(JNIEnv* env, jclass, uid_t uid, gid_t
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Zygote, nativeExecShell, "(Ljava/lang/String;)V"),
//NATIVE_METHOD(Zygote, nativeFork, "()I"),
- NATIVE_METHOD(Zygote, nativeForkAndSpecialize, "(II[II[[I)I"),
+ NATIVE_METHOD(Zygote, nativeForkAndSpecialize, "(II[II[[ILjava/lang/String;Ljava/lang/String;)I"),
NATIVE_METHOD(Zygote, nativeForkSystemServer, "(II[II[[IJJ)I"),
};
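For reference, the widened descriptor decodes element by element against the new native signature (standard JNI type mapping; nativeForkSystemServer keeps its old descriptor and simply passes NULL for both strings):

    I                    jint uid
    I                    jint gid
    [I                   jintArray gids
    I                    jint debug_flags
    [[I                  jobjectArray rlimits (int[][])
    Ljava/lang/String;   jstring se_info
    Ljava/lang/String;   jstring se_name
    )I                   returns jint (the child pid)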
diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc
index 95dab98f8a..466968834e 100644
--- a/src/oat/runtime/support_invoke.cc
+++ b/src/oat/runtime/support_invoke.cc
@@ -38,7 +38,12 @@ static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method
const void* code = method->GetCode();
// When we return, the caller will branch to this address, so it had better not be 0!
- CHECK(code != NULL) << PrettyMethod(method);
+ if (UNLIKELY(code == NULL)) {
+ MethodHelper mh(method);
+ LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
+ << " location: " << mh.GetDexFile().GetLocation();
+ }
+
uint32_t method_uint = reinterpret_cast<uint32_t>(method);
uint64_t code_uint = reinterpret_cast<uint32_t>(code);
diff --git a/src/runtime.cc b/src/runtime.cc
index 79f75c76b1..6447ede6a0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -222,6 +222,11 @@ void Runtime::Abort() {
// notreached
}
+bool Runtime::PreZygoteFork() {
+ heap_->PreZygoteFork();
+ return true;
+}
+
void Runtime::CallExitHook(jint status) {
if (exit_ != NULL) {
ScopedThreadStateChange tsc(Thread::Current(), kNative);
@@ -765,9 +770,21 @@ void Runtime::InitThreadGroups(Thread* self) {
main_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
+ CHECK(main_thread_group_ != NULL || IsCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
+ CHECK(system_thread_group_ != NULL || IsCompiler());
+}
+
+jobject Runtime::GetMainThreadGroup() const {
+ CHECK(main_thread_group_ != NULL || IsCompiler());
+ return main_thread_group_;
+}
+
+jobject Runtime::GetSystemThreadGroup() const {
+ CHECK(system_thread_group_ != NULL || IsCompiler());
+ return system_thread_group_;
}
void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
diff --git a/src/runtime.h b/src/runtime.h
index b5c36085bd..544dcf4173 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -141,16 +141,10 @@ class Runtime {
static void Abort();
// Returns the "main" ThreadGroup, used when attaching user threads.
- jobject GetMainThreadGroup() const {
- CHECK(main_thread_group_ != NULL || IsCompiler());
- return main_thread_group_;
- }
+ jobject GetMainThreadGroup() const;
// Returns the "system" ThreadGroup, used when attaching our internal threads.
- jobject GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != NULL || IsCompiler());
- return system_thread_group_;
- }
+ jobject GetSystemThreadGroup() const;
// Attaches the calling native thread to the runtime.
void AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group);
@@ -310,6 +304,7 @@ class Runtime {
void SetStatsEnabled(bool new_state);
void DidForkFromZygote();
+ bool PreZygoteFork();
void EnableMethodTracing(Trace* tracer);
void DisableMethodTracing();
diff --git a/src/space.cc b/src/space.cc
index 3d8c5e07e0..02230e146d 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -22,6 +22,7 @@
#include "image.h"
#include "logging.h"
#include "os.h"
+#include "space_bitmap.h"
#include "stl_util.h"
#include "utils.h"
@@ -42,13 +43,16 @@ namespace art {
size_t AllocSpace::bitmap_index_ = 0;
-AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* end,
+AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
size_t growth_limit)
- : Space(name, mem_map, end), mspace_(mspace), growth_limit_(growth_limit) {
+ : Space(name, mem_map, begin, end, GCRP_ALWAYS_COLLECT), mspace_(mspace), growth_limit_(growth_limit) {
CHECK(mspace != NULL);
size_t bitmap_index = bitmap_index_++;
+  DCHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % static_cast<uintptr_t>(GC_CARD_SIZE) == 0);
+  DCHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % static_cast<uintptr_t>(GC_CARD_SIZE) == 0);
+
live_bitmap_.reset(SpaceBitmap::Create(
StringPrintf("allocspace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
@@ -120,7 +124,8 @@ AllocSpace* Space::CreateAllocSpace(const std::string& name, size_t initial_size
}
// Everything is set so record in immutable structure and leave
- AllocSpace* space = new AllocSpace(name, mem_map.release(), mspace, end, growth_limit);
+ MemMap* mem_map_ptr = mem_map.release();
+ AllocSpace* space = new AllocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
@@ -176,6 +181,52 @@ Object* AllocSpace::AllocWithGrowth(size_t num_bytes) {
return result;
}
+AllocSpace* AllocSpace::CreateZygoteSpace() {
+ end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
+ DCHECK(IsAligned<GC_CARD_SIZE>(begin_));
+ DCHECK(IsAligned<GC_CARD_SIZE>(end_));
+ DCHECK(IsAligned<kPageSize>(begin_));
+ DCHECK(IsAligned<kPageSize>(end_));
+ size_t size = RoundUp(Size(), kPageSize);
+ // Trim the heap so that we minimize the size of the Zygote space.
+ Trim();
+ // Trim our mem-map to free unused pages.
+ mem_map_->UnMapAtEnd(end_);
+  // TODO: Don't hardcode these?
+ const size_t starting_size = kPageSize;
+ const size_t initial_size = 2 * MB;
+ // Remaining size is for the new alloc space.
+ const size_t growth_limit = growth_limit_ - size;
+ const size_t capacity = Capacity() - size;
+ VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_);
+ VLOG(heap) << "End " << reinterpret_cast<const void*>(end_);
+ VLOG(heap) << "Size " << size;
+ VLOG(heap) << "GrowthLimit " << growth_limit_;
+ VLOG(heap) << "Capacity " << Capacity();
+ growth_limit_ = RoundUp(size, kPageSize);
+ // FIXME: Do we need reference counted pointers here?
+ // Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
+ VLOG(heap) << "Creating new AllocSpace: ";
+ VLOG(heap) << "Size " << mem_map_->Size();
+ VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
+ VLOG(heap) << "Capacity " << PrettySize(capacity);
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name_.c_str(), end_, capacity, PROT_READ | PROT_WRITE));
+ void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
+ // Protect memory beyond the initial size.
+ byte* end = mem_map->Begin() + starting_size;
+ if (capacity - initial_size > 0) {
+ CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
+ }
+ AllocSpace* alloc_space = new AllocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
+ live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
+ CHECK(live_bitmap_->HeapLimit() == reinterpret_cast<uintptr_t>(end_));
+ mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
+ CHECK(mark_bitmap_->HeapLimit() == reinterpret_cast<uintptr_t>(end_));
+ name_ += "-zygote-transformed";
+ VLOG(heap) << "zygote space creation done";
+ return alloc_space;
+}
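// A hedged sketch of how a caller appears intended to use CreateZygoteSpace(); the real driver is
// Heap::PreZygoteFork() (reached through Runtime::PreZygoteFork() later in this change), which is
// not part of this hunk, so the steps and names below are assumptions:
//   AllocSpace* zygote_space = alloc_space_;                 // the space allocated into so far
//   AllocSpace* new_alloc_space = zygote_space->CreateZygoteSpace();
//   zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);   // IsZygoteSpace() now returns true
//   AddSpace(new_alloc_space);                               // post-fork allocations go here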
+
void AllocSpace::Free(Object* ptr) {
#if DEBUG_SPACES
CHECK(ptr != NULL);
@@ -309,7 +360,7 @@ void AllocSpace::SetFootprintLimit(size_t new_size) {
size_t ImageSpace::bitmap_index_ = 0;
ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
- : Space(name, mem_map, mem_map->End()) {
+ : Space(name, mem_map, mem_map->Begin(), mem_map->End(), GCRP_NEVER_COLLECT) {
const size_t bitmap_index = bitmap_index_++;
live_bitmap_.reset(SpaceBitmap::Create(
StringPrintf("imagespace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
diff --git a/src/space.h b/src/space.h
index be0fb61890..1aeb33e5b9 100644
--- a/src/space.h
+++ b/src/space.h
@@ -33,6 +33,13 @@ class ImageSpace;
class Object;
class SpaceBitmap;
+enum GcRetentionPolicy {
+ GCRP_NEVER_COLLECT,
+ GCRP_ALWAYS_COLLECT,
+ GCRP_FULL_COLLECT,
+};
+std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
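// How the policies appear to map onto the space types in this change (inferred from the
// constructors and predicates elsewhere in this diff, not stated explicitly anywhere):
//   GCRP_NEVER_COLLECT  - image spaces; never swept, and the live bitmap doubles as the mark bitmap.
//   GCRP_ALWAYS_COLLECT - ordinary alloc spaces, collected on every GC.
//   GCRP_FULL_COLLECT   - the zygote space split off by AllocSpace::CreateZygoteSpace();
//                         IsZygoteSpace() tests for exactly this value.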
+
// A space contains memory allocated for managed objects.
class Space {
public:
@@ -85,6 +92,14 @@ class Space {
return Capacity();
}
+ GcRetentionPolicy GetGcRetentionPolicy() const {
+ return gc_retention_policy_;
+ }
+
+ void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
+ gc_retention_policy_ = gc_retention_policy;
+ }
+
ImageSpace* AsImageSpace() {
DCHECK(IsImageSpace());
return down_cast<ImageSpace*>(this);
@@ -97,6 +112,7 @@ class Space {
virtual bool IsAllocSpace() const = 0;
virtual bool IsImageSpace() const = 0;
+ virtual bool IsZygoteSpace() const = 0;
virtual SpaceBitmap* GetLiveBitmap() const = 0;
virtual SpaceBitmap* GetMarkBitmap() const = 0;
@@ -106,8 +122,12 @@ class Space {
}
protected:
- Space(const std::string& name, MemMap* mem_map, byte* end)
- : name_(name), mem_map_(mem_map), begin_(mem_map->Begin()), end_(end) {}
+ Space(const std::string& name, MemMap* mem_map, byte* begin, byte* end, GcRetentionPolicy gc_retention_policy)
+ : name_(name),
+ mem_map_(mem_map),
+ begin_(begin),
+ end_(end),
+ gc_retention_policy_(gc_retention_policy) {}
std::string name_;
@@ -117,9 +137,12 @@ class Space {
// The beginning of the storage for fast access (always equals mem_map_->GetAddress())
byte* const begin_;
- // Current end of the space
+ // Current end of the space.
byte* end_;
+ // Garbage collection retention policy, used to figure out when we should sweep over this space.
+ GcRetentionPolicy gc_retention_policy_;
+
DISALLOW_COPY_AND_ASSIGN(Space);
};
@@ -180,13 +203,17 @@ class AllocSpace : public Space {
}
virtual bool IsAllocSpace() const {
- return true;
+ return gc_retention_policy_ != GCRP_NEVER_COLLECT;
}
virtual bool IsImageSpace() const {
return false;
}
+ virtual bool IsZygoteSpace() const {
+ return gc_retention_policy_ == GCRP_FULL_COLLECT;
+ }
+
virtual SpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
@@ -198,6 +225,9 @@ class AllocSpace : public Space {
// Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
void SwapBitmaps();
+ // Turn ourself into a zygote space and return a new alloc space which has our unused memory.
+ AllocSpace* CreateZygoteSpace();
+
private:
friend class Space;
@@ -205,7 +235,7 @@ class AllocSpace : public Space {
UniquePtr<SpaceBitmap> mark_bitmap_;
static size_t bitmap_index_;
- AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* end,
+ AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
size_t growth_limit);
bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
@@ -252,11 +282,17 @@ class ImageSpace : public Space {
return true;
}
+ virtual bool IsZygoteSpace() const {
+ return false;
+ }
+
virtual SpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
virtual SpaceBitmap* GetMarkBitmap() const {
+ // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
+ // special cases to test against.
return live_bitmap_.get();
}
diff --git a/src/space_bitmap.cc b/src/space_bitmap.cc
index 28dee4505a..7da8146a14 100644
--- a/src/space_bitmap.cc
+++ b/src/space_bitmap.cc
@@ -38,6 +38,18 @@ SpaceBitmap* SpaceBitmap::Create(const std::string& name, byte* heap_begin, size
// Clean up any resources associated with the bitmap.
SpaceBitmap::~SpaceBitmap() {}
+void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
+ DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
+ size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
+ if (new_size < bitmap_size_) {
+ bitmap_size_ = new_size;
+ }
+ // Not sure if doing this trim is necessary, since nothing past the end of the heap capacity
+ // should be marked.
+  // TODO: Fix this code; it is broken and causes rare heap corruption!
+ // mem_map_->Trim(reinterpret_cast<byte*>(heap_begin_ + bitmap_size_));
+}
+
// Fill the bitmap with zeroes. Returns the bitmap's memory to the
// system as a side-effect.
void SpaceBitmap::Clear() {
@@ -61,24 +73,6 @@ bool SpaceBitmap::HasAddress(const void* obj) const {
return index < bitmap_size_ / kWordSize;
}
-void SpaceBitmap::VisitRange(uintptr_t visit_begin, uintptr_t visit_end, Callback* visitor, void* arg) const {
- size_t start = OffsetToIndex(visit_begin - heap_begin_);
- size_t end = OffsetToIndex(visit_end - heap_begin_ - 1);
- for (size_t i = start; i <= end; i++) {
- word w = bitmap_begin_[i];
- if (w != 0) {
- word high_bit = 1 << (kBitsPerWord - 1);
- uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
- while (w != 0) {
- const int shift = CLZ(w);
- Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
- (*visitor)(obj, arg);
- w &= ~(high_bit >> shift);
- }
- }
- }
-}
-
// Visits set bits in address order. The callback is not permitted to
// change the bitmap bits or max during the traversal.
void SpaceBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
@@ -91,13 +85,12 @@ void SpaceBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
for (uintptr_t i = 0; i <= end; ++i) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
}
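// Note on the rewritten inner loop: CLZ(w) is the distance of the highest set bit from the most
// significant end of the word, so `kWordHighBitMask >> shift` is exactly the bit that was just
// visited. XOR-ing it out is therefore equivalent to the old `w &= ~(high_bit >> shift)`, while
// avoiding the separate `1 << (kBitsPerWord - 1)` computation in a plain int, which is presumably
// the motivation for the change.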
@@ -116,22 +109,34 @@ void SpaceBitmap::ScanWalk(uintptr_t scan_begin, uintptr_t scan_end, ScanCallbac
CHECK(callback != NULL);
CHECK_LE(scan_begin, scan_end);
CHECK_GE(scan_begin, heap_begin_);
- size_t start = OffsetToIndex(scan_begin - heap_begin_);
+
+ // This function doesn't support unaligned boundaries yet.
+ size_t begin_offset = scan_begin - heap_begin_;
+ size_t end_offset = scan_end - heap_begin_;
+ DCHECK((begin_offset / kAlignment) % kBitsPerWord == 0)
+ << "scan begin " << reinterpret_cast<const void*>(scan_begin)
+ << " with offset " << begin_offset
+ << " not aligned to word boundary";
+ DCHECK((end_offset / kAlignment) % kBitsPerWord == 0)
+ << "scan end " << reinterpret_cast<const void*>(scan_end)
+ << " with offset " << end_offset
+ << " not aligned to word boundary";
+
+ size_t start = OffsetToIndex(begin_offset);
if (scan_end < heap_end_) {
// The end of the space we're looking at is before the current maximum bitmap PC, scan to that
// and don't recompute end on each iteration
- size_t end = OffsetToIndex(scan_end - heap_begin_ - 1);
+ size_t end = OffsetToIndex(end_offset - 1);
for (size_t i = start; i <= end; i++) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
void* finger = reinterpret_cast<void*>(IndexToOffset(i + 1) + heap_begin_);
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, finger, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
}
@@ -140,14 +145,13 @@ void SpaceBitmap::ScanWalk(uintptr_t scan_begin, uintptr_t scan_end, ScanCallbac
for (size_t i = start; i <= end; i++) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
void* finger = reinterpret_cast<void*>(IndexToOffset(i + 1) + heap_begin_);
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, finger, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
// update 'end' in case callback modified bitmap
@@ -188,11 +192,10 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap,
for (size_t i = start; i <= end; i++) {
word garbage = live[i] & ~mark[i];
if (UNLIKELY(garbage != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
while (garbage != 0) {
- int shift = CLZ(garbage);
- garbage &= ~(high_bit >> shift);
+ const size_t shift = CLZ(garbage);
+ garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
*pb++ = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
}
// Make sure that there are always enough slots available for an
@@ -296,13 +299,12 @@ void SpaceBitmap::InOrderWalk(SpaceBitmap::Callback* callback, void* arg) {
for (uintptr_t i = 0; i <= end; ++i) {
word w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
WalkFieldsInOrder(visited.get(), callback, obj, arg);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
}
}
diff --git a/src/space_bitmap.h b/src/space_bitmap.h
index fa79d5daef..adf1996afe 100644
--- a/src/space_bitmap.h
+++ b/src/space_bitmap.h
@@ -60,7 +60,7 @@ class SpaceBitmap {
// Pack the bits in backwards so they come out in address order when using CLZ.
static word OffsetToMask(uintptr_t offset_) {
- return 1 << (sizeof(word) * 8 - 1 - (offset_ / kAlignment) % kBitsPerWord);
+ return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord);
}
inline void Set(const Object* obj) {
@@ -112,21 +112,67 @@ class SpaceBitmap {
template <typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const {
- size_t start = OffsetToIndex(visit_begin - heap_begin_);
- size_t end = OffsetToIndex(visit_end - heap_begin_ - 1);
- for (size_t i = start; i <= end; i++) {
- word w = bitmap_begin_[i];
+ DCHECK_LT(visit_begin, visit_end);
+
+ const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
+ const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
+
+ size_t word_start = bit_index_start / kBitsPerWord;
+ size_t word_end = bit_index_end / kBitsPerWord;
+ DCHECK_LT(word_end * kWordSize, Size());
+
+  // Word containing the left edge; the leading left_bits bits are masked off below.
+ size_t edge_word = bitmap_begin_[word_start];
+
+ // Handle bits on the left first as a special case
+ size_t left_bits = bit_index_start & (kBitsPerWord - 1);
+ if (left_bits != 0) {
+ edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
+ }
+
+ // If word_start == word_end then handle this case at the same place we handle the right edge.
+ if (edge_word != 0 && word_start < word_end) {
+ uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
+ do {
+ const size_t shift = CLZ(edge_word);
+ Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ } while (edge_word != 0);
+ }
+ word_start++;
+
+ for (size_t i = word_start; i < word_end; i++) {
+ size_t w = bitmap_begin_[i];
if (w != 0) {
- word high_bit = 1 << (kBitsPerWord - 1);
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
- const int shift = CLZ(w);
+ const size_t shift = CLZ(w);
Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- w &= ~(high_bit >> shift);
+ w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
} while (w != 0);
}
}
+
+ // Handle the right edge, and also the left edge if both edges are on the same word.
+ size_t right_bits = bit_index_end & (kBitsPerWord - 1);
+
+  // Reload the word containing the right edge, unless the range started and ended in the same
+  // word, in which case edge_word already has the left bits trimmed off.
+ if (word_start <= word_end) {
+ edge_word = bitmap_begin_[word_end];
+ }
+
+ // Bits that we trim off the right.
+ const size_t trim_bits = kBitsPerWord - 1 - right_bits;
+ edge_word &= ~((1 << trim_bits) - 1);
+ uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
+ while (edge_word != 0) {
+ const size_t shift = CLZ(edge_word);
+ Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ }
}
void Walk(Callback* callback, void* arg);
@@ -140,6 +186,33 @@ class SpaceBitmap {
uintptr_t base, uintptr_t max,
SweepCallback* thunk, void* arg);
+ // Starting address of our internal storage.
+ word* Begin() {
+ return bitmap_begin_;
+ }
+
+ // Size of our internal storage
+ size_t Size() const {
+ return bitmap_size_;
+ }
+
+  // Size in bytes of the memory that the bitmap spans.
+ size_t HeapSize() const {
+ return IndexToOffset(Size() / kWordSize);
+ }
+
+ uintptr_t HeapBegin() const {
+ return heap_begin_;
+ }
+
+ // The maximum address which the bitmap can span. (HeapBegin() <= object < HeapLimit()).
+ uintptr_t HeapLimit() const {
+ return HeapBegin() + static_cast<uintptr_t>(HeapSize());
+ }
+
+  // Set the maximum address which can be covered by the bitmap.
+ void SetHeapLimit(uintptr_t new_end);
+
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
@@ -172,7 +245,7 @@ class SpaceBitmap {
word* const bitmap_begin_;
// Size of this bitmap.
- const size_t bitmap_size_;
+ size_t bitmap_size_;
// The base address of the heap, which corresponds to the word containing the first bit in the
// bitmap.
diff --git a/src/space_bitmap_test.cc b/src/space_bitmap_test.cc
new file mode 100644
index 0000000000..a2f1afc304
--- /dev/null
+++ b/src/space_bitmap_test.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space_bitmap.h"
+
+#include "common_test.h"
+#include "dlmalloc.h"
+#include "globals.h"
+#include "UniquePtr.h"
+
+#include <stdint.h>
+
+namespace art {
+
+class SpaceBitmapTest : public CommonTest {
+ public:
+};
+
+TEST_F(SpaceBitmapTest, Init) {
+ byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+ size_t heap_capacity = 16 * MB;
+ UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test-bitmap",
+ heap_begin, heap_capacity));
+ EXPECT_TRUE(space_bitmap.get() != NULL);
+}
+
+class BitmapVerify {
+ public:
+ BitmapVerify(SpaceBitmap* bitmap, const Object* begin, const Object* end)
+ : bitmap_(bitmap),
+ begin_(begin),
+ end_(end) {}
+
+ void operator ()(const Object* obj) {
+ EXPECT_TRUE(obj >= begin_);
+ EXPECT_TRUE(obj <= end_);
+ EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
+ }
+
+ SpaceBitmap* bitmap_;
+ const Object* begin_;
+ const Object* end_;
+};
+
+TEST_F(SpaceBitmapTest, ScanRange) {
+ byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+ size_t heap_capacity = 16 * MB;
+
+ UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test-bitmap",
+ heap_begin, heap_capacity));
+ EXPECT_TRUE(space_bitmap.get() != NULL);
+
+  // Set every other bit (objects whose address is not 16-byte aligned) in the first
+  // kBitsPerWord * 3 entries.
+  for (size_t j = 0; j < kBitsPerWord * 3; ++j) {
+ const Object* obj = reinterpret_cast<Object*>(heap_begin + j * SpaceBitmap::kAlignment);
+ if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
+ space_bitmap->Set(obj);
+ }
+ }
+ // Try every possible starting bit in the first word. Then for each starting bit, try each
+ // possible length up to a maximum of kBitsPerWord * 2 - 1 bits.
+ // This handles all the cases, having runs which start and end on the same word, and different
+ // words.
+ for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
+ Object* start = reinterpret_cast<Object*>(heap_begin + i * SpaceBitmap::kAlignment);
+ for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
+ Object* end = reinterpret_cast<Object*>(heap_begin + (i + j) * SpaceBitmap::kAlignment);
+ BitmapVerify(space_bitmap.get(), start, end);
+ }
+ }
+}
+
+} // namespace art
diff --git a/src/space_test.cc b/src/space_test.cc
index 7ac049357f..c1c1dca895 100644
--- a/src/space_test.cc
+++ b/src/space_test.cc
@@ -70,6 +70,77 @@ TEST_F(SpaceTest, Init) {
}
}
+// TODO: This test is not very good, we should improve it.
+// The test should do more allocations before the creation of the ZygoteSpace, and then do
+// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
+// the GC works with the ZygoteSpace.
+TEST_F(SpaceTest, ZygoteSpace) {
+ AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ ASSERT_TRUE(space != NULL);
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+ Runtime::Current()->GetHeap()->AddSpace(space);
+
+ // Succeeds, fits without adjusting the footprint limit.
+ Object* ptr1 = space->AllocWithoutGrowth(1 * MB);
+ EXPECT_TRUE(ptr1 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ Object* ptr2 = space->AllocWithoutGrowth(8 * MB);
+ EXPECT_TRUE(ptr2 == NULL);
+
+ // Succeeds, adjusts the footprint.
+ Object* ptr3 = space->AllocWithGrowth(8 * MB);
+ EXPECT_TRUE(ptr3 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ Object* ptr4 = space->AllocWithoutGrowth(8 * MB);
+ EXPECT_TRUE(ptr4 == NULL);
+
+ // Also fails, requires a higher allowed footprint.
+ Object* ptr5 = space->AllocWithGrowth(8 * MB);
+ EXPECT_TRUE(ptr5 == NULL);
+
+ // Release some memory.
+ size_t free3 = space->AllocationSize(ptr3);
+ space->Free(ptr3);
+ EXPECT_LE(8U * MB, free3);
+
+ // Succeeds, now that memory has been freed.
+ void* ptr6 = space->AllocWithGrowth(9 * MB);
+ EXPECT_TRUE(ptr6 != NULL);
+
+ // Final clean up.
+ size_t free1 = space->AllocationSize(ptr1);
+ space->Free(ptr1);
+ EXPECT_LE(1U * MB, free1);
+
+ // Make sure that the zygote space isn't directly at the start of the space.
+ space->AllocWithoutGrowth(1U * MB);
+ space = space->CreateZygoteSpace();
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+ Runtime::Current()->GetHeap()->AddSpace(space);
+
+ // Succeeds, fits without adjusting the footprint limit.
+ ptr1 = space->AllocWithoutGrowth(1 * MB);
+ EXPECT_TRUE(ptr1 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ ptr2 = space->AllocWithoutGrowth(8 * MB);
+ EXPECT_TRUE(ptr2 == NULL);
+
+ // Succeeds, adjusts the footprint.
+ ptr3 = space->AllocWithGrowth(2 * MB);
+ EXPECT_TRUE(ptr3 != NULL);
+ space->Free(ptr3);
+
+ // Final clean up.
+ free1 = space->AllocationSize(ptr1);
+ space->Free(ptr1);
+ EXPECT_LE(1U * MB, free1);
+}
+
TEST_F(SpaceTest, AllocAndFree) {
AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc
index cc01224ebd..5e98b1e8a4 100644
--- a/src/verifier/method_verifier.cc
+++ b/src/verifier/method_verifier.cc
@@ -2627,7 +2627,7 @@ Method* MethodVerifier::VerifyInvocationArgs(const DecodedInstruction& dec_insn,
if (is_super) {
DCHECK(method_type == METHOD_VIRTUAL);
const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
- if (super.IsConflict()) { // unknown super class
+ if (super.IsUnresolvedTypes()) {
Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
<< PrettyMethod(method_idx_, *dex_file_)
<< " to super " << PrettyMethod(res_method);
diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc
index dd54b5fc38..8d1df22c65 100644
--- a/src/verifier/reg_type.cc
+++ b/src/verifier/reg_type.cc
@@ -43,13 +43,41 @@ static const char* type_strings[] = {
"Uninitialized This Reference",
"Unresolved And Uninitialized Reference",
"Unresolved And Uninitialized This Reference",
+ "Unresolved Merged References",
+ "Unresolved Super Class",
"Reference",
};
-std::string RegType::Dump() const {
+std::string RegType::Dump(const RegTypeCache* reg_types) const {
DCHECK(type_ >= kRegTypeUndefined && type_ <= kRegTypeReference);
+ DCHECK(arraysize(type_strings) == (kRegTypeReference + 1));
std::string result;
- if (IsConstant()) {
+ if (IsUnresolvedMergedReference()) {
+ if (reg_types == NULL) {
+ std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
+ result += StringPrintf("UnresolvedMergedReferences(%d, %d)", refs.first, refs.second);
+ } else {
+ std::set<uint16_t> types = GetMergedTypes(reg_types);
+ result += "UnresolvedMergedReferences(";
+ typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
+ It it = types.begin();
+ result += reg_types->GetFromId(*it).Dump(reg_types);
+ for(++it; it != types.end(); ++it) {
+ result += ", ";
+ result += reg_types->GetFromId(*it).Dump(reg_types);
+ }
+ result += ")";
+ }
+ } else if (IsUnresolvedSuperClass()) {
+ uint16_t super_type_id = GetUnresolvedSuperClassChildId();
+ if (reg_types == NULL) {
+ result += StringPrintf("UnresolvedSuperClass(%d)", super_type_id);
+ } else {
+ result += "UnresolvedSuperClass(";
+ result += reg_types->GetFromId(super_type_id).Dump(reg_types);
+ result += ")";
+ }
+ } else if (IsConstant()) {
uint32_t val = ConstantValue();
if (val == 0) {
result = "Zero";
@@ -85,6 +113,31 @@ const RegType& RegType::HighHalf(RegTypeCache* cache) const {
}
}
+std::set<uint16_t> RegType::GetMergedTypes(const RegTypeCache* cache) const {
+ std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
+ const RegType& left = cache->GetFromId(refs.first);
+ const RegType& right = cache->GetFromId(refs.second);
+ std::set<uint16_t> types;
+ if (left.IsUnresolvedMergedReference()) {
+ types = left.GetMergedTypes(cache);
+ } else {
+ types.insert(refs.first);
+ }
+ if (right.IsUnresolvedMergedReference()) {
+ std::set<uint16_t> right_types = right.GetMergedTypes(cache);
+ types.insert(right_types.begin(), right_types.end());
+ } else {
+ types.insert(refs.second);
+ }
+#ifndef NDEBUG
+ typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
+ for(It it = types.begin(); it != types.end(); ++it) {
+ CHECK(!cache->GetFromId(*it).IsUnresolvedMergedReference());
+ }
+#endif
+ return types;
+}
+
const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
if (!IsUnresolvedTypes()) {
Class* super_klass = GetClass()->GetSuperClass();
@@ -94,8 +147,13 @@ const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
return cache->Zero();
}
} else {
- // TODO: handle unresolved type cases better?
- return cache->Conflict();
+ if (!IsUnresolvedMergedReference() && !IsUnresolvedSuperClass() &&
+ GetDescriptor()->CharAt(0) == '[') {
+ // Super class of all arrays is Object.
+ return cache->JavaLangObject();
+ } else {
+ return cache->FromUnresolvedSuperClass(*this);
+ }
}
}
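// Behaviour sketch of the new unresolved-super handling, inferred from this function together
// with RegTypeCache::FromUnresolvedSuperClass() later in this change:
//   - an unresolved array type (e.g. "[Lcom/example/Foo;") now reports java.lang.Object as its
//     super class, since the super class of every array is Object;
//   - any other unresolved type gets a synthetic kRegTypeUnresolvedSuperClass entry whose low
//     16 bits hold the cache id of the child type, so verification can keep reasoning about it
//     instead of collapsing to Conflict as before.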
@@ -157,12 +215,8 @@ bool RegType::IsAssignableFrom(const RegType& src) const {
GetClass()->IsAssignableFrom(src.GetClass())) {
// We're assignable from the Class point-of-view
return true;
- } else if (IsUnresolvedTypes() && src.IsUnresolvedTypes() &&
- GetDescriptor() == src.GetDescriptor()) {
- // Two unresolved types (maybe one is uninitialized), we're clearly assignable if the
- // descriptor is the same.
- return true;
} else {
+ // TODO: unresolved types are only assignable for null, Object and equality currently.
return false;
}
}
@@ -248,10 +302,16 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty
return SelectNonConstant(*this, incoming_type); // 0 MERGE ref => ref
} else if (IsJavaLangObject() || incoming_type.IsJavaLangObject()) {
return reg_types->JavaLangObject(); // Object MERGE ref => Object
- } else if (IsUninitializedTypes() || incoming_type.IsUninitializedTypes() ||
- IsUnresolvedTypes() || incoming_type.IsUnresolvedTypes()) {
- // Can only merge an unresolved or uninitialized type with itself, 0 or Object, we've already
- // checked these so => Conflict
+ } else if (IsUnresolvedTypes() || incoming_type.IsUnresolvedTypes()) {
+    // We only know how to merge an unresolved type with itself, 0 or Object; those cases are
+    // handled above. Here we have two distinct types, at least one of them unresolved, and we
+    // don't know how to merge them. Create a new unresolved merged type that records both
+    // inputs and lets the rest of the unresolved mechanics continue.
+ return reg_types->FromUnresolvedMerge(*this, incoming_type);
+ } else if (IsUninitializedTypes() || incoming_type.IsUninitializedTypes()) {
+ // Something that is uninitialized hasn't had its constructor called. Mark any merge
+ // of this type with something that is initialized as conflicting. The cases of a merge
+ // with itself, 0 or Object are handled above.
return reg_types->Conflict();
} else { // Two reference types, compute Join
Class* c1 = GetClass();
diff --git a/src/verifier/reg_type.h b/src/verifier/reg_type.h
index 41a925542b..7e8fca1051 100644
--- a/src/verifier/reg_type.h
+++ b/src/verifier/reg_type.h
@@ -62,6 +62,8 @@ class RegType {
kRegTypeUnresolvedAndUninitializedReference, // Freshly allocated unresolved reference type.
// Freshly allocated unresolved reference passed as "this".
kRegTypeUnresolvedAndUninitializedThisReference,
+ kRegTypeUnresolvedMergedReference, // Tree of merged references (at least 1 is unresolved).
+ kRegTypeUnresolvedSuperClass, // Super class of an unresolved type.
kRegTypeReference, // Reference type.
};
@@ -88,6 +90,8 @@ class RegType {
bool IsUnresolvedAndUninitializedThisReference() const {
return type_ == kRegTypeUnresolvedAndUninitializedThisReference;
}
+ bool IsUnresolvedMergedReference() const { return type_ == kRegTypeUnresolvedMergedReference; }
+ bool IsUnresolvedSuperClass() const { return type_ == kRegTypeUnresolvedSuperClass; }
bool IsReference() const { return type_ == kRegTypeReference; }
bool IsUninitializedTypes() const {
return IsUninitializedReference() || IsUninitializedThisReference() ||
@@ -95,7 +99,8 @@ class RegType {
}
bool IsUnresolvedTypes() const {
return IsUnresolvedReference() || IsUnresolvedAndUninitializedReference() ||
- IsUnresolvedAndUninitializedThisReference();
+ IsUnresolvedAndUninitializedThisReference() || IsUnresolvedMergedReference() ||
+ IsUnresolvedSuperClass();
}
bool IsLowHalf() const { return type_ == kRegTypeLongLo ||
type_ == kRegTypeDoubleLo ||
@@ -122,7 +127,7 @@ class RegType {
// approximate to the actual constant value by virtue of merging.
int32_t ConstantValue() const {
DCHECK(IsConstant());
- return allocation_pc_or_constant_;
+ return allocation_pc_or_constant_or_merged_types_;
}
bool IsZero() const { return IsConstant() && ConstantValue() == 0; }
@@ -146,14 +151,18 @@ class RegType {
bool IsReferenceTypes() const {
return IsNonZeroReferenceTypes() || IsZero();
}
+
bool IsNonZeroReferenceTypes() const {
return IsReference() || IsUnresolvedReference() ||
IsUninitializedReference() || IsUninitializedThisReference() ||
- IsUnresolvedAndUninitializedReference() || IsUnresolvedAndUninitializedThisReference();
+ IsUnresolvedAndUninitializedReference() || IsUnresolvedAndUninitializedThisReference() ||
+ IsUnresolvedMergedReference() || IsUnresolvedSuperClass();
}
+
bool IsCategory1Types() const {
return (type_ >= kRegType1nrSTART && type_ <= kRegType1nrEND) || IsConstant();
}
+
bool IsCategory2Types() const {
return IsLowHalf(); // Don't expect explicit testing of high halves
}
@@ -185,7 +194,7 @@ class RegType {
uint32_t GetAllocationPc() const {
DCHECK(IsUninitializedTypes());
- return allocation_pc_or_constant_;
+ return allocation_pc_or_constant_or_merged_types_;
}
Class* GetClass() const {
@@ -200,7 +209,7 @@ class RegType {
}
bool IsArrayTypes() const {
- if (IsUnresolvedTypes()) {
+ if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
return GetDescriptor()->CharAt(0) == '[';
} else if (IsReference()) {
return GetClass()->IsArrayClass();
@@ -210,7 +219,7 @@ class RegType {
}
bool IsObjectArrayTypes() const {
- if (IsUnresolvedTypes()) {
+ if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
// Primitive arrays will always resolve
DCHECK(GetDescriptor()->CharAt(1) == 'L' || GetDescriptor()->CharAt(1) == '[');
return GetDescriptor()->CharAt(0) == '[';
@@ -258,7 +267,7 @@ class RegType {
}
String* GetDescriptor() const {
- DCHECK(IsUnresolvedTypes());
+ DCHECK(IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass());
DCHECK(klass_or_descriptor_ != NULL);
DCHECK(klass_or_descriptor_->GetClass()->IsStringClass());
return down_cast<String*>(klass_or_descriptor_);
@@ -268,9 +277,25 @@ class RegType {
return cache_id_;
}
+ // The top of a tree of merged types.
+ std::pair<uint16_t, uint16_t> GetTopMergedTypes() const {
+ DCHECK(IsUnresolvedMergedReference());
+ uint16_t type1 = static_cast<uint16_t>(allocation_pc_or_constant_or_merged_types_ & 0xFFFF);
+ uint16_t type2 = static_cast<uint16_t>(allocation_pc_or_constant_or_merged_types_ >> 16);
+ return std::pair<uint16_t, uint16_t>(type1, type2);
+ }
+
+ // The complete set of merged types.
+ std::set<uint16_t> GetMergedTypes(const RegTypeCache* cache) const;
+
+ uint16_t GetUnresolvedSuperClassChildId() const {
+ DCHECK(IsUnresolvedSuperClass());
+ return static_cast<uint16_t>(allocation_pc_or_constant_or_merged_types_ & 0xFFFF);
+ }
+
const RegType& GetSuperClass(RegTypeCache* cache) const;
- std::string Dump() const;
+ std::string Dump(const RegTypeCache* reg_types = NULL) const;
// Can this type access other?
bool CanAccess(const RegType& other) const;
@@ -306,12 +331,15 @@ class RegType {
private:
friend class RegTypeCache;
- RegType(Type type, Object* klass_or_descriptor, uint32_t allocation_pc_or_constant, uint16_t cache_id)
+ RegType(Type type, Object* klass_or_descriptor,
+ uint32_t allocation_pc_or_constant_or_merged_types, uint16_t cache_id)
: type_(type), klass_or_descriptor_(klass_or_descriptor),
- allocation_pc_or_constant_(allocation_pc_or_constant), cache_id_(cache_id) {
- DCHECK(IsConstant() || IsUninitializedTypes() || allocation_pc_or_constant == 0);
+ allocation_pc_or_constant_or_merged_types_(allocation_pc_or_constant_or_merged_types),
+ cache_id_(cache_id) {
+ DCHECK(IsConstant() || IsUninitializedTypes() || IsUnresolvedMergedReference() ||
+ IsUnresolvedSuperClass() || allocation_pc_or_constant_or_merged_types == 0);
if (!IsConstant() && !IsLongConstant() && !IsLongConstantHigh() && !IsUndefined() &&
- !IsConflict()) {
+ !IsConflict() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
DCHECK(klass_or_descriptor != NULL);
DCHECK(IsUnresolvedTypes() || klass_or_descriptor_->IsClass());
DCHECK(!IsUnresolvedTypes() || klass_or_descriptor_->GetClass()->IsStringClass());
@@ -327,7 +355,7 @@ class RegType {
// - if IsConstant() holds a 32bit constant value
  // - if IsReference() holds the allocation_pc or kInitArgAddr for an initialized reference or
// kUninitThisArgAddr for an uninitialized this ptr
- const uint32_t allocation_pc_or_constant_;
+ const uint32_t allocation_pc_or_constant_or_merged_types_;
// A RegType cache densely encodes types, this is the location in the cache for this type
const uint16_t cache_id_;
diff --git a/src/verifier/reg_type_cache.cc b/src/verifier/reg_type_cache.cc
index bb05e7e103..37086c9acc 100644
--- a/src/verifier/reg_type_cache.cc
+++ b/src/verifier/reg_type_cache.cc
@@ -143,6 +143,60 @@ const RegType& RegTypeCache::FromClass(Class* klass) {
}
}
+const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
+ std::set<uint16_t> types;
+ if (left.IsUnresolvedMergedReference()) {
+ types = left.GetMergedTypes(this);
+ } else {
+ types.insert(left.GetId());
+ }
+ if (right.IsUnresolvedMergedReference()) {
+ std::set<uint16_t> right_types = right.GetMergedTypes(this);
+ types.insert(right_types.begin(), right_types.end());
+ } else {
+ types.insert(right.GetId());
+ }
+ // Check if entry already exists.
+ for (size_t i = RegType::kRegTypeLastFixedLocation + 1; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsUnresolvedMergedReference()) {
+ std::set<uint16_t> cur_entry_types = cur_entry->GetMergedTypes(this);
+ if (cur_entry_types == types) {
+ return *cur_entry;
+ }
+ }
+ }
+ // Create entry.
+ uint32_t merged_ids = static_cast<uint32_t>(left.GetId()) << 16 |
+ static_cast<uint32_t>(right.GetId());
+ RegType* entry = new RegType(RegType::kRegTypeUnresolvedMergedReference, NULL, merged_ids,
+ entries_.size());
+ entries_.push_back(entry);
+#ifndef NDEBUG
+ std::set<uint16_t> check_types = entry->GetMergedTypes(this);
+ CHECK(check_types == types);
+#endif
+ return *entry;
+}
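// Worked example of the id packing above (ids purely illustrative): with left.GetId() == 5 and
// right.GetId() == 9, merged_ids == (5u << 16) | 9u == 0x00050009. RegType::GetTopMergedTypes()
// then returns the pair (low half, high half) == (9, 5), and GetMergedTypes() recursively
// flattens any nested merged entry on either side into the full set {5, 9}.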
+
+const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
+ // Check if entry already exists.
+ for (size_t i = RegType::kRegTypeLastFixedLocation + 1; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsUnresolvedSuperClass()) {
+ uint16_t unresolved_super_child_id = cur_entry->GetUnresolvedSuperClassChildId();
+ if (unresolved_super_child_id == child.GetId()) {
+ return *cur_entry;
+ }
+ }
+ }
+ // Create entry.
+ RegType* entry = new RegType(RegType::kRegTypeUnresolvedSuperClass, NULL, child.GetId(),
+ entries_.size());
+ entries_.push_back(entry);
+ return *entry;
+}
+
const RegType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
RegType* entry;
if (type.IsUnresolvedTypes()) {
diff --git a/src/verifier/reg_type_cache.h b/src/verifier/reg_type_cache.h
index 765809c733..4ba667b368 100644
--- a/src/verifier/reg_type_cache.h
+++ b/src/verifier/reg_type_cache.h
@@ -33,7 +33,7 @@ class RegTypeCache {
STLDeleteElements(&entries_);
}
- const RegType& GetFromId(uint16_t id) {
+ const RegType& GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
RegType* result = entries_[id];
DCHECK(result != NULL);
@@ -45,6 +45,8 @@ class RegTypeCache {
const RegType& FromCat1Const(int32_t value);
const RegType& FromDescriptor(ClassLoader* loader, const char* descriptor);
const RegType& FromType(RegType::Type);
+ const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right);
+ const RegType& FromUnresolvedSuperClass(const RegType& child);
const RegType& Boolean() { return FromType(RegType::kRegTypeBoolean); }
const RegType& Byte() { return FromType(RegType::kRegTypeByte); }
diff --git a/src/verifier/register_line.cc b/src/verifier/register_line.cc
index d5477a3c91..d6aca98eb1 100644
--- a/src/verifier/register_line.cc
+++ b/src/verifier/register_line.cc
@@ -130,6 +130,20 @@ void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
DCHECK_GT(changed, 0u);
}
+std::string RegisterLine::Dump() const {
+ std::string result;
+ for (size_t i = 0; i < num_regs_; i++) {
+ result += StringPrintf("%zd:[", i);
+ result += GetRegisterType(i).Dump(verifier_->GetRegTypeCache());
+ result += "],";
+ }
+ typedef std::deque<uint32_t>::const_iterator It; // TODO: C++0x auto
+ for (It it = monitors_.begin(), end = monitors_.end(); it != end ; ++it) {
+ result += StringPrintf("{%d},", *it);
+ }
+ return result;
+}
+
void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
for (size_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
diff --git a/src/verifier/register_line.h b/src/verifier/register_line.h
index e4066783bd..9664a5b3e4 100644
--- a/src/verifier/register_line.h
+++ b/src/verifier/register_line.h
@@ -98,19 +98,7 @@ class RegisterLine {
reg_to_lock_depths_ = src->reg_to_lock_depths_;
}
- std::string Dump() const {
- std::string result;
- for (size_t i = 0; i < num_regs_; i++) {
- result += StringPrintf("%zd:[", i);
- result += GetRegisterType(i).Dump();
- result += "],";
- }
- typedef std::deque<uint32_t>::const_iterator It; // TODO: C++0x auto
- for (It it = monitors_.begin(), end = monitors_.end(); it != end ; ++it) {
- result += StringPrintf("{%d},", *it);
- }
- return result;
- }
+ std::string Dump() const;
void FillWithGarbage() {
memset(line_.get(), 0xf1, num_regs_ * sizeof(uint16_t));