-rw-r--r--  MODULE_LICENSE_APACHE2                     |    0
-rw-r--r--  NOTICE                                     |  190
-rw-r--r--  build/Android.gtest.mk                     |    1
-rw-r--r--  compiler/dex/compiler_ir.h                 |    2
-rw-r--r--  compiler/dex/frontend.cc                   |    7
-rw-r--r--  compiler/dex/frontend.h                    |    1
-rw-r--r--  compiler/dex/local_value_numbering.cc      |   16
-rw-r--r--  compiler/dex/mir_dataflow.cc               |    9
-rw-r--r--  compiler/dex/mir_graph.cc                  |   25
-rw-r--r--  compiler/dex/mir_graph.h                   |    4
-rw-r--r--  compiler/dex/quick/mir_to_lir-inl.h        |    4
-rw-r--r--  compiler/driver/compiler_driver.cc         |   26
-rw-r--r--  compiler/driver/compiler_driver.h          |   26
-rw-r--r--  compiler/driver/compiler_driver_test.cc    |    3
-rw-r--r--  compiler/image_test.cc                     |    5
-rw-r--r--  compiler/leb128_encoder_test.cc            |  116
-rw-r--r--  compiler/oat_test.cc                       |    7
-rw-r--r--  compiler/oat_writer.cc                     |   34
-rw-r--r--  compiler/oat_writer.h                      |    3
-rw-r--r--  dex2oat/dex2oat.cc                         |   13
-rw-r--r--  disassembler/disassembler_arm.cc           |    4
-rw-r--r--  runtime/Android.mk                         |    1
-rw-r--r--  runtime/base/histogram-inl.h               |   18
-rw-r--r--  runtime/base/histogram.h                   |    2
-rw-r--r--  runtime/base/histogram_test.cc             |   22
-rw-r--r--  runtime/base/timing_logger.cc              |   60
-rw-r--r--  runtime/base/timing_logger.h               |   18
-rw-r--r--  runtime/base/timing_logger_test.cc         |   30
-rw-r--r--  runtime/common_test.h                      |    3
-rw-r--r--  runtime/debugger.cc                        |   59
-rw-r--r--  runtime/dex_file.cc                        |    6
-rw-r--r--  runtime/gc/collector/garbage_collector.h   |    4
-rw-r--r--  runtime/gc/collector/mark_sweep.cc         |  213
-rw-r--r--  runtime/gc/collector/mark_sweep.h          |   16
-rw-r--r--  runtime/gc/collector/semi_space.cc         |  231
-rw-r--r--  runtime/gc/collector/semi_space.h          |    5
-rw-r--r--  runtime/gc/heap.cc                         |  208
-rw-r--r--  runtime/gc/heap.h                          |   90
-rw-r--r--  runtime/gc/reference_queue.cc              |  163
-rw-r--r--  runtime/gc/reference_queue.h               |   96
-rw-r--r--  runtime/mapping_table.h                    |   34
-rw-r--r--  runtime/thread_pool.cc                     |    2
-rw-r--r--  runtime/verifier/reg_type_cache.cc         |    4
43 files changed, 1035 insertions, 746 deletions
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000000..faed58a153
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,190 @@
+
+ Copyright (c) 2005-2013, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b07753c4fe..bed48ba999 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -22,6 +22,7 @@ TEST_COMMON_SRC_FILES := \
compiler/elf_writer_test.cc \
compiler/image_test.cc \
compiler/jni/jni_compiler_test.cc \
+ compiler/leb128_encoder_test.cc \
compiler/oat_test.cc \
compiler/output_stream_test.cc \
compiler/utils/dedupe_set_test.cc \
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 546ce4aee7..3798b459d1 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -94,7 +94,7 @@ struct CompilationUnit {
UniquePtr<MIRGraph> mir_graph; // MIR container.
UniquePtr<Backend> cg; // Target-specific codegen.
- base::TimingLogger timings;
+ TimingLogger timings;
};
} // namespace art
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index b8cd67e3e7..3dc1914c30 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -84,6 +84,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi
// (1 << kBBOpt) |
// (1 << kMatch) |
// (1 << kPromoteCompilerTemps) |
+ // (1 << kSuppressExceptionEdges) |
0;
static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes
@@ -157,7 +158,7 @@ void CompilationUnit::EndTiming() {
if (enable_debug & (1 << kDebugTimings)) {
timings.EndSplit();
LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
- LOG(INFO) << Dumpable<base::TimingLogger>(timings);
+ LOG(INFO) << Dumpable<TimingLogger>(timings);
}
}
@@ -212,7 +213,9 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
if (compiler_backend == kPortable) {
// Fused long branches not currently useful in bitcode.
- cu.disable_opt |= (1 << kBranchFusing);
+ cu.disable_opt |=
+ (1 << kBranchFusing) |
+ (1 << kSuppressExceptionEdges);
}
if (cu.instruction_set == kMips) {
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 43f68554b5..b9b4178890 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -56,6 +56,7 @@ enum opt_control_vector {
kMatch,
kPromoteCompilerTemps,
kBranchFusing,
+ kSuppressExceptionEdges,
};
// Force code generation paths for testing.
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 35d29235f2..75883b7bd6 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -380,7 +380,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
}
mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
}
- mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ if (mir->meta.throw_insn != NULL) {
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ }
// Use side effect to note range check completed.
(void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
// Establish value number for loaded register. Note use of memory version.
@@ -419,7 +421,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
}
mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
}
- mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ if (mir->meta.throw_insn != NULL) {
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ }
// Use side effect to note range check completed.
(void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
// Rev the memory version
@@ -443,7 +447,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
} else {
null_checked_.insert(base);
}
- mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ if (mir->meta.throw_insn != NULL) {
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ }
uint16_t field_ref = mir->dalvikInsn.vC;
uint16_t memory_version = GetMemoryVersion(base, field_ref);
if (opcode == Instruction::IGET_WIDE) {
@@ -473,7 +479,9 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
} else {
null_checked_.insert(base);
}
- mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ if (mir->meta.throw_insn != NULL) {
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ }
uint16_t field_ref = mir->dalvikInsn.vC;
AdvanceMemoryVersion(base, field_ref);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 11e19dc43f..d359ee2dfe 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1243,12 +1243,13 @@ bool MIRGraph::CountUses(struct BasicBlock* bb) {
if (mir->ssa_rep == NULL) {
continue;
}
- // Each level of nesting adds *16 to count, up to 3 levels deep.
- uint32_t weight = std::min(3U, static_cast<uint32_t>(bb->nesting_depth) * 4);
+ // Each level of nesting adds *100 to count, up to 3 levels deep.
+ uint32_t depth = std::min(3U, static_cast<uint32_t>(bb->nesting_depth));
+ uint32_t weight = std::max(1U, depth * 100);
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
int s_reg = mir->ssa_rep->uses[i];
raw_use_counts_.Increment(s_reg);
- use_counts_.Put(s_reg, use_counts_.Get(s_reg) + (1 << weight));
+ use_counts_.Put(s_reg, use_counts_.Get(s_reg) + weight);
}
if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
@@ -1267,7 +1268,7 @@ bool MIRGraph::CountUses(struct BasicBlock* bb) {
}
if (uses_method_star) {
raw_use_counts_.Increment(method_sreg_);
- use_counts_.Put(method_sreg_, use_counts_.Get(method_sreg_) + (1 << weight));
+ use_counts_.Put(method_sreg_, use_counts_.Get(method_sreg_) + weight);
}
}
}
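The mir_dataflow.cc hunk above replaces the old shift-based scaling (adding 1 << weight per use) with a linear weight clamped at three loop-nesting levels. A minimal standalone sketch of the new computation (a hypothetical free function; in ART this logic is inlined in MIRGraph::CountUses):

    #include <algorithm>
    #include <cstdint>

    // Weight one SSA use by loop-nesting depth: 1 at depth 0, then
    // 100/200/300, capped at three levels of nesting so deeply nested
    // blocks do not dominate the use counts.
    uint32_t UseWeight(uint32_t nesting_depth) {
      uint32_t depth = std::min(3U, nesting_depth);
      return std::max(1U, depth * 100);
    }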
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index cf758fc5da..deaf2ffe80 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -365,8 +365,8 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
}
/* Process instructions with the kSwitch flag */
-void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
- int flags) {
+BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
+ int width, int flags) {
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
int size;
@@ -437,6 +437,7 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_
/* create */ true, /* immed_pred_block_p */ NULL);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors->Insert(cur_block->id);
+ return cur_block;
}
/* Process instructions with the kThrow flag */
@@ -444,6 +445,9 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
int width, int flags, ArenaBitVector* try_block_addr,
const uint16_t* code_ptr, const uint16_t* code_end) {
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
+ bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
+ bool build_all_edges =
+ (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;
/* In try block */
if (in_try_block) {
@@ -473,7 +477,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
cur_block->successor_blocks->Insert(successor_block_info);
catch_block->predecessors->Insert(cur_block->id);
}
- } else {
+ } else if (build_all_edges) {
BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
cur_block->taken = eh_block->id;
block_list_.Insert(eh_block);
@@ -481,7 +485,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
eh_block->predecessors->Insert(cur_block->id);
}
- if (insn->dalvikInsn.opcode == Instruction::THROW) {
+ if (is_throw) {
cur_block->explicit_throw = true;
if (code_ptr < code_end) {
// Force creation of new block following THROW via side-effect
@@ -494,6 +498,16 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
}
}
+ if (!build_all_edges) {
+ /*
+ * Even though there is an exception edge here, control cannot return to this
+ * method. Thus, for the purposes of dataflow analysis and optimization, we can
+ * ignore the edge. Doing this reduces compile time, and increases the scope
+ * of the basic-block level optimization pass.
+ */
+ return cur_block;
+ }
+
/*
* Split the potentially-throwing instruction into two parts.
* The first half will be a pseudo-op that captures the exception
@@ -695,7 +709,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
code_ptr, code_end);
} else if (flags & Instruction::kSwitch) {
- ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
+ cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
}
current_offset_ += width;
BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
@@ -1100,6 +1114,7 @@ const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
void MIRGraph::DumpMIRGraph() {
BasicBlock* bb;
const char* block_type_names[] = {
+ "Null Block",
"Entry Block",
"Code Block",
"Exit Block",
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index a69dde0da3..8c20728a51 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -698,8 +698,8 @@ class MIRGraph {
void ProcessTryCatchBlocks();
BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
int flags, const uint16_t* code_ptr, const uint16_t* code_end);
- void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
- int flags);
+ BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
+ int flags);
BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
const uint16_t* code_end);
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 1a30b7aef0..f567b5c6dd 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -198,6 +198,10 @@ inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
SetupRegMask(&lir->u.m.use_mask, lir->operands[3]);
}
+ if (flags & REG_USE4) {
+ SetupRegMask(&lir->u.m.use_mask, lir->operands[4]);
+ }
+
if (flags & SETS_CCODES) {
lir->u.m.def_mask |= ENCODE_CCODE;
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index d74383e33b..b9df1d6f48 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -503,7 +503,7 @@ const std::vector<uint8_t>* CompilerDriver::CreateQuickToInterpreterBridge() con
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- base::TimingLogger& timings) {
+ TimingLogger& timings) {
DCHECK(!Runtime::Current()->IsStarted());
UniquePtr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
PreCompile(class_loader, dex_files, *thread_pool.get(), timings);
@@ -546,7 +546,7 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(
}
}
-void CompilerDriver::CompileOne(const mirror::ArtMethod* method, base::TimingLogger& timings) {
+void CompilerDriver::CompileOne(const mirror::ArtMethod* method, TimingLogger& timings) {
DCHECK(!Runtime::Current()->IsStarted());
Thread* self = Thread::Current();
jobject jclass_loader;
@@ -591,7 +591,7 @@ void CompilerDriver::CompileOne(const mirror::ArtMethod* method, base::TimingLog
}
void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -600,7 +600,7 @@ void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFi
}
void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
LoadImageClasses(timings);
Resolve(class_loader, dex_files, thread_pool, timings);
@@ -685,7 +685,7 @@ static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
}
// Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(base::TimingLogger& timings)
+void CompilerDriver::LoadImageClasses(TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
if (!IsImage()) {
return;
@@ -773,7 +773,7 @@ void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void
MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get());
}
-void CompilerDriver::UpdateImageClasses(base::TimingLogger& timings) {
+void CompilerDriver::UpdateImageClasses(TimingLogger& timings) {
if (IsImage()) {
timings.NewSplit("UpdateImageClasses");
@@ -1613,7 +1613,7 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
}
void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: we could resolve strings here, although the string table is largely filled with class
@@ -1632,7 +1632,7 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil
}
void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1686,7 +1686,7 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
}
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
timings.NewSplit("Verify Dex File");
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
@@ -2192,7 +2192,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
}
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
timings.NewSplit("InitializeNoClinit");
#ifndef NDEBUG
// Sanity check blacklist descriptors.
@@ -2210,7 +2210,7 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile&
void CompilerDriver::InitializeClasses(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -2219,7 +2219,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
}
void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -2300,7 +2300,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
}
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings) {
+ ThreadPool& thread_pool, TimingLogger& timings) {
timings.NewSplit("Compile Dex File");
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
&dex_file, thread_pool);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 9bfea6ff0a..7e8184975c 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -98,11 +98,11 @@ class CompilerDriver {
~CompilerDriver();
void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- base::TimingLogger& timings)
+ TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Compile a single Method
- void CompileOne(const mirror::ArtMethod* method, base::TimingLogger& timings)
+ void CompileOne(const mirror::ArtMethod* method, TimingLogger& timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const InstructionSet& GetInstructionSet() const {
@@ -340,43 +340,43 @@ class CompilerDriver {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings)
+ ThreadPool& thread_pool, TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- void LoadImageClasses(base::TimingLogger& timings);
+ void LoadImageClasses(TimingLogger& timings);
// Attempt to resolve all type, methods, fields, and strings
// referenced from code in the dex file following PathClassLoader
// ordering semantics.
void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings)
+ ThreadPool& thread_pool, TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings)
+ ThreadPool& thread_pool, TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings);
+ ThreadPool& thread_pool, TimingLogger& timings);
void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings)
+ ThreadPool& thread_pool, TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings)
+ ThreadPool& thread_pool, TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings)
+ ThreadPool& thread_pool, TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
- void UpdateImageClasses(base::TimingLogger& timings)
+ void UpdateImageClasses(TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, base::TimingLogger& timings);
+ ThreadPool& thread_pool, TimingLogger& timings);
void CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, base::TimingLogger& timings)
+ ThreadPool& thread_pool, TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index bfc93b3c8f..a5eb94f0e9 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -36,12 +36,13 @@ namespace art {
class CompilerDriverTest : public CommonTest {
protected:
void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) {
- base::TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
+ TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
timings.StartSplit("CompileAll");
compiler_driver_->CompileAll(class_loader,
Runtime::Current()->GetCompileTimeClassPath(class_loader),
timings);
MakeAllExecutable(class_loader);
+ timings.EndSplit();
}
void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 9d9c06401e..a8a9d2e461 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -46,7 +46,7 @@ TEST_F(ImageTest, WriteRead) {
{
jobject class_loader = NULL;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- base::TimingLogger timings("ImageTest::WriteRead", false, false);
+ TimingLogger timings("ImageTest::WriteRead", false, false);
timings.StartSplit("CompileAll");
#if defined(ART_USE_PORTABLE_COMPILER)
// TODO: we disable this for portable so the test executes in a reasonable amount of time.
@@ -60,13 +60,14 @@ TEST_F(ImageTest, WriteRead) {
ScopedObjectAccess soa(Thread::Current());
OatWriter oat_writer(class_linker->GetBootClassPath(),
- 0, 0, "", compiler_driver_.get());
+ 0, 0, "", compiler_driver_.get(), &timings);
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
class_linker->GetBootClassPath(),
oat_writer,
tmp_elf.GetFile());
ASSERT_TRUE(success);
+ timings.EndSplit();
}
}
// Workaround bug that mcld::Linker::emit closes tmp_elf by reopening as tmp_oat.
diff --git a/compiler/leb128_encoder_test.cc b/compiler/leb128_encoder_test.cc
new file mode 100644
index 0000000000..4fa80757c5
--- /dev/null
+++ b/compiler/leb128_encoder_test.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/histogram-inl.h"
+#include "common_test.h"
+#include "leb128.h"
+#include "leb128_encoder.h"
+
+namespace art {
+
+class Leb128Test : public CommonTest {};
+
+struct DecodeUnsignedLeb128TestCase {
+ uint32_t decoded;
+ uint8_t leb128_data[5];
+};
+
+static DecodeUnsignedLeb128TestCase uleb128_tests[] = {
+ {0, {0, 0, 0, 0, 0}},
+ {1, {1, 0, 0, 0, 0}},
+ {0x7F, {0x7F, 0, 0, 0, 0}},
+ {0x80, {0x80, 1, 0, 0, 0}},
+ {0x81, {0x81, 1, 0, 0, 0}},
+ {0xFF, {0xFF, 1, 0, 0, 0}},
+ {0x4000, {0x80, 0x80, 1, 0, 0}},
+ {0x4001, {0x81, 0x80, 1, 0, 0}},
+ {0x4081, {0x81, 0x81, 1, 0, 0}},
+ {0x0FFFFFFF, {0xFF, 0xFF, 0xFF, 0x7F, 0}},
+ {0xFFFFFFFF, {0xFF, 0xFF, 0xFF, 0xFF, 0xF}},
+};
+
+TEST_F(Leb128Test, Singles) {
+ // Test individual encodings.
+ for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
+ UnsignedLeb128EncodingVector builder;
+ builder.PushBack(uleb128_tests[i].decoded);
+ const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0];
+ const uint8_t* encoded_data_ptr = &builder.GetData()[0];
+ for (size_t j = 0; j < 5; ++j) {
+ if (j < builder.GetData().size()) {
+ EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
+ } else {
+ EXPECT_EQ(data_ptr[j], 0U) << " i = " << i << " j = " << j;
+ }
+ }
+ EXPECT_EQ(DecodeUnsignedLeb128(&data_ptr), uleb128_tests[i].decoded) << " i = " << i;
+ }
+}
+
+TEST_F(Leb128Test, Stream) {
+ // Encode a number of entries.
+ UnsignedLeb128EncodingVector builder;
+ for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
+ builder.PushBack(uleb128_tests[i].decoded);
+ }
+ const uint8_t* encoded_data_ptr = &builder.GetData()[0];
+ for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
+ const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0];
+ for (size_t j = 0; j < 5; ++j) {
+ if (data_ptr[j] != 0) {
+ EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j;
+ }
+ }
+ EXPECT_EQ(DecodeUnsignedLeb128(&encoded_data_ptr), uleb128_tests[i].decoded) << " i = " << i;
+ }
+}
+
+TEST_F(Leb128Test, Speed) {
+ UniquePtr<Histogram<uint64_t> > enc_hist(new Histogram<uint64_t>("Leb128EncodeSpeedTest", 5));
+ UniquePtr<Histogram<uint64_t> > dec_hist(new Histogram<uint64_t>("Leb128DecodeSpeedTest", 5));
+ UnsignedLeb128EncodingVector builder;
+ // Push back 1024 chunks of 1024 values measuring encoding speed.
+ uint64_t last_time = NanoTime();
+ for (size_t i = 0; i < 1024; i++) {
+ for (size_t j = 0; j < 1024; j++) {
+ builder.PushBack((i * 1024) + j);
+ }
+ uint64_t cur_time = NanoTime();
+ enc_hist->AddValue(cur_time - last_time);
+ last_time = cur_time;
+ }
+ // Verify encoding and measure decode speed.
+ const uint8_t* encoded_data_ptr = &builder.GetData()[0];
+ last_time = NanoTime();
+ for (size_t i = 0; i < 1024; i++) {
+ for (size_t j = 0; j < 1024; j++) {
+ EXPECT_EQ(DecodeUnsignedLeb128(&encoded_data_ptr), (i * 1024) + j);
+ }
+ uint64_t cur_time = NanoTime();
+ dec_hist->AddValue(cur_time - last_time);
+ last_time = cur_time;
+ }
+
+ Histogram<uint64_t>::CumulativeData enc_data;
+ enc_hist->CreateHistogram(&enc_data);
+ enc_hist->PrintConfidenceIntervals(std::cout, 0.99, enc_data);
+
+ Histogram<uint64_t>::CumulativeData dec_data;
+ dec_hist->CreateHistogram(&dec_data);
+ dec_hist->PrintConfidenceIntervals(std::cout, 0.99, dec_data);
+}
+
+} // namespace art
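For reference, the byte patterns in uleb128_tests follow the standard ULEB128 rule: seven payload bits per byte, with the continuation bit (0x80) set on every byte except the last. A minimal encoder producing those patterns (a sketch, not the UnsignedLeb128EncodingVector implementation):

    #include <cstdint>
    #include <vector>

    // Encode value as ULEB128, e.g. 0x80 -> {0x80, 0x01} and
    // 0xFFFFFFFF -> {0xFF, 0xFF, 0xFF, 0xFF, 0x0F}, matching the test table.
    void EncodeUnsignedLeb128(uint32_t value, std::vector<uint8_t>* out) {
      uint8_t byte = value & 0x7F;
      value >>= 7;
      while (value != 0) {
        out->push_back(byte | 0x80);  // more bytes follow
        byte = value & 0x7F;
        value >>= 7;
      }
      out->push_back(byte);  // final byte, continuation bit clear
    }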
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index c423f34f7f..fd0a69deea 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -67,6 +67,7 @@ class OatTest : public CommonTest {
};
TEST_F(OatTest, WriteRead) {
+ TimingLogger timings("CommonTest::WriteRead", false, false);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: make selectable
@@ -82,7 +83,7 @@ TEST_F(OatTest, WriteRead) {
insn_features, false, NULL, 2, true));
jobject class_loader = NULL;
if (kCompile) {
- base::TimingLogger timings("OatTest::WriteRead", false, false);
+ TimingLogger timings("OatTest::WriteRead", false, false);
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
}
@@ -92,7 +93,8 @@ TEST_F(OatTest, WriteRead) {
42U,
4096U,
"lue.art",
- compiler_driver_.get());
+ compiler_driver_.get(),
+ &timings);
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
class_linker->GetBootClassPath(),
@@ -101,7 +103,6 @@ TEST_F(OatTest, WriteRead) {
ASSERT_TRUE(success);
if (kCompile) { // OatWriter strips the code, regenerate to compare
- base::TimingLogger timings("CommonTest::WriteRead", false, false);
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
}
std::string error_msg;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 28fb1479d7..83824694ae 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -40,7 +40,8 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
uint32_t image_file_location_oat_checksum,
uint32_t image_file_location_oat_begin,
const std::string& image_file_location,
- const CompilerDriver* compiler)
+ const CompilerDriver* compiler,
+ TimingLogger* timings)
: compiler_driver_(compiler),
dex_files_(&dex_files),
image_file_location_oat_checksum_(image_file_location_oat_checksum),
@@ -77,12 +78,31 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
size_oat_class_status_(0),
size_oat_class_method_bitmaps_(0),
size_oat_class_method_offsets_(0) {
- size_t offset = InitOatHeader();
- offset = InitOatDexFiles(offset);
- offset = InitDexFiles(offset);
- offset = InitOatClasses(offset);
- offset = InitOatCode(offset);
- offset = InitOatCodeDexFiles(offset);
+ size_t offset;
+ {
+ TimingLogger::ScopedSplit split("InitOatHeader", timings);
+ offset = InitOatHeader();
+ }
+ {
+ TimingLogger::ScopedSplit split("InitOatDexFiles", timings);
+ offset = InitOatDexFiles(offset);
+ }
+ {
+ TimingLogger::ScopedSplit split("InitDexFiles", timings);
+ offset = InitDexFiles(offset);
+ }
+ {
+ TimingLogger::ScopedSplit split("InitOatClasses", timings);
+ offset = InitOatClasses(offset);
+ }
+ {
+ TimingLogger::ScopedSplit split("InitOatCode", timings);
+ offset = InitOatCode(offset);
+ }
+ {
+ TimingLogger::ScopedSplit split("InitOatCodeDexFiles", timings);
+ offset = InitOatCodeDexFiles(offset);
+ }
size_ = offset;
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
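The constructor now brackets each Init* phase in a TimingLogger::ScopedSplit, an RAII guard that opens a named split on construction and closes it when the enclosing block exits. A self-contained sketch of the same idea (a hypothetical ScopedTimer, not ART's class):

    #include <chrono>
    #include <cstdio>

    // RAII timer in the spirit of TimingLogger::ScopedSplit: records the
    // start time on construction and logs the elapsed time under a label
    // on destruction, so each phase is timed by plain block scoping.
    class ScopedTimer {
     public:
      explicit ScopedTimer(const char* label)
          : label_(label), start_(std::chrono::steady_clock::now()) {}
      ~ScopedTimer() {
        auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now() - start_).count();
        std::printf("%s: %lld ns\n", label_, static_cast<long long>(ns));
      }
     private:
      const char* const label_;
      const std::chrono::steady_clock::time_point start_;
    };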
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 5d947cfaea..64275e6bbb 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -67,7 +67,8 @@ class OatWriter {
uint32_t image_file_location_oat_checksum,
uint32_t image_file_location_oat_begin,
const std::string& image_file_location,
- const CompilerDriver* compiler);
+ const CompilerDriver* compiler,
+ TimingLogger* timings);
const OatHeader& GetOatHeader() const {
return *oat_header_;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 3781921927..8b232700b0 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -242,7 +242,7 @@ class Dex2Oat {
bool image,
UniquePtr<CompilerDriver::DescriptorSet>& image_classes,
bool dump_stats,
- base::TimingLogger& timings) {
+ TimingLogger& timings) {
// SirtRef and ClassLoader creation needs to come after Runtime::Create
jobject class_loader = NULL;
Thread* self = Thread::Current();
@@ -280,6 +280,7 @@ class Dex2Oat {
uint32_t image_file_location_oat_checksum = 0;
uint32_t image_file_location_oat_data_begin = 0;
if (!driver->IsImage()) {
+ TimingLogger::ScopedSplit split("Loading image checksum", &timings);
gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
image_file_location_oat_data_begin =
@@ -294,8 +295,10 @@ class Dex2Oat {
image_file_location_oat_checksum,
image_file_location_oat_data_begin,
image_file_location,
- driver.get());
+ driver.get(),
+ &timings);
+ TimingLogger::ScopedSplit split("Writing ELF", &timings);
if (!driver->WriteElf(android_root, is_host, dex_files, oat_writer, oat_file)) {
LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
return NULL;
@@ -600,7 +603,7 @@ static InstructionSetFeatures ParseFeatureList(std::string str) {
}
static int dex2oat(int argc, char** argv) {
- base::TimingLogger timings("compiler", false, false);
+ TimingLogger timings("compiler", false, false);
InitLogging(argv);
@@ -1091,7 +1094,7 @@ static int dex2oat(int argc, char** argv) {
if (is_host) {
if (dump_timing || (dump_slow_timing && timings.GetTotalNs() > MsToNs(1000))) {
- LOG(INFO) << Dumpable<base::TimingLogger>(timings);
+ LOG(INFO) << Dumpable<TimingLogger>(timings);
}
return EXIT_SUCCESS;
}
@@ -1133,7 +1136,7 @@ static int dex2oat(int argc, char** argv) {
timings.EndSplit();
if (dump_timing || (dump_slow_timing && timings.GetTotalNs() > MsToNs(1000))) {
- LOG(INFO) << Dumpable<base::TimingLogger>(timings);
+ LOG(INFO) << Dumpable<TimingLogger>(timings);
}
// Everything was successfully written, do an explicit exit here to avoid running Runtime
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 65f2383904..db51fabcc9 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -427,9 +427,9 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
args << Rt << "," << Rd << ", [" << Rn;
const char *sign = U ? "+" : "-";
if (P == 0 && W == 1) {
- args << "], #" << sign << imm8;
+ args << "], #" << sign << (imm8 << 2);
} else {
- args << ", #" << sign << imm8 << "]";
+ args << ", #" << sign << (imm8 << 2) << "]";
if (W == 1) {
args << "!";
}
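This disassembler fix reflects the Thumb-2 encoding of doubleword load/store immediates: imm8 counts 32-bit words, so the printed byte offset must be scaled by four. In sketch form:

    #include <cstdint>

    // Thumb-2 LDRD/STRD store their offset as a word count; the
    // disassembled byte offset is imm8 * 4. E.g. imm8 == 3 prints "#+12".
    uint32_t DoublewordOffsetBytes(uint8_t imm8) {
      return static_cast<uint32_t>(imm8) << 2;
    }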
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 97cbdd9ab5..60683d0d52 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -53,6 +53,7 @@ LIBART_COMMON_SRC_FILES := \
gc/collector/semi_space.cc \
gc/collector/sticky_mark_sweep.cc \
gc/heap.cc \
+ gc/reference_queue.cc \
gc/space/bump_pointer_space.cc \
gc/space/dlmalloc_space.cc \
gc/space/image_space.cc \
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index 0345266fbd..9e08ae6feb 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -170,20 +170,20 @@ inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double
os << FormatDuration(Max() * kAdjust, unit) << "\n";
}
-template <class Value> inline void Histogram<Value>::CreateHistogram(CumulativeData& out_data) {
+template <class Value> inline void Histogram<Value>::CreateHistogram(CumulativeData* out_data) {
DCHECK_GT(sample_size_, 0ull);
- out_data.freq_.clear();
- out_data.perc_.clear();
+ out_data->freq_.clear();
+ out_data->perc_.clear();
uint64_t accumulated = 0;
- out_data.freq_.push_back(accumulated);
- out_data.perc_.push_back(0.0);
+ out_data->freq_.push_back(accumulated);
+ out_data->perc_.push_back(0.0);
for (size_t idx = 0; idx < frequency_.size(); idx++) {
accumulated += frequency_[idx];
- out_data.freq_.push_back(accumulated);
- out_data.perc_.push_back(static_cast<double>(accumulated) / static_cast<double>(sample_size_));
+ out_data->freq_.push_back(accumulated);
+ out_data->perc_.push_back(static_cast<double>(accumulated) / static_cast<double>(sample_size_));
}
- DCHECK_EQ(out_data.freq_.back(), sample_size_);
- DCHECK_LE(std::abs(out_data.perc_.back() - 1.0), 0.001);
+ DCHECK_EQ(out_data->freq_.back(), sample_size_);
+ DCHECK_LE(std::abs(out_data->perc_.back() - 1.0), 0.001);
}
template <class Value>
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index 2a02cf4245..e22b6e17b2 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -47,7 +47,7 @@ template <class Value> class Histogram {
// cumulative_freq[i] = sum(frequency[j] : 0 < j < i )
// Accumulative summation of percentiles; which is the frequency / SampleSize
// cumulative_perc[i] = sum(frequency[j] / SampleSize : 0 < j < i )
- void CreateHistogram(CumulativeData& data);
+ void CreateHistogram(CumulativeData* data);
// Reset the cumulative values, next time CreateHistogram is called it will recreate the cache.
void Reset();
double Mean() const;
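CreateHistogram now takes its out-parameter by pointer rather than by non-const reference, making the mutation explicit at call sites. A usage sketch mirroring the updated tests below (the label and value are illustrative):

    #include <iostream>
    #include "base/histogram-inl.h"

    // After the signature change, the address-of at the call site marks
    // data as an output parameter.
    void DumpExample() {
      UniquePtr<Histogram<uint64_t> > hist(new Histogram<uint64_t>("Example", 5));
      hist->AddValue(42);
      Histogram<uint64_t>::CumulativeData data;
      hist->CreateHistogram(&data);  // was: hist->CreateHistogram(data)
      hist->PrintConfidenceIntervals(std::cout, 0.99, data);
    }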
diff --git a/runtime/base/histogram_test.cc b/runtime/base/histogram_test.cc
index 534440c64f..9d371f5754 100644
--- a/runtime/base/histogram_test.cc
+++ b/runtime/base/histogram_test.cc
@@ -85,7 +85,7 @@ TEST(Histtest, Percentile) {
hist->AddValue(145);
hist->AddValue(155);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
EXPECT_EQ(875, static_cast<int>(PerValue * 10));
}
@@ -117,7 +117,7 @@ TEST(Histtest, UpdateRange) {
hist->AddValue(200);
hist->AddValue(205);
hist->AddValue(212);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
std::string text;
@@ -132,7 +132,6 @@ TEST(Histtest, UpdateRange) {
TEST(Histtest, Reset) {
UniquePtr<Histogram<uint64_t> > hist(new Histogram<uint64_t>("Reset", 5));
- Histogram<uint64_t>::CumulativeData data;
double PerValue;
hist->AddValue(0);
@@ -160,7 +159,8 @@ TEST(Histtest, Reset) {
hist->AddValue(200);
hist->AddValue(205);
hist->AddValue(212);
- hist->CreateHistogram(data);
+ Histogram<uint64_t>::CumulativeData data;
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
std::string text;
@@ -185,7 +185,7 @@ TEST(Histtest, MultipleCreateHist) {
hist->AddValue(68);
hist->AddValue(75);
hist->AddValue(93);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
hist->AddValue(110);
hist->AddValue(121);
hist->AddValue(132);
@@ -194,14 +194,14 @@ TEST(Histtest, MultipleCreateHist) {
hist->AddValue(155);
hist->AddValue(163);
hist->AddValue(168);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
hist->AddValue(175);
hist->AddValue(182);
hist->AddValue(193);
hist->AddValue(200);
hist->AddValue(205);
hist->AddValue(212);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
std::stringstream stream;
std::string expected("MultipleCreateHist:\t99% C.I. 15us-212us Avg: 126.380us Max: 212us\n");
@@ -217,7 +217,7 @@ TEST(Histtest, SingleValue) {
Histogram<uint64_t>::CumulativeData data;
hist->AddValue(1);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
std::stringstream stream;
std::string expected = "SingleValue:\t99% C.I. 1us-1us Avg: 1us Max: 1us\n";
hist->PrintConfidenceIntervals(stream, 0.99, data);
@@ -234,7 +234,7 @@ TEST(Histtest, CappingPercentiles) {
for (uint64_t idx = 0ull; idx < 150ull; idx++) {
hist->AddValue(0);
}
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
per_995 = hist->Percentile(0.995, data);
EXPECT_EQ(per_995, 0);
hist->Reset();
@@ -243,7 +243,7 @@ TEST(Histtest, CappingPercentiles) {
hist->AddValue(val);
}
}
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
per_005 = hist->Percentile(0.005, data);
per_995 = hist->Percentile(0.995, data);
EXPECT_EQ(1, per_005);
@@ -260,7 +260,7 @@ TEST(Histtest, SpikyValues) {
}
}
hist->AddValue(10000);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
std::stringstream stream;
std::string expected = "SpikyValues:\t99% C.I. 0.089us-2541.825us Avg: 95.033us Max: 10000us\n";
hist->PrintConfidenceIntervals(stream, 0.99, data);
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index 45a546f37e..bebbd70b81 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -74,12 +74,11 @@ uint64_t CumulativeLogger::GetTotalTime() const {
return total;
}
-void CumulativeLogger::AddLogger(const base::TimingLogger &logger) {
+void CumulativeLogger::AddLogger(const TimingLogger &logger) {
MutexLock mu(Thread::Current(), lock_);
- const base::TimingLogger::SplitTimings& splits = logger.GetSplits();
- for (base::TimingLogger::SplitTimingsIterator it = splits.begin(), end = splits.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ const TimingLogger::SplitTimings& splits = logger.GetSplits();
+ for (auto it = splits.begin(), end = splits.end(); it != end; ++it) {
+ TimingLogger::SplitTiming split = *it;
uint64_t split_time = split.first;
const char* split_name = split.second;
AddPair(split_name, split_time);
@@ -101,7 +100,8 @@ void CumulativeLogger::AddPair(const std::string &label, uint64_t delta_time) {
delta_time /= kAdjust;
if (histograms_.find(label) == histograms_.end()) {
- // TODO: Shoud this be a defined constant so we we know out of which orifice 16 and 100 were picked?
+ // TODO: Should this be a defined constant so we we know out of which orifice 16 and 100 were
+ // picked?
const size_t max_buckets = Runtime::Current()->GetHeap()->IsLowMemoryMode() ? 16 : 100;
// TODO: Should this be a defined constant so we know 50 of WTF?
histograms_[label] = new Histogram<uint64_t>(label.c_str(), 50, max_buckets);
@@ -115,7 +115,7 @@ void CumulativeLogger::DumpHistogram(std::ostream &os) {
for (CumulativeLogger::HistogramsIterator it = histograms_.begin(), end = histograms_.end();
it != end; ++it) {
Histogram<uint64_t>::CumulativeData cumulative_data;
- it->second->CreateHistogram(cumulative_data);
+ it->second->CreateHistogram(&cumulative_data);
it->second->PrintConfidenceIntervals(os, 0.99, cumulative_data);
// Reset cumulative values to save memory. We don't expect DumpHistogram to be called often, so
// it is not performance critical.
@@ -123,9 +123,6 @@ void CumulativeLogger::DumpHistogram(std::ostream &os) {
os << "Done Dumping histograms \n";
}
-
-namespace base {
-
TimingLogger::TimingLogger(const char* name, bool precise, bool verbose)
: name_(name), precise_(precise), verbose_(verbose), current_split_(NULL) {
}
@@ -136,33 +133,35 @@ void TimingLogger::Reset() {
}
void TimingLogger::StartSplit(const char* new_split_label) {
- DCHECK(new_split_label != NULL) << "Starting split (" << new_split_label << ") with null label.";
- TimingLogger::ScopedSplit* explicit_scoped_split = new TimingLogger::ScopedSplit(new_split_label, this);
+ DCHECK(new_split_label != nullptr) << "Starting split with null label.";
+ TimingLogger::ScopedSplit* explicit_scoped_split =
+ new TimingLogger::ScopedSplit(new_split_label, this);
explicit_scoped_split->explicit_ = true;
}
void TimingLogger::EndSplit() {
- CHECK(current_split_ != NULL) << "Ending a non-existent split.";
- DCHECK(current_split_->label_ != NULL);
- DCHECK(current_split_->explicit_ == true) << "Explicitly ending scoped split: " << current_split_->label_;
-
+ CHECK(current_split_ != nullptr) << "Ending a non-existent split.";
+ DCHECK(current_split_->label_ != nullptr);
+ DCHECK(current_split_->explicit_ == true)
+ << "Explicitly ending scoped split: " << current_split_->label_;
delete current_split_;
+ // TODO: current_split_ = nullptr;
}
// Ends the current split and starts the one given by the label.
void TimingLogger::NewSplit(const char* new_split_label) {
- CHECK(current_split_ != NULL) << "Inserting a new split (" << new_split_label
- << ") into a non-existent split.";
- DCHECK(new_split_label != NULL) << "New split (" << new_split_label << ") with null label.";
-
- current_split_->TailInsertSplit(new_split_label);
+ if (current_split_ == nullptr) {
+ StartSplit(new_split_label);
+ } else {
+ DCHECK(new_split_label != nullptr) << "New split (" << new_split_label << ") with null label.";
+ current_split_->TailInsertSplit(new_split_label);
+ }
}
uint64_t TimingLogger::GetTotalNs() const {
uint64_t total_ns = 0;
- for (base::TimingLogger::SplitTimingsIterator it = splits_.begin(), end = splits_.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+ TimingLogger::SplitTiming split = *it;
total_ns += split.first;
}
return total_ns;
@@ -171,9 +170,8 @@ uint64_t TimingLogger::GetTotalNs() const {
void TimingLogger::Dump(std::ostream &os) const {
uint64_t longest_split = 0;
uint64_t total_ns = 0;
- for (base::TimingLogger::SplitTimingsIterator it = splits_.begin(), end = splits_.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+ TimingLogger::SplitTiming split = *it;
uint64_t split_time = split.first;
longest_split = std::max(longest_split, split_time);
total_ns += split_time;
@@ -182,9 +180,8 @@ void TimingLogger::Dump(std::ostream &os) const {
TimeUnit tu = GetAppropriateTimeUnit(longest_split);
uint64_t divisor = GetNsToTimeUnitDivisor(tu);
// Print formatted splits.
- for (base::TimingLogger::SplitTimingsIterator it = splits_.begin(), end = splits_.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+ const TimingLogger::SplitTiming& split = *it;
uint64_t split_time = split.first;
if (!precise_ && divisor >= 1000) {
// Make the fractional part 0.
@@ -231,7 +228,7 @@ TimingLogger::ScopedSplit::~ScopedSplit() {
LOG(INFO) << "End: " << label_ << " " << PrettyDuration(split_time);
}
- // If one or more enclosed explcitly started splits are not terminated we can
+ // If one or more enclosed explicitly started splits are not terminated we can
// either fail or "unwind" the stack of splits in the timing logger to 'this'
// (by deleting the intervening scoped splits). This implements the latter.
TimingLogger::ScopedSplit* current = timing_logger_->current_split_;
@@ -293,5 +290,4 @@ void TimingLogger::ScopedSplit::Resume() {
ATRACE_BEGIN(label_);
}
-} // namespace base
} // namespace art
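With the NewSplit change above, the first call no longer requires a prior StartSplit: when current_split_ is null, NewSplit simply starts the split. A caller can therefore drive a whole phase sequence with NewSplit alone, as in this sketch:

    #include "base/timing_logger.h"

    void TimePhases() {
      TimingLogger timings("compiler", /* precise */ false, /* verbose */ false);
      timings.NewSplit("Phase 1");  // no current split yet, so this starts one
      timings.NewSplit("Phase 2");  // ends "Phase 1", starts "Phase 2"
      timings.EndSplit();           // ends "Phase 2"
    }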
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index 501d2d7fd2..f1f78557aa 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -26,10 +26,7 @@
#include <map>
namespace art {
-
-namespace base {
- class TimingLogger;
-} // namespace base
+class TimingLogger;
class CumulativeLogger {
public:
@@ -44,7 +41,7 @@ class CumulativeLogger {
// Allow the name to be modified, particularly when the cumulative logger is a field within a
// parent class that is unable to determine the "name" of a sub-class.
void SetName(const std::string& name);
- void AddLogger(const base::TimingLogger& logger) LOCKS_EXCLUDED(lock_);
+ void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_);
size_t GetIterations() const;
private:
@@ -65,19 +62,17 @@ class CumulativeLogger {
DISALLOW_COPY_AND_ASSIGN(CumulativeLogger);
};
-namespace base {
-
-
// A timing logger that knows when a split starts for the purposes of logging tools, like systrace.
class TimingLogger {
public:
// Splits are nanosecond times and split names.
typedef std::pair<uint64_t, const char*> SplitTiming;
typedef std::vector<SplitTiming> SplitTimings;
- typedef std::vector<SplitTiming>::const_iterator SplitTimingsIterator;
explicit TimingLogger(const char* name, bool precise, bool verbose);
-
+ ~TimingLogger() {
+ // TODO: DCHECK(current_split_ == nullptr) << "Forgot to end split: " << current_split_->label_;
+ }
// Clears current splits and labels.
void Reset();
@@ -143,7 +138,7 @@ class TimingLogger {
friend class ScopedSplit;
protected:
// The name of the timing logger.
- const char* name_;
+ const char* const name_;
// Do we want to print the exactly recorded split (true) or round down to the time unit being
// used (false).
@@ -162,7 +157,6 @@ class TimingLogger {
DISALLOW_COPY_AND_ASSIGN(TimingLogger);
};
-} // namespace base
} // namespace art
#endif // ART_RUNTIME_BASE_TIMING_LOGGER_H_
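
With the base namespace gone, a split is just a (nanoseconds, label) pair, so totals reduce to a fold over GetSplits(). A minimal standalone sketch of that accumulation (hypothetical driver code, not part of this patch; it only assumes the SplitTiming typedefs above):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Mirrors TimingLogger::SplitTiming: first = duration in ns, second = label.
    typedef std::pair<uint64_t, const char*> SplitTiming;
    typedef std::vector<SplitTiming> SplitTimings;

    // Sums split durations the same way TimingLogger::GetTotalNs() does above.
    uint64_t TotalNs(const SplitTimings& splits) {
      uint64_t total_ns = 0;
      for (const SplitTiming& split : splits) {
        total_ns += split.first;
      }
      return total_ns;
    }
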
diff --git a/runtime/base/timing_logger_test.cc b/runtime/base/timing_logger_test.cc
index 8f28e4809b..03cc9cc5e4 100644
--- a/runtime/base/timing_logger_test.cc
+++ b/runtime/base/timing_logger_test.cc
@@ -26,13 +26,13 @@ class TimingLoggerTest : public CommonTest {};
TEST_F(TimingLoggerTest, StartEnd) {
const char* split1name = "First Split";
- base::TimingLogger timings("StartEnd", true, false);
+ TimingLogger timings("StartEnd", true, false);
timings.StartSplit(split1name);
timings.EndSplit(); // Ends split1.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(1U, splits.size());
EXPECT_STREQ(splits[0].second, split1name);
@@ -43,7 +43,7 @@ TEST_F(TimingLoggerTest, StartNewEnd) {
const char* split1name = "First Split";
const char* split2name = "Second Split";
const char* split3name = "Third Split";
- base::TimingLogger timings("StartNewEnd", true, false);
+ TimingLogger timings("StartNewEnd", true, false);
timings.StartSplit(split1name);
@@ -53,7 +53,7 @@ TEST_F(TimingLoggerTest, StartNewEnd) {
timings.EndSplit(); // Ends split3.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(3U, splits.size());
EXPECT_STREQ(splits[0].second, split1name);
@@ -67,7 +67,7 @@ TEST_F(TimingLoggerTest, StartNewEndNested) {
const char* split3name = "Third Split";
const char* split4name = "Fourth Split";
const char* split5name = "Fifth Split";
- base::TimingLogger timings("StartNewEndNested", true, false);
+ TimingLogger timings("StartNewEndNested", true, false);
timings.StartSplit(split1name);
@@ -85,7 +85,7 @@ TEST_F(TimingLoggerTest, StartNewEndNested) {
timings.EndSplit(); // Ends split2.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(5U, splits.size());
EXPECT_STREQ(splits[0].second, split1name);
@@ -101,25 +101,25 @@ TEST_F(TimingLoggerTest, Scoped) {
const char* innersplit1 = "Inner Split 1";
const char* innerinnersplit1 = "Inner Inner Split 1";
const char* innersplit2 = "Inner Split 2";
- base::TimingLogger timings("Scoped", true, false);
+ TimingLogger timings("Scoped", true, false);
{
- base::TimingLogger::ScopedSplit outer(outersplit, &timings);
+ TimingLogger::ScopedSplit outer(outersplit, &timings);
{
- base::TimingLogger::ScopedSplit inner1(innersplit1, &timings);
+ TimingLogger::ScopedSplit inner1(innersplit1, &timings);
{
- base::TimingLogger::ScopedSplit innerinner1(innerinnersplit1, &timings);
+ TimingLogger::ScopedSplit innerinner1(innerinnersplit1, &timings);
} // Ends innerinnersplit1.
} // Ends innersplit1.
{
- base::TimingLogger::ScopedSplit inner2(innersplit2, &timings);
+ TimingLogger::ScopedSplit inner2(innersplit2, &timings);
} // Ends innersplit2.
} // Ends outersplit.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(4U, splits.size());
EXPECT_STREQ(splits[0].second, innerinnersplit1);
@@ -134,12 +134,12 @@ TEST_F(TimingLoggerTest, ScopedAndExplicit) {
const char* innersplit = "Inner Split";
const char* innerinnersplit1 = "Inner Inner Split 1";
const char* innerinnersplit2 = "Inner Inner Split 2";
- base::TimingLogger timings("Scoped", true, false);
+ TimingLogger timings("Scoped", true, false);
timings.StartSplit(outersplit);
{
- base::TimingLogger::ScopedSplit inner(innersplit, &timings);
+ TimingLogger::ScopedSplit inner(innersplit, &timings);
timings.StartSplit(innerinnersplit1);
@@ -148,7 +148,7 @@ TEST_F(TimingLoggerTest, ScopedAndExplicit) {
timings.EndSplit(); // Ends outersplit.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(4U, splits.size());
EXPECT_STREQ(splits[0].second, innerinnersplit1);
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 7cc29a1e58..d860b6c34a 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -582,10 +582,11 @@ class CommonTest : public testing::Test {
void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(method != NULL);
- base::TimingLogger timings("CommonTest::CompileMethod", false, false);
+ TimingLogger timings("CommonTest::CompileMethod", false, false);
timings.StartSplit("CompileOne");
compiler_driver_->CompileOne(method, timings);
MakeExecutable(method);
+ timings.EndSplit();
}
void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
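
The added EndSplit() balances the StartSplit() above; the TODO DCHECK in the TimingLogger destructor would eventually enforce exactly this pairing. A hypothetical rewrite of the same method using the ScopedSplit API exercised by the tests above ends the split automatically:

    // Sketch only: ScopedSplit ends its split when it goes out of scope,
    // so the EndSplit() call can never be forgotten.
    void CompileMethodScoped(mirror::ArtMethod* method)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      CHECK(method != NULL);
      TimingLogger timings("CommonTest::CompileMethod", false, false);
      {
        TimingLogger::ScopedSplit split("CompileOne", &timings);
        compiler_driver_->CompileOne(method, timings);
        MakeExecutable(method);
      }  // Split "CompileOne" ends here.
    }
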
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index f537709261..3ef0a7fd81 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1194,41 +1194,37 @@ static uint32_t MangleAccessFlags(uint32_t accessFlags) {
return accessFlags;
}
-static const uint16_t kEclipseWorkaroundSlot = 1000;
-
/*
- * Eclipse appears to expect that the "this" reference is in slot zero.
- * If it's not, the "variables" display will show two copies of "this",
- * possibly because it gets "this" from SF.ThisObject and then displays
- * all locals with nonzero slot numbers.
- *
- * So, we remap the item in slot 0 to 1000, and remap "this" to zero. On
- * SF.GetValues / SF.SetValues we map them back.
- *
- * TODO: jdb uses the value to determine whether a variable is a local or an argument,
- * by checking whether it's less than the number of arguments. To make that work, we'd
- * have to "mangle" all the arguments to come first, not just the implicit argument 'this'.
+ * Circularly shifts registers so that arguments come first. Debuggers
+ * expect slots to begin with arguments, but dex code places them at
+ * the end.
*/
-static uint16_t MangleSlot(uint16_t slot, const char* name) {
- uint16_t newSlot = slot;
- if (strcmp(name, "this") == 0) {
- newSlot = 0;
- } else if (slot == 0) {
- newSlot = kEclipseWorkaroundSlot;
+static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
+ uint16_t ins_size = code_item->ins_size_;
+ uint16_t locals_size = code_item->registers_size_ - ins_size;
+ if (slot >= locals_size) {
+ return slot - locals_size;
+ } else {
+ return slot + ins_size;
}
- return newSlot;
}
+/*
+ * Circularly shifts registers so that arguments come last, reverting
+ * slots to the dex-style argument placement.
+ */
static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (slot == kEclipseWorkaroundSlot) {
- return 0;
- } else if (slot == 0) {
- const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
- CHECK(code_item != NULL) << PrettyMethod(m);
- return code_item->registers_size_ - code_item->ins_size_;
+ const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
+ uint16_t ins_size = code_item->ins_size_;
+ uint16_t locals_size = code_item->registers_size_ - ins_size;
+ if (slot < ins_size) {
+ return slot + locals_size;
+ } else {
+ return slot - ins_size;
}
- return slot;
}
JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
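
A standalone sketch of the rotation above (hypothetical values, not ART code): with registers_size = 5 and ins_size = 2, dex keeps the two arguments in registers 3-4, while the debugger-visible slots put them first. MangleSlot and DemangleSlot are exact inverses:

    #include <cassert>
    #include <cstdint>

    uint16_t Mangle(uint16_t slot, uint16_t registers_size, uint16_t ins_size) {
      uint16_t locals_size = registers_size - ins_size;
      return slot >= locals_size ? slot - locals_size : slot + ins_size;
    }

    uint16_t Demangle(uint16_t slot, uint16_t registers_size, uint16_t ins_size) {
      uint16_t locals_size = registers_size - ins_size;
      return slot < ins_size ? slot + locals_size : slot - ins_size;
    }

    int main() {
      assert(Mangle(3, 5, 2) == 0);  // First argument register -> slot 0.
      assert(Mangle(0, 5, 2) == 2);  // First local register -> slot 2.
      for (uint16_t slot = 0; slot < 5; ++slot) {
        assert(Demangle(Mangle(slot, 5, 2), 5, 2) == slot);  // Round trip.
      }
      return 0;
    }
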
@@ -1347,16 +1343,18 @@ void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::Expan
void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic, JDWP::ExpandBuf* pReply) {
struct DebugCallbackContext {
+ mirror::ArtMethod* method;
JDWP::ExpandBuf* pReply;
size_t variable_count;
bool with_generic;
- static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, const char* name, const char* descriptor, const char* signature) {
+ static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, const char* name, const char* descriptor, const char* signature)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
- VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", pContext->variable_count, startAddress, endAddress - startAddress, name, descriptor, signature, slot, MangleSlot(slot, name));
+ VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", pContext->variable_count, startAddress, endAddress - startAddress, name, descriptor, signature, slot, MangleSlot(slot, pContext->method));
- slot = MangleSlot(slot, name);
+ slot = MangleSlot(slot, pContext->method);
expandBufAdd8BE(pContext->pReply, startAddress);
expandBufAddUtf8String(pContext->pReply, name);
@@ -1384,6 +1382,7 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi
expandBufAdd4BE(pReply, 0);
DebugCallbackContext context;
+ context.method = m;
context.pReply = pReply;
context.variable_count = 0;
context.with_generic = with_generic;
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index a02823eb90..4f33292929 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -721,9 +721,9 @@ void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32
for (;;) {
uint8_t opcode = *stream++;
uint16_t reg;
- uint16_t name_idx;
- uint16_t descriptor_idx;
- uint16_t signature_idx = 0;
+ uint32_t name_idx;
+ uint32_t descriptor_idx;
+ uint32_t signature_idx = 0;
switch (opcode) {
case DBG_END_SEQUENCE:
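
The index locals widen from uint16_t to uint32_t because they are decoded from ULEB128 varints in the debug-info stream, and string/type indices can exceed 16 bits once a dex file holds more than 64K strings. A minimal decoder sketch of that encoding (following the usual shape of the runtime's leb128 helpers; assumes well-formed input):

    #include <cstdint>

    // Reads one unsigned LEB128 value: 7 payload bits per byte, high bit set
    // on every byte except the last. Five bytes can carry a full 32-bit index,
    // which is why a uint16_t local would silently truncate large indices.
    uint32_t DecodeUleb128(const uint8_t** stream) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*stream)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }
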
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 6111c2fbf2..a80f5935c2 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -64,7 +64,7 @@ class GarbageCollector {
void RegisterPause(uint64_t nano_length);
- base::TimingLogger& GetTimings() {
+ TimingLogger& GetTimings() {
return timings_;
}
@@ -131,7 +131,7 @@ class GarbageCollector {
const bool verbose_;
uint64_t duration_ns_;
- base::TimingLogger timings_;
+ TimingLogger timings_;
// Cumulative statistics.
uint64_t total_time_ns_;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 56dc0e528a..61b3f09a4c 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -157,7 +157,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
void MarkSweep::InitializePhase() {
timings_.Reset();
- base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+ TimingLogger::ScopedSplit split("InitializePhase", &timings_);
mark_stack_ = heap_->mark_stack_.get();
DCHECK(mark_stack_ != nullptr);
SetImmuneRange(nullptr, nullptr);
@@ -185,14 +185,14 @@ void MarkSweep::InitializePhase() {
}
void MarkSweep::ProcessReferences(Thread* self) {
- base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+ TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
- &finalizer_reference_list_, &phantom_reference_list_);
+ GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
+ &RecursiveMarkObjectCallback, this);
}
bool MarkSweep::HandleDirtyObjectsPhase() {
- base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
+ TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
@@ -238,7 +238,7 @@ bool MarkSweep::IsConcurrent() const {
}
void MarkSweep::MarkingPhase() {
- base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+ TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
Thread* self = Thread::Current();
BindBitmaps();
@@ -272,7 +272,7 @@ void MarkSweep::UpdateAndMarkModUnion() {
if (IsImmuneSpace(space)) {
const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable";
- base::TimingLogger::ScopedSplit split(name, &timings_);
+ TimingLogger::ScopedSplit split(name, &timings_);
accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
CHECK(mod_union_table != nullptr);
mod_union_table->UpdateAndMarkReferences(MarkRootCallback, this);
@@ -297,7 +297,7 @@ void MarkSweep::MarkReachableObjects() {
}
void MarkSweep::ReclaimPhase() {
- base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+ TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
Thread* self = Thread::Current();
if (!IsConcurrent()) {
@@ -312,7 +312,7 @@ void MarkSweep::ReclaimPhase() {
if (IsConcurrent()) {
Runtime::Current()->AllowNewSystemWeaks();
- base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
+ TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
// The allocation stack contains things allocated since the start of the GC. These may have been
@@ -363,7 +363,7 @@ void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
}
void MarkSweep::FindDefaultMarkBitmap() {
- base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
+ TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
if (bitmap != nullptr &&
@@ -407,6 +407,13 @@ inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
}
}
+mirror::Object* MarkSweep::RecursiveMarkObjectCallback(mirror::Object* obj, void* arg) {
+ MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
+ mark_sweep->MarkObject(obj);
+ mark_sweep->ProcessMarkStack(true);
+ return obj;
+}
+
inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
DCHECK(!IsImmune(obj));
// Try to take advantage of locality of references within a space, failing this find the space
@@ -932,7 +939,7 @@ class RecursiveMarkTask : public MarkStackTask<false> {
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
- base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
+ TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
// RecursiveMark will build the lists of known instances of the Reference classes.
// See DelayReferenceReferent for details.
CHECK(soft_reference_list_ == NULL);
@@ -992,7 +999,7 @@ void MarkSweep::RecursiveMark() {
ProcessMarkStack(false);
}
-mirror::Object* MarkSweep::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+mirror::Object* MarkSweep::IsMarkedCallback(Object* object, void* arg) {
if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
return object;
}
@@ -1013,7 +1020,7 @@ void MarkSweep::ReMarkRoots() {
void MarkSweep::SweepSystemWeaks() {
Runtime* runtime = Runtime::Current();
timings_.StartSplit("SweepSystemWeaks");
- runtime->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+ runtime->SweepSystemWeaks(IsMarkedCallback, this);
timings_.EndSplit();
}
@@ -1198,7 +1205,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
void MarkSweep::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
- base::TimingLogger::ScopedSplit("Sweep", &timings_);
+ TimingLogger::ScopedSplit("Sweep", &timings_);
const bool partial = (GetGcType() == kGcTypePartial);
SweepCallbackContext scc;
@@ -1224,12 +1231,12 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
if (!space->IsZygoteSpace()) {
- base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
+ TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
// Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
&SweepCallback, reinterpret_cast<void*>(&scc));
} else {
- base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
+ TimingLogger::ScopedSplit split("SweepZygote", &timings_);
// Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
// memory.
accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
@@ -1242,7 +1249,7 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
}
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
- base::TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+ TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
// Sweep large objects
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
@@ -1314,40 +1321,7 @@ void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
DCHECK(klass != nullptr);
DCHECK(klass->IsReferenceClass());
DCHECK(obj != NULL);
- Object* referent = heap_->GetReferenceReferent(obj);
- if (referent != NULL && !IsMarked(referent)) {
- if (kCountJavaLangRefs) {
- ++reference_count_;
- }
- Thread* self = Thread::Current();
- // TODO: Remove these locks, and use atomic stacks for storing references?
- // We need to check that the references haven't already been enqueued since we can end up
- // scanning the same reference multiple times due to dirty cards.
- if (klass->IsSoftReferenceClass()) {
- MutexLock mu(self, *heap_->GetSoftRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &soft_reference_list_);
- }
- } else if (klass->IsWeakReferenceClass()) {
- MutexLock mu(self, *heap_->GetWeakRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &weak_reference_list_);
- }
- } else if (klass->IsFinalizerReferenceClass()) {
- MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
- }
- } else if (klass->IsPhantomReferenceClass()) {
- MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
- }
- } else {
- LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
- << " " << std::hex << klass->GetAccessFlags();
- }
- }
+ heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
}
class MarkObjectVisitor {
@@ -1435,43 +1409,6 @@ void MarkSweep::ProcessMarkStack(bool paused) {
timings_.EndSplit();
}
-// Walks the reference list marking any references subject to the
-// reference clearing policy. References with a black referent are
-// removed from the list. References with white referents biased
-// toward saving are blackened and also removed from the list.
-void MarkSweep::PreserveSomeSoftReferences(Object** list) {
- DCHECK(list != NULL);
- Object* clear = NULL;
- size_t counter = 0;
-
- DCHECK(mark_stack_->IsEmpty());
-
- timings_.StartSplit("PreserveSomeSoftReferences");
- while (*list != NULL) {
- Object* ref = heap_->DequeuePendingReference(list);
- Object* referent = heap_->GetReferenceReferent(ref);
- if (referent == NULL) {
- // Referent was cleared by the user during marking.
- continue;
- }
- bool is_marked = IsMarked(referent);
- if (!is_marked && ((++counter) & 1)) {
- // Referent is white and biased toward saving, mark it.
- MarkObject(referent);
- is_marked = true;
- }
- if (!is_marked) {
- // Referent is white, queue it for clearing.
- heap_->EnqueuePendingReference(ref, &clear);
- }
- }
- *list = clear;
- timings_.EndSplit();
-
- // Restart the mark with the newly black references added to the root set.
- ProcessMarkStack(true);
-}
-
inline bool MarkSweep::IsMarked(const Object* object) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (IsImmune(object)) {
@@ -1484,100 +1421,8 @@ inline bool MarkSweep::IsMarked(const Object* object) const
return heap_->GetMarkBitmap()->Test(object);
}
-// Unlink the reference list clearing references objects with white
-// referents. Cleared references registered to a reference queue are
-// scheduled for appending by the heap worker thread.
-void MarkSweep::ClearWhiteReferences(Object** list) {
- DCHECK(list != NULL);
- while (*list != NULL) {
- Object* ref = heap_->DequeuePendingReference(list);
- Object* referent = heap_->GetReferenceReferent(ref);
- if (referent != NULL && !IsMarked(referent)) {
- // Referent is white, clear it.
- heap_->ClearReferenceReferent(ref);
- if (heap_->IsEnqueuable(ref)) {
- heap_->EnqueueReference(ref, &cleared_reference_list_);
- }
- }
- }
- DCHECK(*list == NULL);
-}
-
-// Enqueues finalizer references with white referents. White
-// referents are blackened, moved to the zombie field, and the
-// referent field is cleared.
-void MarkSweep::EnqueueFinalizerReferences(Object** list) {
- DCHECK(list != NULL);
- timings_.StartSplit("EnqueueFinalizerReferences");
- MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
- bool has_enqueued = false;
- while (*list != NULL) {
- Object* ref = heap_->DequeuePendingReference(list);
- Object* referent = heap_->GetReferenceReferent(ref);
- if (referent != NULL && !IsMarked(referent)) {
- MarkObject(referent);
- // If the referent is non-null the reference must queuable.
- DCHECK(heap_->IsEnqueuable(ref));
- ref->SetFieldObject(zombie_offset, referent, false);
- heap_->ClearReferenceReferent(ref);
- heap_->EnqueueReference(ref, &cleared_reference_list_);
- has_enqueued = true;
- }
- }
- timings_.EndSplit();
- if (has_enqueued) {
- ProcessMarkStack(true);
- }
- DCHECK(*list == NULL);
-}
-
-// Process reference class instances and schedule finalizations.
-void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
- Object** weak_references,
- Object** finalizer_references,
- Object** phantom_references) {
- CHECK(soft_references != NULL);
- CHECK(weak_references != NULL);
- CHECK(finalizer_references != NULL);
- CHECK(phantom_references != NULL);
- CHECK(mark_stack_->IsEmpty());
-
- // Unless we are in the zygote or required to clear soft references
- // with white references, preserve some white referents.
- if (!clear_soft && !Runtime::Current()->IsZygote()) {
- PreserveSomeSoftReferences(soft_references);
- }
-
- timings_.StartSplit("ProcessReferences");
- // Clear all remaining soft and weak references with white
- // referents.
- ClearWhiteReferences(soft_references);
- ClearWhiteReferences(weak_references);
- timings_.EndSplit();
-
- // Preserve all white objects with finalize methods and schedule
- // them for finalization.
- EnqueueFinalizerReferences(finalizer_references);
-
- timings_.StartSplit("ProcessReferences");
- // Clear all f-reachable soft and weak references with white
- // referents.
- ClearWhiteReferences(soft_references);
- ClearWhiteReferences(weak_references);
-
- // Clear all phantom references with white referents.
- ClearWhiteReferences(phantom_references);
-
- // At this point all reference lists should be empty.
- DCHECK(*soft_references == NULL);
- DCHECK(*weak_references == NULL);
- DCHECK(*finalizer_references == NULL);
- DCHECK(*phantom_references == NULL);
- timings_.EndSplit();
-}
-
void MarkSweep::UnBindBitmaps() {
- base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+ TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
@@ -1594,13 +1439,9 @@ void MarkSweep::UnBindBitmaps() {
}
void MarkSweep::FinishPhase() {
- base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+ TimingLogger::ScopedSplit split("FinishPhase", &timings_);
// Can't enqueue references if we hold the mutator lock.
- Object* cleared_references = GetClearedReferences();
Heap* heap = GetHeap();
- timings_.NewSplit("EnqueueClearedReferences");
- heap->EnqueueClearedReferences(&cleared_references);
-
timings_.NewSplit("PostGcVerification");
heap->PostGcVerification(this);
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index cc5841244d..53d85b0d70 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -189,6 +189,10 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
+ static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -212,10 +216,7 @@ class MarkSweep : public GarbageCollector {
// Returns true if the object has its bit set in the mark bitmap.
bool IsMarked(const mirror::Object* object) const;
- static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static mirror::Object* SystemWeakIsMarkedArrayCallback(mirror::Object* object, void* arg)
+ static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
@@ -349,13 +350,6 @@ class MarkSweep : public GarbageCollector {
void ClearWhiteReferences(mirror::Object** list)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
- mirror::Object** weak_references,
- mirror::Object** finalizer_references,
- mirror::Object** phantom_references)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Whether or not we count how many of each type of object were scanned.
static const bool kCountScannedTypes = false;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index d833631da9..ba98314f59 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -139,7 +139,7 @@ SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
void SemiSpace::InitializePhase() {
timings_.Reset();
- base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+ TimingLogger::ScopedSplit split("InitializePhase", &timings_);
mark_stack_ = heap_->mark_stack_.get();
DCHECK(mark_stack_ != nullptr);
immune_begin_ = nullptr;
@@ -156,16 +156,16 @@ void SemiSpace::InitializePhase() {
}
void SemiSpace::ProcessReferences(Thread* self) {
- base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+ TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
- &finalizer_reference_list_, &phantom_reference_list_);
+ GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
+ &RecursiveMarkObjectCallback, this);
}
void SemiSpace::MarkingPhase() {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
- base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+ TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
// Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
// wrong space.
heap_->SwapSemiSpaces();
@@ -198,7 +198,7 @@ void SemiSpace::UpdateAndMarkModUnion() {
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
CHECK(table != nullptr);
// TODO: Improve naming.
- base::TimingLogger::ScopedSplit split(
+ TimingLogger::ScopedSplit split(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable",
&timings_);
@@ -218,7 +218,7 @@ void SemiSpace::MarkReachableObjects() {
}
void SemiSpace::ReclaimPhase() {
- base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+ TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
Thread* self = Thread::Current();
ProcessReferences(self);
{
@@ -344,6 +344,15 @@ Object* SemiSpace::MarkObject(Object* obj) {
return ret;
}
+Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
+ DCHECK(root != nullptr);
+ DCHECK(arg != nullptr);
+ SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
+ mirror::Object* ret = semi_space->MarkObject(root);
+ semi_space->ProcessMarkStack(true);
+ return ret;
+}
+
Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
DCHECK(root != nullptr);
DCHECK(arg != nullptr);
@@ -374,13 +383,13 @@ mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
return obj;
}
-mirror::Object* SemiSpace::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}
void SemiSpace::SweepSystemWeaks() {
timings_.StartSplit("SweepSystemWeaks");
- Runtime::Current()->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
timings_.EndSplit();
}
@@ -417,7 +426,7 @@ void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
void SemiSpace::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
- base::TimingLogger::ScopedSplit("Sweep", &timings_);
+ TimingLogger::ScopedSplit("Sweep", &timings_);
const bool partial = (GetGcType() == kGcTypePartial);
SweepCallbackContext scc;
@@ -443,12 +452,12 @@ void SemiSpace::Sweep(bool swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
if (!space->IsZygoteSpace()) {
- base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
+ TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
// Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
&SweepCallback, reinterpret_cast<void*>(&scc));
} else {
- base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
+ TimingLogger::ScopedSplit split("SweepZygote", &timings_);
// Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
// memory.
accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
@@ -461,7 +470,7 @@ void SemiSpace::Sweep(bool swap_bitmaps) {
}
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
- base::TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+ TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
// Sweep large objects
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
@@ -487,45 +496,7 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
- DCHECK(klass != nullptr);
- DCHECK(klass->IsReferenceClass());
- DCHECK(obj != nullptr);
- Object* referent = heap_->GetReferenceReferent(obj);
- if (referent != nullptr) {
- Object* forward_address = GetMarkedForwardAddress(referent);
- if (forward_address == nullptr) {
- Thread* self = Thread::Current();
- // TODO: Remove these locks, and use atomic stacks for storing references?
- // We need to check that the references haven't already been enqueued since we can end up
- // scanning the same reference multiple times due to dirty cards.
- if (klass->IsSoftReferenceClass()) {
- MutexLock mu(self, *heap_->GetSoftRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &soft_reference_list_);
- }
- } else if (klass->IsWeakReferenceClass()) {
- MutexLock mu(self, *heap_->GetWeakRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &weak_reference_list_);
- }
- } else if (klass->IsFinalizerReferenceClass()) {
- MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
- }
- } else if (klass->IsPhantomReferenceClass()) {
- MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
- if (!heap_->IsEnqueued(obj)) {
- heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
- }
- } else {
- LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
- << klass->GetAccessFlags();
- }
- } else if (referent != forward_address) {
- heap_->SetReferenceReferent(obj, forward_address);
- }
- }
+ heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}
// Visit all of the references of an object and update.
@@ -555,48 +526,6 @@ void SemiSpace::ProcessMarkStack(bool paused) {
timings_.EndSplit();
}
-// Walks the reference list marking any references subject to the
-// reference clearing policy. References with a black referent are
-// removed from the list. References with white referents biased
-// toward saving are blackened and also removed from the list.
-void SemiSpace::PreserveSomeSoftReferences(Object** list) {
- DCHECK(list != NULL);
- Object* clear = NULL;
- size_t counter = 0;
- DCHECK(mark_stack_->IsEmpty());
- timings_.StartSplit("PreserveSomeSoftReferences");
- while (*list != NULL) {
- Object* ref = heap_->DequeuePendingReference(list);
- Object* referent = heap_->GetReferenceReferent(ref);
- if (referent == NULL) {
- // Referent was cleared by the user during marking.
- continue;
- }
- Object* forward_address = GetMarkedForwardAddress(referent);
- bool is_marked = forward_address != nullptr;
- if (!is_marked && ((++counter) & 1)) {
- // Referent is white and biased toward saving, mark it.
- forward_address = MarkObject(referent);
- if (referent != forward_address) {
- // Update the referent if we moved it.
- heap_->SetReferenceReferent(ref, forward_address);
- }
- } else {
- if (!is_marked) {
- // Referent is white, queue it for clearing.
- heap_->EnqueuePendingReference(ref, &clear);
- } else if (referent != forward_address) {
- CHECK(forward_address != nullptr);
- heap_->SetReferenceReferent(ref, forward_address);
- }
- }
- }
- *list = clear;
- timings_.EndSplit();
- // Restart the mark with the newly black references added to the root set.
- ProcessMarkStack(true);
-}
-
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
@@ -618,114 +547,8 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}
-// Unlink the reference list clearing references objects with white
-// referents. Cleared references registered to a reference queue are
-// scheduled for appending by the heap worker thread.
-void SemiSpace::ClearWhiteReferences(Object** list) {
- DCHECK(list != NULL);
- while (*list != NULL) {
- Object* ref = heap_->DequeuePendingReference(list);
- Object* referent = heap_->GetReferenceReferent(ref);
- if (referent != nullptr) {
- Object* forward_address = GetMarkedForwardAddress(referent);
- if (forward_address == nullptr) {
- // Referent is white, clear it.
- heap_->ClearReferenceReferent(ref);
- if (heap_->IsEnqueuable(ref)) {
- heap_->EnqueueReference(ref, &cleared_reference_list_);
- }
- } else if (referent != forward_address) {
- heap_->SetReferenceReferent(ref, forward_address);
- }
- }
- }
- DCHECK(*list == NULL);
-}
-
-// Enqueues finalizer references with white referents. White
-// referents are blackened, moved to the zombie field, and the
-// referent field is cleared.
-void SemiSpace::EnqueueFinalizerReferences(Object** list) {
- // *list = NULL;
- // return;
- DCHECK(list != NULL);
- timings_.StartSplit("EnqueueFinalizerReferences");
- MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
- bool has_enqueued = false;
- while (*list != NULL) {
- Object* ref = heap_->DequeuePendingReference(list);
- Object* referent = heap_->GetReferenceReferent(ref);
- if (referent != nullptr) {
- Object* forward_address = GetMarkedForwardAddress(referent);
- // Not marked.
- if (forward_address == nullptr) {
- forward_address = MarkObject(referent);
- // If the referent is non-null the reference must queuable.
- DCHECK(heap_->IsEnqueuable(ref));
- // Move the referent to the zombie field.
- ref->SetFieldObject(zombie_offset, forward_address, false);
- heap_->ClearReferenceReferent(ref);
- heap_->EnqueueReference(ref, &cleared_reference_list_);
- has_enqueued = true;
- } else if (referent != forward_address) {
- heap_->SetReferenceReferent(ref, forward_address);
- }
- }
- }
- timings_.EndSplit();
- if (has_enqueued) {
- ProcessMarkStack(true);
- }
- DCHECK(*list == NULL);
-}
-
-// Process reference class instances and schedule finalizations.
-void SemiSpace::ProcessReferences(Object** soft_references, bool clear_soft,
- Object** weak_references,
- Object** finalizer_references,
- Object** phantom_references) {
- CHECK(soft_references != NULL);
- CHECK(weak_references != NULL);
- CHECK(finalizer_references != NULL);
- CHECK(phantom_references != NULL);
- CHECK(mark_stack_->IsEmpty());
-
- // Unless we are in the zygote or required to clear soft references
- // with white references, preserve some white referents.
- if (!clear_soft && !Runtime::Current()->IsZygote()) {
- PreserveSomeSoftReferences(soft_references);
- }
-
- timings_.StartSplit("ProcessReferences");
- // Clear all remaining soft and weak references with white
- // referents.
- ClearWhiteReferences(soft_references);
- ClearWhiteReferences(weak_references);
- timings_.EndSplit();
-
- // Preserve all white objects with finalize methods and schedule
- // them for finalization.
- EnqueueFinalizerReferences(finalizer_references);
-
- timings_.StartSplit("ProcessReferences");
- // Clear all f-reachable soft and weak references with white
- // referents.
- ClearWhiteReferences(soft_references);
- ClearWhiteReferences(weak_references);
-
- // Clear all phantom references with white referents.
- ClearWhiteReferences(phantom_references);
-
- // At this point all reference lists should be empty.
- DCHECK(*soft_references == NULL);
- DCHECK(*weak_references == NULL);
- DCHECK(*finalizer_references == NULL);
- DCHECK(*phantom_references == NULL);
- timings_.EndSplit();
-}
-
void SemiSpace::UnBindBitmaps() {
- base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+ TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
@@ -749,13 +572,9 @@ void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
}
void SemiSpace::FinishPhase() {
- base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+ TimingLogger::ScopedSplit split("FinishPhase", &timings_);
// Can't enqueue references if we hold the mutator lock.
- Object* cleared_references = GetClearedReferences();
Heap* heap = GetHeap();
- timings_.NewSplit("EnqueueClearedReferences");
- heap->EnqueueClearedReferences(&cleared_references);
-
timings_.NewSplit("PostGcVerification");
heap->PostGcVerification(this);
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 13d519559a..0f0cae1966 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -150,12 +150,15 @@ class SemiSpace : public GarbageCollector {
static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* root, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;
- static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
+ static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 70df3d3133..f446fcf541 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -82,10 +82,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
long_gc_log_threshold_(long_gc_log_threshold),
ignore_max_footprint_(ignore_max_footprint),
have_zygote_space_(false),
- soft_ref_queue_lock_(NULL),
- weak_ref_queue_lock_(NULL),
- finalizer_ref_queue_lock_(NULL),
- phantom_ref_queue_lock_(NULL),
+ soft_reference_queue_(this),
+ weak_reference_queue_(this),
+ finalizer_reference_queue_(this),
+ phantom_reference_queue_(this),
+ cleared_references_(this),
is_gc_running_(false),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -233,13 +234,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
-
- // Create the reference queue locks, this is required so for parallel object scanning in the GC.
- soft_ref_queue_lock_ = new Mutex("Soft reference queue lock");
- weak_ref_queue_lock_ = new Mutex("Weak reference queue lock");
- finalizer_ref_queue_lock_ = new Mutex("Finalizer reference queue lock");
- phantom_ref_queue_lock_ = new Mutex("Phantom reference queue lock");
-
last_gc_time_ns_ = NanoTime();
last_gc_size_ = GetBytesAllocated();
@@ -599,10 +593,6 @@ Heap::~Heap() {
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
- delete soft_ref_queue_lock_;
- delete weak_ref_queue_lock_;
- delete finalizer_ref_queue_lock_;
- delete phantom_ref_queue_lock_;
VLOG(heap) << "Finished ~Heap()";
}
@@ -640,6 +630,106 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
return FindDiscontinuousSpaceFromObject(obj, true);
}
+struct SoftReferenceArgs {
+ RootVisitor* is_marked_callback_;
+ RootVisitor* recursive_mark_callback_;
+ void* arg_;
+};
+
+mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
+ SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
+ // TODO: Don't preserve all soft references.
+ return args->recursive_mark_callback_(obj, args->arg_);
+}
+
+// Process reference class instances and schedule finalizations.
+void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
+ RootVisitor* is_marked_callback,
+ RootVisitor* recursive_mark_object_callback, void* arg) {
+ // Unless we are in the zygote or required to clear soft references with white referents,
+ // preserve some white referents.
+ if (!clear_soft && !Runtime::Current()->IsZygote()) {
+ SoftReferenceArgs soft_reference_args;
+ soft_reference_args.is_marked_callback_ = is_marked_callback;
+ soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
+ soft_reference_args.arg_ = arg;
+ soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
+ &soft_reference_args);
+ }
+ timings.StartSplit("ProcessReferences");
+ // Clear all remaining soft and weak references with white referents.
+ soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ timings.EndSplit();
+ // Preserve all white objects with finalize methods and schedule them for finalization.
+ timings.StartSplit("EnqueueFinalizerReferences");
+ finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
+ recursive_mark_object_callback, arg);
+ timings.EndSplit();
+ timings.StartSplit("ProcessReferences");
+ // Clear all f-reachable soft and weak references with white referents.
+ soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ // Clear all phantom references with white referents.
+ phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ // At this point all reference queues other than the cleared references should be empty.
+ DCHECK(soft_reference_queue_.IsEmpty());
+ DCHECK(weak_reference_queue_.IsEmpty());
+ DCHECK(finalizer_reference_queue_.IsEmpty());
+ DCHECK(phantom_reference_queue_.IsEmpty());
+ timings.EndSplit();
+}
+
+bool Heap::IsEnqueued(mirror::Object* ref) const {
+ // Since the references are stored as cyclic lists, the pending next field of an enqueued
+ // reference is always non-null.
+ return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
+}
+
+bool Heap::IsEnqueuable(const mirror::Object* ref) const {
+ DCHECK(ref != nullptr);
+ const mirror::Object* queue =
+ ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
+ const mirror::Object* queue_next =
+ ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
+ return queue != nullptr && queue_next == nullptr;
+}
+
+// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+ RootVisitor mark_visitor, void* arg) {
+ DCHECK(klass != nullptr);
+ DCHECK(klass->IsReferenceClass());
+ DCHECK(obj != nullptr);
+ mirror::Object* referent = GetReferenceReferent(obj);
+ if (referent != nullptr) {
+ mirror::Object* forward_address = mark_visitor(referent, arg);
+ // Null means that the object is not currently marked.
+ if (forward_address == nullptr) {
+ Thread* self = Thread::Current();
+ // TODO: Remove these locks, and use atomic stacks for storing references?
+ // We need to check that the references haven't already been enqueued since we can end up
+ // scanning the same reference multiple times due to dirty cards.
+ if (klass->IsSoftReferenceClass()) {
+ soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ } else if (klass->IsWeakReferenceClass()) {
+ weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ } else if (klass->IsFinalizerReferenceClass()) {
+ finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ } else if (klass->IsPhantomReferenceClass()) {
+ phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ } else {
+ LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+ << klass->GetAccessFlags();
+ }
+ } else if (referent != forward_address) {
+ // Referent is already marked but has moved; update the field to its forwarding address.
+ SetReferenceReferent(obj, forward_address);
+ }
+ }
+}
+
space::ImageSpace* Heap::GetImageSpace() const {
for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
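
Both collectors now funnel reference processing through this one implementation. As the call sites in mark_sweep.cc and semi_space.cc earlier in this patch suggest, the contract is: is_marked_callback returns nullptr for an unmarked object and its (possibly forwarded) address otherwise, while the recursive variant additionally marks the object and drains the mark stack. A toy non-moving collector (invented here purely for illustration, not ART code) would satisfy it like this:

    #include <unordered_set>

    namespace mirror { class Object; }  // Stand-in forward declaration.

    class ToyCollector {
     public:
      // nullptr when unmarked; for a non-moving collector the forwarding
      // address of a marked object is the object itself.
      static mirror::Object* IsMarkedCallback(mirror::Object* obj, void* arg) {
        ToyCollector* self = reinterpret_cast<ToyCollector*>(arg);
        return self->marked_.count(obj) != 0 ? obj : nullptr;
      }
      // Marks the object; a real collector would also drain its mark stack so
      // everything newly reachable is marked before the callback returns.
      static mirror::Object* RecursiveMarkCallback(mirror::Object* obj, void* arg) {
        ToyCollector* self = reinterpret_cast<ToyCollector*>(arg);
        self->marked_.insert(obj);
        return obj;
      }
     private:
      std::unordered_set<mirror::Object*> marked_;
    };
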
@@ -843,7 +933,6 @@ bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_
if (i > 0) {
NanoSleep(MsToNs(10));
}
-
if (search_allocation_stack) {
if (sorted) {
if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
@@ -1442,6 +1531,9 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
+ // Enqueue cleared references.
+ EnqueueClearedReferences();
+
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(gc_type, collector->GetDurationNs());
@@ -1474,7 +1566,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
if (VLOG_IS_ON(heap)) {
- LOG(INFO) << Dumpable<base::TimingLogger>(collector->GetTimings());
+ LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
}
}
}
@@ -1808,17 +1900,17 @@ accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space)
return it->second;
}
-void Heap::ProcessCards(base::TimingLogger& timings) {
+void Heap::ProcessCards(TimingLogger& timings) {
// Clear cards and keep track of cards cleared in the mod-union table.
for (const auto& space : continuous_spaces_) {
accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
if (table != nullptr) {
const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
"ImageModUnionClearCards";
- base::TimingLogger::ScopedSplit split(name, &timings);
+ TimingLogger::ScopedSplit split(name, &timings);
table->ClearCards();
} else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
- base::TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
+ TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
// were dirty before the GC started.
// TODO: Don't need to use atomic.
@@ -2072,72 +2164,6 @@ mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
}
-void Heap::ClearReferenceReferent(mirror::Object* reference) {
- DCHECK(reference != NULL);
- DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
- reference->SetFieldObject(reference_referent_offset_, nullptr, true);
-}
-
-// Returns true if the reference object has not yet been enqueued.
-bool Heap::IsEnqueuable(const mirror::Object* ref) {
- DCHECK(ref != NULL);
- const mirror::Object* queue =
- ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false);
- const mirror::Object* queue_next =
- ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false);
- return (queue != NULL) && (queue_next == NULL);
-}
-
-void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) {
- DCHECK(ref != NULL);
- CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL);
- CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL);
- EnqueuePendingReference(ref, cleared_reference_list);
-}
-
-bool Heap::IsEnqueued(mirror::Object* ref) {
- // Since the references are stored as cyclic lists it means that once enqueued, the pending next
- // will always be non-null.
- return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
-}
-
-void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
- DCHECK(ref != NULL);
- DCHECK(list != NULL);
- if (*list == NULL) {
- // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
- ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
- *list = ref;
- } else {
- mirror::Object* head =
- (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
- ref->SetFieldObject(reference_pendingNext_offset_, head, false);
- (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
- }
-}
-
-mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) {
- DCHECK(list != NULL);
- DCHECK(*list != NULL);
- mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
- false);
- mirror::Object* ref;
-
- // Note: the following code is thread-safe because it is only called from ProcessReferences which
- // is single threaded.
- if (*list == head) {
- ref = *list;
- *list = NULL;
- } else {
- mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
- false);
- (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
- ref = head;
- }
- ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
- return ref;
-}
-
void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
ScopedObjectAccess soa(self);
JValue result;
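
The removed helpers above implement the cyclic pendingNext list that ReferenceQueue now owns. A self-contained sketch of that structure (hypothetical, not ART code) makes the invariants concrete: the list pointer names one node whose pendingNext is the next element to dequeue, enqueueing splices in at that same position (so ordering is stack-like), and pendingNext is non-null exactly while a reference is enqueued:

    #include <cassert>

    struct Ref {
      Ref* pending_next = nullptr;  // Non-null iff this ref is enqueued.
    };

    void EnqueuePending(Ref** list, Ref* ref) {
      if (*list == nullptr) {
        ref->pending_next = ref;  // One-element cycle: ref.pendingNext = ref.
        *list = ref;
      } else {
        // Splice the new ref in right after *list, where Dequeue removes.
        ref->pending_next = (*list)->pending_next;
        (*list)->pending_next = ref;
      }
    }

    Ref* DequeuePending(Ref** list) {
      Ref* head = (*list)->pending_next;
      if (*list == head) {
        *list = nullptr;  // Last element; the cycle collapses.
      } else {
        (*list)->pending_next = head->pending_next;  // Unlink head.
      }
      head->pending_next = nullptr;  // No longer enqueued.
      return head;
    }

    int main() {
      Ref a, b;
      Ref* list = nullptr;
      EnqueuePending(&list, &a);
      EnqueuePending(&list, &b);
      assert(DequeuePending(&list) == &b);  // Most recently enqueued first.
      assert(DequeuePending(&list) == &a);
      assert(list == nullptr);
      return 0;
    }
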
@@ -2168,20 +2194,18 @@ void Heap::PrintReferenceQueue(std::ostream& os, mirror::Object** queue) {
}
}
-void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
- DCHECK(cleared != nullptr);
- mirror::Object* list = *cleared;
- if (list != nullptr) {
+void Heap::EnqueueClearedReferences() {
+ if (!cleared_references_.IsEmpty()) {
// When a runtime isn't started there are no reference queues to care about so ignore.
if (LIKELY(Runtime::Current()->IsStarted())) {
ScopedObjectAccess soa(Thread::Current());
JValue result;
ArgArray arg_array(NULL, 0);
- arg_array.Append(reinterpret_cast<uint32_t>(list));
+ arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList()));
soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}
- *cleared = nullptr;
+ cleared_references_.Clear();
}
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0fa000f18d..08bec99ad9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -31,6 +31,7 @@
#include "jni.h"
#include "locks.h"
#include "offsets.h"
+#include "reference_queue.h"
#include "root_visitor.h"
#include "safe_map.h"
#include "thread_pool.h"
@@ -289,32 +290,26 @@ class Heap {
MemberOffset reference_queueNext_offset,
MemberOffset reference_pendingNext_offset,
MemberOffset finalizer_reference_zombie_offset);
-
- void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* GetReferenceReferent(mirror::Object* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Returns true if the reference object has not yet been enqueued.
- bool IsEnqueuable(const mirror::Object* ref);
- void EnqueueReference(mirror::Object* ref, mirror::Object** list)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsEnqueued(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* DequeuePendingReference(mirror::Object** list)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- MemberOffset GetReferencePendingNextOffset() {
- DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
+ MemberOffset GetReferenceReferentOffset() const {
+ return reference_referent_offset_;
+ }
+ MemberOffset GetReferenceQueueOffset() const {
+ return reference_queue_offset_;
+ }
+ MemberOffset GetReferenceQueueNextOffset() const {
+ return reference_queueNext_offset_;
+ }
+ MemberOffset GetReferencePendingNextOffset() const {
return reference_pendingNext_offset_;
}
-
- MemberOffset GetFinalizerReferenceZombieOffset() {
- DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
+ MemberOffset GetFinalizerReferenceZombieOffset() const {
return finalizer_reference_zombie_offset_;
}
+ static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
+ void ProcessReferences(TimingLogger& timings, bool clear_soft, RootVisitor* is_marked_callback,
+ RootVisitor* recursive_mark_object_callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Enable verification of object references when the runtime is sufficiently initialized.
void EnableObjectValidation() {
@@ -460,22 +455,6 @@ class Heap {
return large_object_space_;
}
- Mutex* GetSoftRefQueueLock() {
- return soft_ref_queue_lock_;
- }
-
- Mutex* GetWeakRefQueueLock() {
- return weak_ref_queue_lock_;
- }
-
- Mutex* GetFinalizerRefQueueLock() {
- return finalizer_ref_queue_lock_;
- }
-
- Mutex* GetPhantomRefQueueLock() {
- return phantom_ref_queue_lock_;
- }
-
void DumpSpaces(std::ostream& stream = LOG(INFO));
// GC performance measuring
@@ -575,8 +554,20 @@ class Heap {
bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow);
// Pushes a list of cleared references out to the managed heap.
- void EnqueueClearedReferences(mirror::Object** cleared_references);
-
+ void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetReferenceReferent(mirror::Object* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ClearReferenceReferent(mirror::Object* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetReferenceReferent(reference, nullptr);
+ }
+ void EnqueueClearedReferences();
+ // Returns true if the reference object has not yet been enqueued.
+ bool IsEnqueuable(const mirror::Object* ref) const;
+ bool IsEnqueued(mirror::Object* ref) const;
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj, RootVisitor mark_visitor,
+ void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Print a reference queue.
void PrintReferenceQueue(std::ostream& os, mirror::Object** queue);
@@ -638,7 +629,7 @@ class Heap {
void SwapStacks();
// Clear cards and update the mod union table.
- void ProcessCards(base::TimingLogger& timings);
+ void ProcessCards(TimingLogger& timings);
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
@@ -699,12 +690,12 @@ class Heap {
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
- // Mutexes held when adding references to reference queues.
- // TODO: move to a UniquePtr, currently annotalysis is confused that UniquePtr isn't lockable.
- Mutex* soft_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- Mutex* weak_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- Mutex* finalizer_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- Mutex* phantom_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Reference queues.
+ ReferenceQueue soft_reference_queue_;
+ ReferenceQueue weak_reference_queue_;
+ ReferenceQueue finalizer_reference_queue_;
+ ReferenceQueue phantom_reference_queue_;
+ ReferenceQueue cleared_references_;
// True while the garbage collector is running.
volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);
@@ -819,16 +810,12 @@ class Heap {
// offset of java.lang.ref.Reference.referent
MemberOffset reference_referent_offset_;
-
// offset of java.lang.ref.Reference.queue
MemberOffset reference_queue_offset_;
-
// offset of java.lang.ref.Reference.queueNext
MemberOffset reference_queueNext_offset_;
-
// offset of java.lang.ref.Reference.pendingNext
MemberOffset reference_pendingNext_offset_;
-
// offset of java.lang.ref.FinalizerReference.zombie
MemberOffset finalizer_reference_zombie_offset_;
@@ -861,6 +848,7 @@ class Heap {
friend class collector::MarkSweep;
friend class collector::SemiSpace;
+ friend class ReferenceQueue;
friend class VerifyReferenceCardVisitor;
friend class VerifyReferenceVisitor;
friend class VerifyObjectVisitor;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
new file mode 100644
index 0000000000..d006349cbb
--- /dev/null
+++ b/runtime/gc/reference_queue.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reference_queue.h"
+
+#include "accounting/card_table-inl.h"
+#include "heap.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+namespace gc {
+
+ReferenceQueue::ReferenceQueue(Heap* heap)
+ : lock_("reference queue lock"),
+ heap_(heap),
+ list_(nullptr) {
+}
+
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref) {
+ DCHECK(ref != NULL);
+ MutexLock mu(self, lock_);
+ if (!heap_->IsEnqueued(ref)) {
+ EnqueuePendingReference(ref);
+ }
+}
+
+void ReferenceQueue::EnqueueReference(mirror::Object* ref) {
+ CHECK(heap_->IsEnqueuable(ref));
+ EnqueuePendingReference(ref);
+}
+
+void ReferenceQueue::EnqueuePendingReference(mirror::Object* ref) {
+ DCHECK(ref != NULL);
+ MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
+ DCHECK_NE(pending_next_offset.Uint32Value(), 0U);
+ if (IsEmpty()) {
+    // 1 element cyclic queue, i.e. Reference ref = ..; ref.pendingNext = ref;
+ ref->SetFieldObject(pending_next_offset, ref, false);
+ list_ = ref;
+ } else {
+ mirror::Object* head =
+ list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+ ref->SetFieldObject(pending_next_offset, head, false);
+ list_->SetFieldObject(pending_next_offset, ref, false);
+ }
+}
+
+mirror::Object* ReferenceQueue::DequeuePendingReference() {
+ DCHECK(!IsEmpty());
+ MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
+ mirror::Object* head = list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+ DCHECK(head != nullptr);
+ mirror::Object* ref;
+  // Note: the following code is thread-safe because it is only called from ProcessReferences,
+  // which is single-threaded.
+ if (list_ == head) {
+ ref = list_;
+ list_ = nullptr;
+ } else {
+ mirror::Object* next = head->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+ list_->SetFieldObject(pending_next_offset, next, false);
+ ref = head;
+ }
+ ref->SetFieldObject(pending_next_offset, nullptr, false);
+ return ref;
+}
+
+void ReferenceQueue::Dump(std::ostream& os) const {
+ mirror::Object* cur = list_;
+ os << "Reference starting at list_=" << list_ << "\n";
+ while (cur != nullptr) {
+ mirror::Object* pending_next =
+ cur->GetFieldObject<mirror::Object*>(heap_->GetReferencePendingNextOffset(), false);
+ os << "PendingNext=" << pending_next;
+ if (cur->GetClass()->IsFinalizerReferenceClass()) {
+ os << " Zombie=" <<
+ cur->GetFieldObject<mirror::Object*>(heap_->GetFinalizerReferenceZombieOffset(), false);
+ }
+ os << "\n";
+ cur = pending_next;
+ }
+}
+
+void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references, RootVisitor visitor,
+ void* arg) {
+ while (!IsEmpty()) {
+ mirror::Object* ref = DequeuePendingReference();
+ mirror::Object* referent = heap_->GetReferenceReferent(ref);
+ if (referent != nullptr) {
+ mirror::Object* forward_address = visitor(referent, arg);
+ if (forward_address == nullptr) {
+ // Referent is white, clear it.
+ heap_->ClearReferenceReferent(ref);
+ if (heap_->IsEnqueuable(ref)) {
+ cleared_references.EnqueuePendingReference(ref);
+ }
+ } else if (referent != forward_address) {
+        // Object moved, need to update the referent.
+ heap_->SetReferenceReferent(ref, forward_address);
+ }
+ }
+ }
+}
+
+void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
+ RootVisitor is_marked_callback,
+ RootVisitor recursive_mark_callback, void* arg) {
+ while (!IsEmpty()) {
+ mirror::Object* ref = DequeuePendingReference();
+ mirror::Object* referent = heap_->GetReferenceReferent(ref);
+ if (referent != nullptr) {
+ mirror::Object* forward_address = is_marked_callback(referent, arg);
+      // If the referent isn't marked, mark it and update the finalizer reference's zombie field.
+ if (forward_address == nullptr) {
+ forward_address = recursive_mark_callback(referent, arg);
+        // If the referent is non-null, the reference must be enqueuable.
+ DCHECK(heap_->IsEnqueuable(ref));
+ // Move the updated referent to the zombie field.
+ ref->SetFieldObject(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+ heap_->ClearReferenceReferent(ref);
+ cleared_references.EnqueueReference(ref);
+ } else if (referent != forward_address) {
+ heap_->SetReferenceReferent(ref, forward_address);
+ }
+ }
+ }
+}
+
+void ReferenceQueue::PreserveSomeSoftReferences(RootVisitor preserve_callback, void* arg) {
+ ReferenceQueue cleared(heap_);
+ while (!IsEmpty()) {
+ mirror::Object* ref = DequeuePendingReference();
+ mirror::Object* referent = heap_->GetReferenceReferent(ref);
+ if (referent != nullptr) {
+ mirror::Object* forward_address = preserve_callback(referent, arg);
+ if (forward_address == nullptr) {
+        // Either the referent isn't marked or we don't wish to preserve it.
+ cleared.EnqueuePendingReference(ref);
+ } else {
+ heap_->SetReferenceReferent(ref, forward_address);
+ }
+ }
+ }
+ list_ = cleared.GetList();
+}
+
+} // namespace gc
+} // namespace art
+
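The pending-reference list above is a singly linked cycle threaded through the Reference objects themselves: list_ points at the tail, tail->pendingNext is the head, and both EnqueuePendingReference and DequeuePendingReference work at the head, so references come back out newest-first. A self-contained toy with the same structure (illustrative only, not ART code):

#include <cassert>
#include <cstdio>

struct Ref { Ref* pending_next = nullptr; };

struct ToyQueue {
  Ref* list_ = nullptr;  // Tail of the cycle; tail->pending_next is the head.

  void EnqueuePending(Ref* ref) {
    if (list_ == nullptr) {
      ref->pending_next = ref;  // 1-element cyclic queue: ref.pendingNext = ref.
      list_ = ref;
    } else {
      Ref* head = list_->pending_next;
      ref->pending_next = head;   // New ref becomes the head...
      list_->pending_next = ref;  // ...linked in right after the tail.
    }
  }

  Ref* DequeuePending() {
    assert(list_ != nullptr);
    Ref* head = list_->pending_next;
    Ref* ref;
    if (list_ == head) {  // Last element: drop the cycle entirely.
      ref = list_;
      list_ = nullptr;
    } else {
      list_->pending_next = head->pending_next;  // Unlink the head.
      ref = head;
    }
    ref->pending_next = nullptr;  // As in DequeuePendingReference above.
    return ref;
  }
};

int main() {
  Ref a, b, c;
  ToyQueue q;
  q.EnqueuePending(&a);
  q.EnqueuePending(&b);
  q.EnqueuePending(&c);
  // Head-insert plus head-remove: newest-first order (c, b, a).
  assert(q.DequeuePending() == &c);
  assert(q.DequeuePending() == &b);
  assert(q.DequeuePending() == &a);
  std::puts("cyclic pendingNext list behaves as sketched");
}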
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
new file mode 100644
index 0000000000..89589c39f6
--- /dev/null
+++ b/runtime/gc/reference_queue.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_
+#define ART_RUNTIME_GC_REFERENCE_QUEUE_H_
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "atomic_integer.h"
+#include "base/timing_logger.h"
+#include "globals.h"
+#include "gtest/gtest.h"
+#include "jni.h"
+#include "locks.h"
+#include "offsets.h"
+#include "root_visitor.h"
+#include "thread_pool.h"
+
+namespace art {
+namespace gc {
+
+class Heap;
+
+// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
+// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained in the
+// java.lang.ref.Reference objects.
+class ReferenceQueue {
+ public:
+ explicit ReferenceQueue(Heap* heap);
+  // Enqueue a reference if it is not already enqueued. Thread safe to call from multiple threads
+  // since it uses a lock to avoid a race between checking for the reference's presence and
+  // adding it.
+ void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+  // Enqueue a reference. Unlike EnqueuePendingReference, EnqueueReference checks that the
+  // reference is enqueuable. Not thread safe; used when mutators are paused to minimize lock
+  // overhead.
+ void EnqueueReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueuePendingReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Enqueues finalizer references with white referents. White referents are blackened, moved to the
+ // zombie field, and the referent field is cleared.
+ void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
+ RootVisitor is_marked_callback,
+ RootVisitor recursive_mark_callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Walks the reference list marking any references subject to the reference clearing policy.
+ // References with a black referent are removed from the list. References with white referents
+ // biased toward saving are blackened and also removed from the list.
+ void PreserveSomeSoftReferences(RootVisitor preserve_callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Unlink the reference list, clearing reference objects with white referents. Cleared references
+  // registered to a reference queue are scheduled for appending by the heap worker thread.
+ void ClearWhiteReferences(ReferenceQueue& cleared_references, RootVisitor visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsEmpty() const {
+ return list_ == nullptr;
+ }
+ void Clear() {
+ list_ = nullptr;
+ }
+ mirror::Object* GetList() {
+ return list_;
+ }
+
+ private:
+ // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
+ // calling AtomicEnqueueIfNotEnqueued.
+ Mutex lock_;
+ // The heap contains the reference offsets.
+ Heap* const heap_;
+ // The actual reference list. Not a root since it will be nullptr when the GC is not running.
+ mirror::Object* list_;
+};
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_REFERENCE_QUEUE_H_
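
The visitor contract these methods rely on is visible from the call sites in reference_queue.cc: a RootVisitor is invoked as visitor(referent, arg) and returns nullptr for a white (unmarked) referent, the same pointer for a referent marked in place, or a new address if the collector moved it. A toy visitor under those assumptions (the MarkTable and stand-in Obj type are invented for illustration):

#include <cassert>
#include <unordered_map>

struct Obj {};  // Stand-in for mirror::Object.

// Toy mark state: absent = white, mapped to itself = marked in place,
// mapped to another Obj = marked and moved by a copying collector.
using MarkTable = std::unordered_map<Obj*, Obj*>;

// Matches the shape the queue methods above assume for RootVisitor.
Obj* IsMarkedVisitor(Obj* obj, void* arg) {
  MarkTable* marks = static_cast<MarkTable*>(arg);
  auto it = marks->find(obj);
  return it == marks->end() ? nullptr : it->second;
}

int main() {
  Obj live, moved_from, moved_to, dead;
  MarkTable marks = {{&live, &live}, {&moved_from, &moved_to}};
  assert(IsMarkedVisitor(&live, &marks) == &live);            // black, in place
  assert(IsMarkedVisitor(&moved_from, &marks) == &moved_to);  // black, moved
  assert(IsMarkedVisitor(&dead, &marks) == nullptr);          // white: clear
}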
diff --git a/runtime/mapping_table.h b/runtime/mapping_table.h
index 9955f30d40..c468c1efb4 100644
--- a/runtime/mapping_table.h
+++ b/runtime/mapping_table.h
@@ -30,7 +30,7 @@ class MappingTable {
uint32_t TotalSize() const PURE {
const uint8_t* table = encoded_table_;
- if (table == NULL) {
+ if (table == nullptr) {
return 0;
} else {
return DecodeUnsignedLeb128(&table);
@@ -39,7 +39,7 @@ class MappingTable {
uint32_t DexToPcSize() const PURE {
const uint8_t* table = encoded_table_;
- if (table == NULL) {
+ if (table == nullptr) {
return 0;
} else {
uint32_t total_size = DecodeUnsignedLeb128(&table);
@@ -50,9 +50,11 @@ class MappingTable {
const uint8_t* FirstDexToPcPtr() const {
const uint8_t* table = encoded_table_;
- if (table != NULL) {
- DecodeUnsignedLeb128(&table); // Total_size, unused.
+ if (table != nullptr) {
+ uint32_t total_size = DecodeUnsignedLeb128(&table);
uint32_t pc_to_dex_size = DecodeUnsignedLeb128(&table);
+ // We must have dex to pc entries or else the loop will go beyond the end of the table.
+ DCHECK_GT(total_size, pc_to_dex_size);
for (uint32_t i = 0; i < pc_to_dex_size; ++i) {
DecodeUnsignedLeb128(&table); // Move ptr past native PC.
DecodeUnsignedLeb128(&table); // Move ptr past dex PC.
@@ -64,15 +66,15 @@ class MappingTable {
class DexToPcIterator {
public:
DexToPcIterator(const MappingTable* table, uint32_t element) :
- table_(table), element_(element), end_(table_->DexToPcSize()), encoded_table_ptr_(NULL),
+ table_(table), element_(element), end_(table_->DexToPcSize()), encoded_table_ptr_(nullptr),
native_pc_offset_(0), dex_pc_(0) {
- if (element == 0) {
- encoded_table_ptr_ = table_->FirstDexToPcPtr();
+ if (element == 0) { // An iterator wanted from the start.
if (end_ > 0) {
+ encoded_table_ptr_ = table_->FirstDexToPcPtr();
native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
}
- } else {
+ } else { // An iterator wanted from the end.
DCHECK_EQ(table_->DexToPcSize(), element);
}
}
@@ -102,7 +104,7 @@ class MappingTable {
const MappingTable* const table_; // The original table.
uint32_t element_; // A value in the range 0 to end_.
const uint32_t end_; // Equal to table_->DexToPcSize().
- const uint8_t* encoded_table_ptr_; // Either NULL or points to encoded data after this entry.
+ const uint8_t* encoded_table_ptr_; // Either nullptr or points to encoded data after this entry.
uint32_t native_pc_offset_; // The current value of native pc offset.
uint32_t dex_pc_; // The current value of dex pc.
};
@@ -118,7 +120,7 @@ class MappingTable {
uint32_t PcToDexSize() const PURE {
const uint8_t* table = encoded_table_;
- if (table == NULL) {
+ if (table == nullptr) {
return 0;
} else {
DecodeUnsignedLeb128(&table); // Total_size, unused.
@@ -129,7 +131,7 @@ class MappingTable {
const uint8_t* FirstPcToDexPtr() const {
const uint8_t* table = encoded_table_;
- if (table != NULL) {
+ if (table != nullptr) {
DecodeUnsignedLeb128(&table); // Total_size, unused.
DecodeUnsignedLeb128(&table); // PC to Dex size, unused.
}
@@ -139,15 +141,15 @@ class MappingTable {
class PcToDexIterator {
public:
PcToDexIterator(const MappingTable* table, uint32_t element) :
- table_(table), element_(element), end_(table_->PcToDexSize()), encoded_table_ptr_(NULL),
+ table_(table), element_(element), end_(table_->PcToDexSize()), encoded_table_ptr_(nullptr),
native_pc_offset_(0), dex_pc_(0) {
- if (element == 0) {
- encoded_table_ptr_ = table_->FirstPcToDexPtr();
+ if (element == 0) { // An iterator wanted from the start.
if (end_ > 0) {
+ encoded_table_ptr_ = table_->FirstPcToDexPtr();
native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
}
- } else {
+ } else { // An iterator wanted from the end.
DCHECK_EQ(table_->PcToDexSize(), element);
}
}
@@ -177,7 +179,7 @@ class MappingTable {
const MappingTable* const table_; // The original table.
uint32_t element_; // A value in the range 0 to PcToDexSize.
const uint32_t end_; // Equal to table_->PcToDexSize().
- const uint8_t* encoded_table_ptr_; // Either NULL or points to encoded data after this entry.
+ const uint8_t* encoded_table_ptr_; // Either null or points to encoded data after this entry.
uint32_t native_pc_offset_; // The current value of native pc offset.
uint32_t dex_pc_; // The current value of dex pc.
};
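For reference, the accessors above all walk the same ULEB128-encoded layout: total_size, then pc_to_dex_size, then pc_to_dex_size (native PC, dex PC) pairs, with the remaining total_size - pc_to_dex_size pairs forming the dex-to-PC section that FirstDexToPcPtr() skips to. A sketch decoding a hand-built table under that reading (the entry values are invented):

#include <cassert>
#include <cstdint>

// Minimal ULEB128 decoder with the same shape as DecodeUnsignedLeb128:
// 7 payload bits per byte, high bit set means another byte follows.
static uint32_t DecodeUleb128(const uint8_t** ptr) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*ptr)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

int main() {
  const uint8_t table[] = {
      4,              // total_size: 4 entries overall
      1,              // pc_to_dex_size: 1 PC->dex entry
      8, 3,           // PC->dex pair: native PC 8, dex PC 3
      16, 5,          // dex->PC pair #1
      0xa4, 0x01, 7,  // dex->PC pair #2: 0xa4 0x01 decodes to 164
      32, 9,          // dex->PC pair #3
  };
  const uint8_t* p = table;
  uint32_t total_size = DecodeUleb128(&p);
  uint32_t pc_to_dex_size = DecodeUleb128(&p);
  assert(total_size == 4 && pc_to_dex_size == 1);
  assert(total_size > pc_to_dex_size);  // The DCHECK_GT added above.
  for (uint32_t i = 0; i < pc_to_dex_size; ++i) {
    DecodeUleb128(&p);  // Skip native PC, as FirstDexToPcPtr() does.
    DecodeUleb128(&p);  // Skip dex PC.
  }
  assert(DecodeUleb128(&p) == 16 && DecodeUleb128(&p) == 5);
  assert(DecodeUleb128(&p) == 164 && DecodeUleb128(&p) == 7);
}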
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index aca0561a77..67fcd58220 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -43,6 +43,8 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
ThreadPoolWorker::~ThreadPoolWorker() {
CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "thread pool worker shutdown");
+  // TODO: Delete this when the race condition in pthread_join is fixed.
+ usleep(500);
}
void ThreadPoolWorker::Run() {
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index a62e835b1c..9c9673aafd 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -262,11 +262,11 @@ void RegTypeCache::ShutDown() {
FloatType::Destroy();
DoubleLoType::Destroy();
DoubleHiType::Destroy();
- for (uint16_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+ for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
delete type;
+ small_precise_constants_[value - kMinSmallConstant] = nullptr;
}
-
RegTypeCache::primitive_initialized_ = false;
RegTypeCache::primitive_count_ = 0;
}
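
The loop index change above matters because kMinSmallConstant is, to the best of my knowledge, negative in this cache (it covers a small band of constants below zero; the exact bounds below are assumed for illustration). Assigning a negative bound to a uint16_t wraps it to a large value, so the old destroy loop never executed and the cached types leaked. A sketch of the difference:

#include <cstdint>
#include <cstdio>

// Illustrative values only; the real constants live in the verifier headers.
constexpr int32_t kMinSmallConstant = -1;
constexpr int32_t kMaxSmallConstant = 10;

int main() {
  int unsigned_iterations = 0;
  for (uint16_t value = kMinSmallConstant;  // -1 wraps to 65535.
       value <= kMaxSmallConstant; ++value) {
    ++unsigned_iterations;  // Never reached: 65535 <= 10 is false.
  }
  int signed_iterations = 0;
  for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
    ++signed_iterations;  // Runs 12 times, as the fixed loop does.
  }
  std::printf("uint16_t: %d iterations, int32_t: %d\n",
              unsigned_iterations, signed_iterations);
}

The hunk also nulls each slot after deletion, so a later pass over the array would delete only null pointers, which is a no-op.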