-rw-r--r--  build/Android.common_test.mk | 24
-rw-r--r--  build/Android.cpplint.mk | 5
-rw-r--r--  compiler/driver/compiler_driver.cc | 3
-rw-r--r--  compiler/image_test.cc | 6
-rw-r--r--  compiler/image_writer.cc | 18
-rw-r--r--  compiler/image_writer.h | 8
-rw-r--r--  compiler/jit/jit_compiler.cc | 5
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 7
-rw-r--r--  compiler/optimizing/inliner.cc | 20
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 2
-rw-r--r--  compiler/optimizing/nodes.cc | 29
-rw-r--r--  compiler/optimizing/nodes.h | 3
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 4
-rw-r--r--  dex2oat/dex2oat.cc | 86
-rw-r--r--  runtime/art_method-inl.h | 6
-rw-r--r--  runtime/art_method.cc | 44
-rw-r--r--  runtime/art_method.h | 10
-rw-r--r--  runtime/base/timing_logger.cc | 2
-rw-r--r--  runtime/class_linker.cc | 2
-rw-r--r--  runtime/debugger.cc | 10
-rw-r--r--  runtime/dex_instruction.cc | 2
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h | 110
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 241
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 4
-rw-r--r--  runtime/instrumentation.cc | 358
-rw-r--r--  runtime/instrumentation.h | 21
-rw-r--r--  runtime/interpreter/interpreter_common.h | 44
-rw-r--r--  runtime/jit/jit_code_cache.cc | 38
-rw-r--r--  runtime/jit/jit_instrumentation.cc | 6
-rw-r--r--  runtime/lambda/leaking_allocator.cc | 7
-rw-r--r--  runtime/lambda/leaking_allocator.h | 23
-rw-r--r--  runtime/mirror/object.h | 2
-rw-r--r--  runtime/mirror/object_array-inl.h | 2
-rw-r--r--  runtime/oat_file_assistant.cc | 45
-rw-r--r--  runtime/oat_file_assistant_test.cc | 32
-rw-r--r--  runtime/read_barrier-inl.h | 2
-rw-r--r--  runtime/stack.cc | 24
-rw-r--r--  runtime/thread_list.cc | 3
-rw-r--r--  runtime/verifier/method_verifier.cc | 37
-rw-r--r--  test/004-ReferenceMap/stack_walk_refmap_jni.cc | 7
-rwxr-xr-x  test/004-ThreadStress/run | 19
-rw-r--r--  test/449-checker-bce/src/Main.java | 5
-rw-r--r--  test/542-inline-trycatch/expected.txt | 0
-rw-r--r--  test/542-inline-trycatch/info.txt | 1
-rw-r--r--  test/542-inline-trycatch/src/Main.java | 178
-rw-r--r--  test/542-unresolved-access-check/expected.txt | 0
-rw-r--r--  test/542-unresolved-access-check/info.txt | 1
-rw-r--r--  test/542-unresolved-access-check/src/Main.java | 104
-rw-r--r--  test/542-unresolved-access-check/src/p1/InP1.java | 93
-rw-r--r--  test/542-unresolved-access-check/src/p1/OtherInP1.java | 32
-rw-r--r--  test/542-unresolved-access-check/src/p1/PlaceHolder.java | 24
-rw-r--r--  test/545-tracing-and-jit/expected.txt | 0
-rw-r--r--  test/545-tracing-and-jit/info.txt | 2
-rw-r--r--  test/545-tracing-and-jit/src/Main.java | 251
-rw-r--r--  test/962-iface-static/smali/Displayer.smali | 2
-rw-r--r--  test/962-iface-static/smali/iface.smali | 2
-rwxr-xr-x  test/964-default-iface-init-generated/util-src/generate_smali.py | 2
-rw-r--r--  test/Android.run-test.mk | 7
-rwxr-xr-x  test/run-test | 73
-rw-r--r--  tools/ahat/README.txt | 3
-rw-r--r--  tools/ahat/src/InstanceUtils.java | 48
-rw-r--r--  tools/ahat/src/Value.java | 31
-rw-r--r--  tools/ahat/src/manifest.txt | 2
-rw-r--r--  tools/ahat/test-dump/Main.java | 6
-rw-r--r--  tools/ahat/test/InstanceUtilsTest.java | 49
-rwxr-xr-x  tools/cpplint.py | 55
-rwxr-xr-x  tools/run-libcore-tests.sh | 5
67 files changed, 1715 insertions(+), 582 deletions(-)
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 2f43f5f809..420db43cbc 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -40,6 +40,9 @@ ART_TEST_KEEP_GOING ?= true
# Do you want all tests, even those that are time consuming?
ART_TEST_FULL ?= false
+# Do you want run-test to be quieter? run-tests will only show output if they fail.
+ART_TEST_QUIET ?= true
+
# Do you want default compiler tests run?
ART_TEST_DEFAULT_COMPILER ?= true
@@ -116,12 +119,25 @@ define ART_TEST_FAILED
|| (echo -e "$(1) \e[91mFAILED\e[0m" >&2 )))
endef
+ifeq ($(ART_TEST_QUIET),true)
+ ART_TEST_ANNOUNCE_PASS := ( true )
+ ART_TEST_ANNOUNCE_RUN := ( true )
+ ART_TEST_ANNOUNCE_SKIP_FAILURE := ( true )
+ ART_TEST_ANNOUNCE_SKIP_BROKEN := ( true )
+else
+ # Note the use of '=' and not ':=' is intentional since these are actually functions.
+ ART_TEST_ANNOUNCE_PASS = ( echo -e "$(1) \e[92mPASSED\e[0m" )
+ ART_TEST_ANNOUNCE_RUN = ( echo -e "$(1) \e[95mRUNNING\e[0m")
+ ART_TEST_ANNOUNCE_SKIP_FAILURE = ( echo -e "$(1) \e[93mSKIPPING DUE TO EARLIER FAILURE\e[0m" )
+ ART_TEST_ANNOUNCE_SKIP_BROKEN = ( echo -e "$(1) \e[93mSKIPPING BROKEN TEST\e[0m" )
+endif
+
# Define the command run on test success. $(1) is the name of the test. Executed by the shell.
# The command checks prints "PASSED" then checks to see if this was a top-level make target (e.g.
# "mm test-art-host-oat-HelloWorld32"), if it was then it does nothing, otherwise it creates a file
# to be printed in the passing test summary.
define ART_TEST_PASSED
- ( echo -e "$(1) \e[92mPASSED\e[0m" && \
+ ( $(call ART_TEST_ANNOUNCE_PASS,$(1)) && \
(echo $(MAKECMDGOALS) | grep -q $(1) || \
(mkdir -p $(ART_HOST_TEST_DIR)/passed/ && touch $(ART_HOST_TEST_DIR)/passed/$(1))))
endef
@@ -150,11 +166,11 @@ endef
define ART_TEST_SKIP
((echo $(ART_TEST_KNOWN_BROKEN) | grep -q -v $(1) \
&& ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] || [ $(ART_TEST_KEEP_GOING) = true ])\
- && echo -e "$(1) \e[95mRUNNING\e[0m") \
+ && $(call ART_TEST_ANNOUNCE_RUN,$(1)) ) \
|| ((mkdir -p $(ART_HOST_TEST_DIR)/skipped/ && touch $(ART_HOST_TEST_DIR)/skipped/$(1) \
&& ([ -d $(ART_HOST_TEST_DIR)/failed/ ] \
- && echo -e "$(1) \e[93mSKIPPING DUE TO EARLIER FAILURE\e[0m") \
- || echo -e "$(1) \e[93mSKIPPING BROKEN TEST\e[0m") && false))
+ && $(call ART_TEST_ANNOUNCE_SKIP_FAILURE,$(1)) ) \
+ || $(call ART_TEST_ANNOUNCE_SKIP_BROKEN,$(1)) ) && false))
endef
# Create a build rule to create the dex file for a test.
diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk
index 79f8f5eeaf..953cfc091c 100644
--- a/build/Android.cpplint.mk
+++ b/build/Android.cpplint.mk
@@ -18,6 +18,7 @@ include art/build/Android.common_build.mk
ART_CPPLINT := art/tools/cpplint.py
ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf
+ART_CPPLINT_FLAGS := --quiet
ART_CPPLINT_SRC := $(shell find art -name "*.h" -o -name "*$(ART_CPP_EXTENSION)" | grep -v art/compiler/llvm/generated/ | grep -v art/runtime/elf\.h)
# "mm cpplint-art" to verify we aren't regressing
@@ -39,8 +40,8 @@ art_cpplint_file := $(1)
art_cpplint_touch := $$(OUT_CPPLINT)/$$(subst /,__,$$(art_cpplint_file))
$$(art_cpplint_touch): $$(art_cpplint_file) $(ART_CPPLINT) art/build/Android.cpplint.mk
- $(hide) $(ART_CPPLINT) $(ART_CPPLINT_FILTER) $$<
- @mkdir -p $$(dir $$@)
+ $(hide) $(ART_CPPLINT) $(ART_CPPLINT_FLAGS) $(ART_CPPLINT_FILTER) $$<
+ $(hide) mkdir -p $$(dir $$@)
$(hide) touch $$@
ART_CPPLINT_TARGETS += $$(art_cpplint_touch)
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index fb116bb3da..d055b37ea7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -697,6 +697,9 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
}
CompiledMethod* CompilerDriver::CompileArtMethod(Thread* self, ArtMethod* method) {
+ DCHECK_EQ(method,
+ method->GetInterfaceMethodIfProxy(
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize()));
const uint32_t method_idx = method->GetDexMethodIndex();
const uint32_t access_flags = method->GetAccessFlags();
const InvokeType invoke_type = method->GetInvokeType();
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 21d582eec4..fd6cd82f7c 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -97,8 +97,10 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(dup_oat.get() != nullptr);
{
- bool success_image =
- writer->Write(image_file.GetFilename(), dup_oat->GetPath(), dup_oat->GetPath());
+ bool success_image = writer->Write(kInvalidImageFd,
+ image_file.GetFilename(),
+ dup_oat->GetPath(),
+ dup_oat->GetPath());
ASSERT_TRUE(success_image);
bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin());
ASSERT_TRUE(success_fixup);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 0e5a97ffbd..af2a4f9426 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -122,7 +122,8 @@ bool ImageWriter::PrepareImageAddressSpace() {
return true;
}
-bool ImageWriter::Write(const std::string& image_filename,
+bool ImageWriter::Write(int image_fd,
+ const std::string& image_filename,
const std::string& oat_filename,
const std::string& oat_location) {
CHECK(!image_filename.empty());
@@ -178,10 +179,13 @@ bool ImageWriter::Write(const std::string& image_filename,
LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location;
return false;
}
-
- std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
- ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- if (image_file.get() == nullptr) {
+ std::unique_ptr<File> image_file;
+ if (image_fd != kInvalidImageFd) {
+ image_file.reset(new File(image_fd, image_filename, unix_file::kCheckSafeUsage));
+ } else {
+ image_file.reset(OS::CreateEmptyFile(image_filename.c_str()));
+ }
+ if (image_file == nullptr) {
LOG(ERROR) << "Failed to open image file " << image_filename;
return false;
}
@@ -192,6 +196,7 @@ bool ImageWriter::Write(const std::string& image_filename,
}
// Write out the image + fields + methods.
+ ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
const auto write_count = image_header->GetImageSize();
if (!image_file->WriteFully(image_->Begin(), write_count)) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
@@ -200,7 +205,8 @@ bool ImageWriter::Write(const std::string& image_filename,
}
// Write out the image bitmap at the page aligned start of the image end.
- const ImageSection& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
+ const ImageSection& bitmap_section = image_header->GetImageSection(
+ ImageHeader::kSectionImageBitmap);
CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
bitmap_section.Size(), bitmap_section.Offset())) {
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index e235bc4553..7a2febcea1 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -41,6 +41,8 @@
namespace art {
+static constexpr int kInvalidImageFd = -1;
+
// Write a Space built during compilation for use during execution.
class ImageWriter FINAL {
public:
@@ -89,7 +91,11 @@ class ImageWriter FINAL {
uint8_t* GetOatFileBegin() const;
- bool Write(const std::string& image_filename, const std::string& oat_filename,
+ // If image_fd is not kInvalidImageFd, then we use that for the file. Otherwise we open
+ // image_filename.
+ bool Write(int image_fd,
+ const std::string& image_filename,
+ const std::string& oat_filename,
const std::string& oat_location)
REQUIRES(!Locks::mutator_lock_);
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index c1b87c9cd0..d520208d32 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -192,7 +192,10 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
CompiledMethod* compiled_method = nullptr;
{
TimingLogger::ScopedTiming t2("Compiling", &logger);
- compiled_method = compiler_driver_->CompileArtMethod(self, method);
+ // If we get a request to compile a proxy method, we pass the actual Java method
+ // of that proxy method, as the compiler does not expect a proxy method.
+ ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ compiled_method = compiler_driver_->CompileArtMethod(self, method_to_compile);
}
// Trim maps to reduce memory usage.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 29d08beb97..8106499c02 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1732,12 +1732,11 @@ void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
}
void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
- Primitive::Type value_type = instruction->GetComponentType();
- bool is_object = value_type == Primitive::kPrimNot;
+ bool needs_runtime_call = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
- if (is_object) {
+ needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (needs_runtime_call) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 0aaa6b3f2c..353881e47a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -494,6 +494,26 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
<< " it is in a different dex file and requires access to the dex cache";
return false;
}
+
+ if (current->IsNewInstance() &&
+ (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) {
+ // Allocation entrypoint does not handle inlined frames.
+ return false;
+ }
+
+ if (current->IsNewArray() &&
+ (current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) {
+ // Allocation entrypoint does not handle inlined frames.
+ return false;
+ }
+
+ if (current->IsUnresolvedStaticFieldGet() ||
+ current->IsUnresolvedInstanceFieldGet() ||
+ current->IsUnresolvedStaticFieldSet() ||
+ current->IsUnresolvedInstanceFieldSet()) {
+ // Entrypoint for unresolved fields does not handle inlined frames.
+ return false;
+ }
}
}
number_of_inlined_instructions_ += number_of_instructions;
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 90f28e511e..6fbb6823d6 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -59,7 +59,7 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
(use->IsInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
(use->IsUnresolvedInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
(use->IsStaticFieldSet() && (reference_ == use->InputAt(1))) ||
- (use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(1))) ||
+ (use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(0))) ||
(use->IsArraySet() && (reference_ == use->InputAt(2)))) {
// reference_ is merged to a phi, passed to a callee, or stored to heap.
// reference_ isn't the only name that can refer to its value anymore.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 8b28ff91d4..68fb0acf7f 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1652,7 +1652,8 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// Update the meta information surrounding blocks:
// (1) the graph they are now in,
// (2) the reverse post order of that graph,
- // (3) the potential loop information they are now in.
+ // (3) the potential loop information they are now in,
+ // (4) try block membership.
// We don't add the entry block, the exit block, and the first block, which
// has been merged with `at`.
@@ -1668,41 +1669,47 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at);
MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at);
- // Do a reverse post order of the blocks in the callee and do (1), (2),
- // and (3) to the blocks that apply.
- HLoopInformation* info = at->GetLoopInformation();
+ HLoopInformation* loop_info = at->GetLoopInformation();
+ // Copy TryCatchInformation if `at` is a try block, not if it is a catch block.
+ TryCatchInformation* try_catch_info = at->IsTryBlock() ? at->GetTryCatchInformation() : nullptr;
+
+ // Do a reverse post order of the blocks in the callee and do (1), (2), (3)
+ // and (4) to the blocks that apply.
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
if (current != exit_block_ && current != entry_block_ && current != first) {
DCHECK(!current->IsInLoop());
+ DCHECK(current->GetTryCatchInformation() == nullptr);
DCHECK(current->GetGraph() == this);
current->SetGraph(outer_graph);
outer_graph->AddBlock(current);
outer_graph->reverse_post_order_[++index_of_at] = current;
- if (info != nullptr) {
- current->SetLoopInformation(info);
+ if (loop_info != nullptr) {
+ current->SetLoopInformation(loop_info);
for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
loop_it.Current()->Add(current);
}
}
+ current->SetTryCatchInformation(try_catch_info);
}
}
- // Do (1), (2), and (3) to `to`.
+ // Do (1), (2), (3) and (4) to `to`.
to->SetGraph(outer_graph);
outer_graph->AddBlock(to);
outer_graph->reverse_post_order_[++index_of_at] = to;
- if (info != nullptr) {
- to->SetLoopInformation(info);
+ if (loop_info != nullptr) {
+ to->SetLoopInformation(loop_info);
for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
loop_it.Current()->Add(to);
}
- if (info->IsBackEdge(*at)) {
+ if (loop_info->IsBackEdge(*at)) {
// Only `to` can become a back edge, as the inlined blocks
// are predecessors of `to`.
- info->ReplaceBackEdge(at, to);
+ loop_info->ReplaceBackEdge(at, to);
}
}
+ to->SetTryCatchInformation(try_catch_info);
}
// Update the next instruction id of the outer graph, so that instructions
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7df586692b..0f2c1cffee 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4750,6 +4750,9 @@ class HLoadClass : public HExpression<1> {
return generate_clinit_check_;
}
void SetMustGenerateClinitCheck(bool generate_clinit_check) {
+ // The entrypoint the code generator is going to call does not do
+ // clinit of the class.
+ DCHECK(!NeedsAccessCheck());
generate_clinit_check_ = generate_clinit_check;
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6632f95ebe..8cb2cfc816 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -492,6 +492,8 @@ static void RunOptimizations(HGraph* graph,
RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
+ MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
+
// TODO: Update passes incompatible with try/catch so we have the same
// pipeline for all methods.
if (graph->HasTryCatch()) {
@@ -507,8 +509,6 @@ static void RunOptimizations(HGraph* graph,
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
} else {
- MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
-
HOptimization* optimizations2[] = {
// BooleanSimplifier depends on the InstructionSimplifier removing
// redundant suspend checks to recognize empty blocks.
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8773169010..2653807369 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -126,11 +126,12 @@ static std::string StrippedCommandLine() {
// However, we prefer to drop this when we saw --zip-fd.
if (saw_zip_fd) {
- // Drop anything --zip-X, --dex-X, --oat-X, --swap-X.
+ // Drop anything --zip-X, --dex-X, --oat-X, --swap-X, or --app-image-X
if (StartsWith(original_argv[i], "--zip-") ||
StartsWith(original_argv[i], "--dex-") ||
StartsWith(original_argv[i], "--oat-") ||
- StartsWith(original_argv[i], "--swap-")) {
+ StartsWith(original_argv[i], "--swap-") ||
+ StartsWith(original_argv[i], "--app-image-")) {
continue;
}
}
@@ -336,6 +337,12 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" --swap-fd=<file-descriptor>: specifies a file to use for swap (by descriptor).");
UsageError(" Example: --swap-fd=10");
UsageError("");
+ UsageError(" --app-image-fd=<file-descriptor>: specify output file descriptor for app image.");
+ UsageError(" Example: --app-image-fd=10");
+ UsageError("");
+ UsageError(" --app-image-file=<file-name>: specify a file name for app image.");
+ UsageError(" Example: --app-image-file=/data/dalvik-cache/system@app@Calculator.apk.art");
+ UsageError("");
std::cerr << "See log for usage error information\n";
exit(EXIT_FAILURE);
}
@@ -484,7 +491,8 @@ class Dex2Oat FINAL {
compiled_classes_filename_(nullptr),
compiled_methods_zip_filename_(nullptr),
compiled_methods_filename_(nullptr),
- image_(false),
+ app_image_(false),
+ boot_image_(false),
is_host_(false),
driver_(nullptr),
dump_stats_(false),
@@ -493,6 +501,7 @@ class Dex2Oat FINAL {
dump_slow_timing_(kIsDebugBuild),
dump_cfg_append_(false),
swap_fd_(-1),
+ app_image_fd_(kInvalidImageFd),
timings_(timings) {}
~Dex2Oat() {
@@ -608,13 +617,15 @@ class Dex2Oat FINAL {
}
}
- void ParseSwapFd(const StringPiece& option) {
- ParseUintOption(option, "--swap-fd", &swap_fd_, Usage);
- }
-
void ProcessOptions(ParserOptions* parser_options) {
- image_ = (!image_filename_.empty());
- if (image_) {
+ boot_image_ = !image_filename_.empty();
+ app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
+
+ if (IsAppImage() && IsBootImage()) {
+ Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
+ }
+
+ if (IsBootImage()) {
// We need the boot image to always be debuggable.
compiler_options_->debuggable_ = true;
}
@@ -647,7 +658,7 @@ class Dex2Oat FINAL {
android_root_ += android_root_env_var;
}
- if (!image_ && parser_options->boot_image_filename.empty()) {
+ if (!boot_image_ && parser_options->boot_image_filename.empty()) {
parser_options->boot_image_filename += android_root_;
parser_options->boot_image_filename += "/framework/boot.art";
}
@@ -656,7 +667,7 @@ class Dex2Oat FINAL {
boot_image_option_ += parser_options->boot_image_filename;
}
- if (image_classes_filename_ != nullptr && !image_) {
+ if (image_classes_filename_ != nullptr && !IsBootImage()) {
Usage("--image-classes should only be used with --image");
}
@@ -668,7 +679,7 @@ class Dex2Oat FINAL {
Usage("--image-classes-zip should be used with --image-classes");
}
- if (compiled_classes_filename_ != nullptr && !image_) {
+ if (compiled_classes_filename_ != nullptr && !IsBootImage()) {
Usage("--compiled-classes should only be used with --image");
}
@@ -912,7 +923,11 @@ class Dex2Oat FINAL {
} else if (option.starts_with("--swap-file=")) {
swap_file_name_ = option.substr(strlen("--swap-file=")).data();
} else if (option.starts_with("--swap-fd=")) {
- ParseSwapFd(option);
+ ParseUintOption(option, "--swap-fd", &swap_fd_, Usage);
+ } else if (option.starts_with("--app-image-file=")) {
+ app_image_file_name_ = option.substr(strlen("--app-image-file=")).data();
+ } else if (option.starts_with("--app-image-fd=")) {
+ ParseUintOption(option, "--app-image-fd", &app_image_fd_, Usage);
} else if (option.starts_with("--verbose-methods=")) {
// TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
// conditional on having verbost methods.
@@ -974,7 +989,6 @@ class Dex2Oat FINAL {
// released immediately.
unlink(swap_file_name_.c_str());
}
-
return true;
}
@@ -1016,7 +1030,7 @@ class Dex2Oat FINAL {
callbacks_.reset(new QuickCompilerCallbacks(
verification_results_.get(),
&method_inliner_map_,
- image_ ?
+ IsBootImage() ?
CompilerCallbacks::CallbackMode::kCompileBootImage :
CompilerCallbacks::CallbackMode::kCompileApp));
runtime_options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
@@ -1026,7 +1040,7 @@ class Dex2Oat FINAL {
// Only allow no boot image for the runtime if we're compiling one. When we compile an app,
// we don't want fallback mode, it will abort as we do not push a boot classpath (it might
// have been stripped in preopting, anyways).
- if (!image_) {
+ if (!IsBootImage()) {
runtime_options.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
}
// Disable libsigchain. We don't don't need it during compilation and it prevents us
@@ -1065,7 +1079,7 @@ class Dex2Oat FINAL {
"': " << error_msg;
return false;
}
- } else if (image_) {
+ } else if (IsBootImage()) {
image_classes_.reset(new std::unordered_set<std::string>);
}
// If --compiled-classes was specified, calculate the full list of classes to compile in the
@@ -1178,7 +1192,7 @@ class Dex2Oat FINAL {
// If we use a swap file, ensure we are above the threshold to make it necessary.
if (swap_fd_ != -1) {
- if (!UseSwap(image_, dex_files_)) {
+ if (!UseSwap(IsBootImage(), dex_files_)) {
close(swap_fd_);
swap_fd_ = -1;
VLOG(compiler) << "Decided to run without swap.";
@@ -1192,7 +1206,7 @@ class Dex2Oat FINAL {
* If we're not in interpret-only or verify-none mode, go ahead and compile small applications.
* Don't bother to check if we're doing the image.
*/
- if (!image_ &&
+ if (!IsBootImage() &&
compiler_options_->IsCompilationEnabled() &&
compiler_kind_ == Compiler::kQuick) {
size_t num_methods = 0;
@@ -1246,7 +1260,7 @@ class Dex2Oat FINAL {
compiler_kind_,
instruction_set_,
instruction_set_features_.get(),
- image_,
+ IsBootImage(),
image_classes_.release(),
compiled_classes_.release(),
nullptr,
@@ -1341,7 +1355,7 @@ class Dex2Oat FINAL {
uint32_t image_file_location_oat_checksum = 0;
uintptr_t image_file_location_oat_data_begin = 0;
int32_t image_patch_delta = 0;
- if (image_) {
+ if (IsImage()) {
PrepareImageWriter(image_base_);
} else {
TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
@@ -1366,7 +1380,7 @@ class Dex2Oat FINAL {
key_value_store_.get()));
}
- if (image_) {
+ if (IsImage()) {
// The OatWriter constructor has already updated offsets in methods and we need to
// prepare method offsets in the image address space for direct method patching.
TimingLogger::ScopedTiming t2("dex2oat Prepare image address space", timings_);
@@ -1391,7 +1405,7 @@ class Dex2Oat FINAL {
// If we are compiling an image, invoke the image creation routine. Else just skip.
bool HandleImage() {
- if (image_) {
+ if (IsImage()) {
TimingLogger::ScopedTiming t("dex2oat ImageWriter", timings_);
if (!CreateImageFile()) {
return false;
@@ -1474,7 +1488,15 @@ class Dex2Oat FINAL {
}
bool IsImage() const {
- return image_;
+ return IsAppImage() || IsBootImage();
+ }
+
+ bool IsAppImage() const {
+ return app_image_;
+ }
+
+ bool IsBootImage() const {
+ return boot_image_;
}
bool IsHost() const {
@@ -1576,7 +1598,10 @@ class Dex2Oat FINAL {
bool CreateImageFile()
REQUIRES(!Locks::mutator_lock_) {
CHECK(image_writer_ != nullptr);
- if (!image_writer_->Write(image_filename_, oat_unstripped_, oat_location_)) {
+ if (!image_writer_->Write(app_image_fd_,
+ IsBootImage() ? image_filename_ : app_image_file_name_,
+ oat_unstripped_,
+ oat_location_)) {
LOG(ERROR) << "Failed to create image file " << image_filename_;
return false;
}
@@ -1585,8 +1610,8 @@ class Dex2Oat FINAL {
// Destroy ImageWriter before doing FixupElf.
image_writer_.reset();
- // Do not fix up the ELF file if we are --compile-pic
- if (!compiler_options_->GetCompilePic()) {
+ // Do not fix up the ELF file if we are --compile-pic or compiling the app image
+ if (!compiler_options_->GetCompilePic() && IsBootImage()) {
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str()));
if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_;
@@ -1748,7 +1773,8 @@ class Dex2Oat FINAL {
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
- bool image_;
+ bool app_image_;
+ bool boot_image_;
bool is_host_;
std::string android_root_;
std::vector<const DexFile*> dex_files_;
@@ -1767,6 +1793,8 @@ class Dex2Oat FINAL {
bool dump_cfg_append_;
std::string swap_file_name_;
int swap_fd_;
+ std::string app_image_file_name_;
+ int app_image_fd_;
std::string profile_file_; // Profile file to use
TimingLogger* timings_;
std::unique_ptr<CumulativeLogger> compiler_phases_timings_;
@@ -1895,7 +1923,7 @@ static int dex2oat(int argc, char** argv) {
// 3) Compiling with --host
// 4) Compiling on the host (not a target build)
// Otherwise, print a stripped command line.
- if (kIsDebugBuild || dex2oat.IsImage() || dex2oat.IsHost() || !kIsTargetBuild) {
+ if (kIsDebugBuild || dex2oat.IsBootImage() || dex2oat.IsHost() || !kIsTargetBuild) {
LOG(INFO) << CommandLine();
} else {
LOG(INFO) << StrippedCommandLine();
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index f741732046..cf548ada33 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -468,12 +468,6 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
}
}
-inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) {
- memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
- Size(image_pointer_size));
- declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
-}
-
} // namespace art
#endif // ART_RUNTIME_ART_METHOD_INL_H_
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index c1279bf6b1..2a8cf9965c 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -367,7 +367,11 @@ const uint8_t* ArtMethod::GetQuickenedInfo() {
}
const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
- if (IsRuntimeMethod() || IsProxyMethod()) {
+ // Our callers should make sure they don't pass the instrumentation exit pc,
+ // as this method does not look at the side instrumentation stack.
+ DCHECK_NE(pc, reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()));
+
+ if (IsRuntimeMethod()) {
return nullptr;
}
@@ -381,6 +385,12 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
return nullptr;
}
+ if (existing_entry_point == GetQuickProxyInvokeHandler()) {
+ DCHECK(IsProxyMethod() && !IsConstructor());
+ // The proxy entry point does not have any method header.
+ return nullptr;
+ }
+
// Check whether the current entry point contains this pc.
if (!class_linker->IsQuickResolutionStub(existing_entry_point) &&
!class_linker->IsQuickToInterpreterBridge(existing_entry_point)) {
@@ -428,7 +438,7 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
}
const void* oat_entry_point = oat_method.GetQuickCode();
if (oat_entry_point == nullptr || class_linker->IsQuickGenericJniStub(oat_entry_point)) {
- DCHECK(IsNative());
+ DCHECK(IsNative()) << PrettyMethod(this);
return nullptr;
}
@@ -439,12 +449,6 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
return method_header;
}
- if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
- // If we're instrumenting, just return the compiled OAT code.
- // TODO(ngeoffray): Avoid this call path.
- return method_header;
- }
-
DCHECK(method_header->Contains(pc))
<< PrettyMethod(this)
<< std::hex << pc << " " << oat_entry_point
@@ -452,4 +456,28 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
return method_header;
}
+
+void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) {
+ memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
+ Size(image_pointer_size));
+ declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
+
+ // If the entry point of the method we are copying from is from JIT code, we just
+ // put the entry point of the new method to interpreter. We could set the entry point
+ // to the JIT code, but this would require taking the JIT code cache lock to notify
+ // it, which we do not want at this level.
+ Runtime* runtime = Runtime::Current();
+ if (runtime->GetJit() != nullptr) {
+ if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) {
+ SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
+ }
+ }
+ // Clear the profiling info for the same reasons as the JIT code.
+ if (!src->IsNative()) {
+ SetProfilingInfoPtrSize(nullptr, image_pointer_size);
+ }
+ // Clear hotness to let the JIT properly decide when to compile this method.
+ hotness_count_ = 0;
+}
+
} // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 551989d182..ce9f2025ce 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -49,8 +49,8 @@ class ArtMethod FINAL {
ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
method_index_(0) { }
- ArtMethod(const ArtMethod& src, size_t image_pointer_size) {
- CopyFrom(&src, image_pointer_size);
+ ArtMethod(ArtMethod* src, size_t image_pointer_size) {
+ CopyFrom(src, image_pointer_size);
}
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
@@ -313,6 +313,10 @@ class ArtMethod FINAL {
SetEntryPointFromJniPtrSize(info, sizeof(void*));
}
+ ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, size_t pointer_size) {
+ SetEntryPointFromJniPtrSize(info, pointer_size);
+ }
+
static MemberOffset ProfilingInfoOffset() {
return EntryPointFromJniOffset(sizeof(void*));
}
@@ -429,7 +433,7 @@ class ArtMethod FINAL {
return pointer_size;
}
- void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
+ void CopyFrom(ArtMethod* src, size_t image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(size_t pointer_size)
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index f1f6f9b1c1..1942e1dc1b 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -125,7 +125,7 @@ void CumulativeLogger::DumpHistogram(std::ostream &os) const {
histogram->CreateHistogram(&cumulative_data);
histogram->PrintConfidenceIntervals(os, 0.99, cumulative_data);
}
- os << "Done Dumping histograms \n";
+ os << "Done Dumping histograms\n";
}
TimingLogger::TimingLogger(const char* name, bool precise, bool verbose)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 5de1cacba8..da70456369 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -5279,7 +5279,7 @@ bool ClassLinker::LinkInterfaceMethods(
miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
CHECK(miranda_method != nullptr);
// Point the interface table at a phantom slot.
- new(miranda_method) ArtMethod(*interface_method, image_pointer_size_);
+ new(miranda_method) ArtMethod(interface_method, image_pointer_size_);
miranda_methods.push_back(miranda_method);
}
method_array->SetElementPtrSize(j, miranda_method, image_pointer_size_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7117be9a54..e523fbb104 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1231,7 +1231,15 @@ JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_
return error;
}
Thread* self = Thread::Current();
- mirror::Object* new_object = c->AllocObject(self);
+ mirror::Object* new_object;
+ if (c->IsStringClass()) {
+ // Special case for java.lang.String.
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::SetStringCountVisitor visitor(0);
+ new_object = mirror::String::Alloc<true>(self, 0, allocator_type, visitor);
+ } else {
+ new_object = c->AllocObject(self);
+ }
if (new_object == nullptr) {
DCHECK(self->IsExceptionPending());
self->ClearException();
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 5250b0d79b..438b6b8109 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -333,7 +333,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
if (i != 0) {
os << ", ";
}
- os << "v" << arg[i+2]; // Don't print the pair of vC registers. Pair is implicit.
+ os << "v" << arg[i + 2]; // Don't print the pair of vC registers. Pair is implicit.
}
os << "}";
break;
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
new file mode 100644
index 0000000000..26f5ad3df5
--- /dev/null
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
+#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
+
+#include "concurrent_copying.h"
+
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/region_space.h"
+#include "lock_word.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
+ if (from_ref == nullptr) {
+ return nullptr;
+ }
+ DCHECK(heap_->collector_type_ == kCollectorTypeCC);
+ if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
+ // In the lock word forward address state, the read barrier bits
+ // in the lock word are part of the stored forwarding address and
+ // invalid. This is usually OK as the from-space copy of objects
+ // aren't accessed by mutators due to the to-space
+ // invariant. However, during the dex2oat image writing relocation
+ // and the zygote compaction, objects can be in the forward
+ // address state (to store the forward/relocation addresses) and
+ // they can still be accessed and the invalid read barrier bits
+ // are consulted. If they look like gray but aren't really, the
+ // read barriers slow path can trigger when it shouldn't. To guard
+ // against this, return here if the CC collector isn't running.
+ return from_ref;
+ }
+ DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
+ space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
+ switch (rtype) {
+ case space::RegionSpace::RegionType::kRegionTypeToSpace:
+ // It's already marked.
+ return from_ref;
+ case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
+ mirror::Object* to_ref = GetFwdPtr(from_ref);
+ if (kUseBakerReadBarrier) {
+ DCHECK_NE(to_ref, ReadBarrier::GrayPtr())
+ << "from_ref=" << from_ref << " to_ref=" << to_ref;
+ }
+ if (to_ref == nullptr) {
+ // It isn't marked yet. Mark it by copying it to the to-space.
+ to_ref = Copy(from_ref);
+ }
+ DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
+ << "from_ref=" << from_ref << " to_ref=" << to_ref;
+ return to_ref;
+ }
+ case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
+ // This may or may not succeed, which is ok.
+ if (kUseBakerReadBarrier) {
+ from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ }
+ mirror::Object* to_ref = from_ref;
+ if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
+ // Already marked.
+ } else {
+ // Newly marked.
+ if (kUseBakerReadBarrier) {
+ DCHECK_EQ(to_ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+ }
+ PushOntoMarkStack(to_ref);
+ }
+ return to_ref;
+ }
+ case space::RegionSpace::RegionType::kRegionTypeNone:
+ return MarkNonMoving(from_ref);
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
+ DCHECK(region_space_->IsInFromSpace(from_ref));
+ LockWord lw = from_ref->GetLockWord(false);
+ if (lw.GetState() == LockWord::kForwardingAddress) {
+ mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
+ DCHECK(fwd_ptr != nullptr);
+ return fwd_ptr;
+ } else {
+ return nullptr;
+ }
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 20e775c7aa..4a49712cbc 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -368,30 +368,15 @@ void ConcurrentCopying::MarkingPhase() {
}
}
}
- // TODO: Other garbage collectors uses Runtime::VisitConcurrentRoots(), refactor this part
- // to also use the same function.
{
- TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
- Runtime::Current()->VisitConstantRoots(this);
- }
- {
- TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
- Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
- }
- {
- TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
- Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
+ TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
+ Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
}
{
// TODO: don't visit the transaction roots if it's not active.
TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
Runtime::Current()->VisitNonThreadRoots(this);
}
- {
- TimingLogger::ScopedTiming split6("Dbg::VisitRoots", GetTimings());
- Dbg::VisitRoots(this);
- }
- Runtime::Current()->GetHeap()->VisitAllocationRecords(this);
// Immune spaces.
for (auto& space : heap_->GetContinuousSpaces()) {
@@ -594,8 +579,8 @@ void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites?
CHECK(thread_running_gc_ != nullptr);
MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
- if (mark_stack_mode == kMarkStackModeThreadLocal) {
- if (self == thread_running_gc_) {
+ if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
+ if (LIKELY(self == thread_running_gc_)) {
// If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
CHECK(self->GetThreadLocalMarkStack() == nullptr);
if (UNLIKELY(gc_mark_stack_->IsFull())) {
@@ -663,18 +648,6 @@ accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
return heap_->live_stack_.get();
}
-inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
- DCHECK(region_space_->IsInFromSpace(from_ref));
- LockWord lw = from_ref->GetLockWord(false);
- if (lw.GetState() == LockWord::kForwardingAddress) {
- mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
- CHECK(fwd_ptr != nullptr);
- return fwd_ptr;
- } else {
- return nullptr;
- }
-}
-
// The following visitors are that used to verify that there's no
// references to the from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
@@ -1080,7 +1053,7 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc
return count;
}
-void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
+inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
if (kUseBakerReadBarrier) {
DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
@@ -1095,9 +1068,10 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
<< " " << to_ref << " " << to_ref->GetReadBarrierPointer()
<< " is_marked=" << IsMarked(to_ref);
}
- if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
- to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
- !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
+#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
+ if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
+ to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
+ !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
// Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
// will change it to black or white later in ReferenceQueue::DequeuePendingReference().
CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
@@ -1106,14 +1080,13 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
// be concurrently marked after the Scan() call above has enqueued the Reference, in which case
// the above IsInToSpace() evaluates to true and we change the color from gray to black or white
// here in this else block.
-#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
if (kUseBakerReadBarrier) {
if (region_space_->IsInToSpace(to_ref)) {
// If to-space, change from gray to white.
bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
ReadBarrier::WhitePtr());
CHECK(success) << "Must succeed as we won the race.";
- CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
} else {
// If non-moving space/unevac from space, change from gray
// to black. We can't change gray to white because it's not
@@ -1125,13 +1098,13 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
ReadBarrier::BlackPtr());
CHECK(success) << "Must succeed as we won the race.";
- CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
}
}
+ }
#else
- DCHECK(!kUseBakerReadBarrier);
+ DCHECK(!kUseBakerReadBarrier);
#endif
- }
if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
visitor(to_ref);
@@ -1622,6 +1595,7 @@ class ConcurrentCopyingRefFieldsVisitor {
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
@@ -1629,6 +1603,7 @@ class ConcurrentCopyingRefFieldsVisitor {
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_) {
collector_->MarkRoot(root);
}
@@ -1638,7 +1613,7 @@ class ConcurrentCopyingRefFieldsVisitor {
};
// Scan ref fields of an object.
-void ConcurrentCopying::Scan(mirror::Object* to_ref) {
+inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
ConcurrentCopyingRefFieldsVisitor visitor(this);
to_ref->VisitReferences(visitor, visitor);
@@ -1648,9 +1623,6 @@ void ConcurrentCopying::Scan(mirror::Object* to_ref) {
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- return;
- }
mirror::Object* to_ref = Mark(ref);
if (to_ref == ref) {
return;
@@ -1669,14 +1641,11 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
}
// Process some roots.
-void ConcurrentCopying::VisitRoots(
+inline void ConcurrentCopying::VisitRoots(
mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
mirror::Object** root = roots[i];
mirror::Object* ref = *root;
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- continue;
- }
mirror::Object* to_ref = Mark(ref);
if (to_ref == ref) {
continue;
@@ -1693,12 +1662,9 @@ void ConcurrentCopying::VisitRoots(
}
}
-void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
+inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
DCHECK(!root->IsNull());
mirror::Object* const ref = root->AsMirrorPtr();
- if (region_space_->IsInToSpace(ref)) {
- return;
- }
mirror::Object* to_ref = Mark(ref);
if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
@@ -1714,7 +1680,7 @@ void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* ro
}
}
-void ConcurrentCopying::VisitRoots(
+inline void ConcurrentCopying::VisitRoots(
mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
@@ -2013,148 +1979,85 @@ bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
return alloc_stack->Contains(ref);
}
-mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
- if (from_ref == nullptr) {
- return nullptr;
- }
- DCHECK(from_ref != nullptr);
- DCHECK(heap_->collector_type_ == kCollectorTypeCC);
- if (kUseBakerReadBarrier && !is_active_) {
- // In the lock word forward address state, the read barrier bits
- // in the lock word are part of the stored forwarding address and
- // invalid. This is usually OK as the from-space copy of objects
- // aren't accessed by mutators due to the to-space
- // invariant. However, during the dex2oat image writing relocation
- // and the zygote compaction, objects can be in the forward
- // address state (to store the forward/relocation addresses) and
- // they can still be accessed and the invalid read barrier bits
- // are consulted. If they look like gray but aren't really, the
- // read barriers slow path can trigger when it shouldn't. To guard
- // against this, return here if the CC collector isn't running.
- return from_ref;
- }
- DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
- space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
- if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
- // It's already marked.
- return from_ref;
- }
- mirror::Object* to_ref;
- if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
- to_ref = GetFwdPtr(from_ref);
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
- }
- if (to_ref == nullptr) {
- // It isn't marked yet. Mark it by copying it to the to-space.
- to_ref = Copy(from_ref);
+mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
+ // ref is in a non-moving space (from_ref == to_ref).
+ DCHECK(!region_space_->HasAddress(ref)) << ref;
+ if (immune_region_.ContainsObject(ref)) {
+ accounting::ContinuousSpaceBitmap* cc_bitmap =
+ cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
+ DCHECK(cc_bitmap != nullptr)
+ << "An immune space object must have a bitmap";
+ if (kIsDebugBuild) {
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
+ << "Immune space object must be already marked";
}
- DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
- << "from_ref=" << from_ref << " to_ref=" << to_ref;
- } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
// This may or may not succeed, which is ok.
if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
}
- if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
+ if (cc_bitmap->AtomicTestAndSet(ref)) {
// Already marked.
- to_ref = from_ref;
} else {
// Newly marked.
- to_ref = from_ref;
if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
}
- PushOntoMarkStack(to_ref);
+ PushOntoMarkStack(ref);
}
} else {
- // from_ref is in a non-moving space.
- DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
- if (immune_region_.ContainsObject(from_ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
- << "Immune space object must be already marked";
- }
- // This may or may not succeed, which is ok.
+ // Use the mark bitmap.
+ accounting::ContinuousSpaceBitmap* mark_bitmap =
+ heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
+ accounting::LargeObjectBitmap* los_bitmap =
+ heap_mark_bitmap_->GetLargeObjectBitmap(ref);
+ CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
+ bool is_los = mark_bitmap == nullptr;
+ if (!is_los && mark_bitmap->Test(ref)) {
+ // Already marked.
if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
}
- if (cc_bitmap->AtomicTestAndSet(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- } else {
- // Newly marked.
- to_ref = from_ref;
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(to_ref);
+ } else if (is_los && los_bitmap->Test(ref)) {
+ // Already marked in LOS.
+ if (kUseBakerReadBarrier) {
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
}
} else {
- // Use the mark bitmap.
- accounting::ContinuousSpaceBitmap* mark_bitmap =
- heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
- accounting::LargeObjectBitmap* los_bitmap =
- heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
- CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
- bool is_los = mark_bitmap == nullptr;
- if (!is_los && mark_bitmap->Test(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ // Not marked.
+ if (IsOnAllocStack(ref)) {
+ // If it's on the allocation stack, it's considered marked. Keep it white.
+ // Objects on the allocation stack need not be marked.
+ if (!is_los) {
+ DCHECK(!mark_bitmap->Test(ref));
+ } else {
+ DCHECK(!los_bitmap->Test(ref));
}
- } else if (is_los && los_bitmap->Test(from_ref)) {
- // Already marked in LOS.
- to_ref = from_ref;
if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
}
} else {
- // Not marked.
- if (IsOnAllocStack(from_ref)) {
- // If it's on the allocation stack, it's considered marked. Keep it white.
- to_ref = from_ref;
- // Objects on the allocation stack need not be marked.
- if (!is_los) {
- DCHECK(!mark_bitmap->Test(to_ref));
- } else {
- DCHECK(!los_bitmap->Test(to_ref));
- }
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
- }
+ // Not marked or on the allocation stack. Try to mark it.
+ // This may or may not succeed, which is ok.
+ if (kUseBakerReadBarrier) {
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ }
+ if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked.
+ } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked in LOS.
} else {
- // Not marked or on the allocation stack. Try to mark it.
- // This may or may not succeed, which is ok.
+ // Newly marked.
if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
- // Already marked in LOS.
- to_ref = from_ref;
- } else {
- // Newly marked.
- to_ref = from_ref;
- if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(to_ref);
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
}
+ PushOntoMarkStack(ref);
}
}
}
}
- return to_ref;
+ return ref;
}
void ConcurrentCopying::FinishPhase() {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index c32b19ea3a..27726e23c1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -93,7 +93,7 @@ class ConcurrentCopying : public GarbageCollector {
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
- mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
bool IsMarking() const {
return is_marking_;
@@ -183,6 +183,8 @@ class ConcurrentCopying : public GarbageCollector {
void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4db37e600a..0bc9dd8375 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -66,13 +66,20 @@ class InstallStubsClassVisitor : public ClassVisitor {
Instrumentation::Instrumentation()
- : instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false),
+ : instrumentation_stubs_installed_(false),
+ entry_exit_stubs_installed_(false),
interpreter_stubs_installed_(false),
- interpret_only_(false), forced_interpret_only_(false),
- have_method_entry_listeners_(false), have_method_exit_listeners_(false),
- have_method_unwind_listeners_(false), have_dex_pc_listeners_(false),
- have_field_read_listeners_(false), have_field_write_listeners_(false),
- have_exception_caught_listeners_(false), have_backward_branch_listeners_(false),
+ interpret_only_(false),
+ forced_interpret_only_(false),
+ have_method_entry_listeners_(false),
+ have_method_exit_listeners_(false),
+ have_method_unwind_listeners_(false),
+ have_dex_pc_listeners_(false),
+ have_field_read_listeners_(false),
+ have_field_write_listeners_(false),
+ have_exception_caught_listeners_(false),
+ have_backward_branch_listeners_(false),
+ have_invoke_virtual_or_interface_listeners_(false),
deoptimized_methods_lock_("deoptimized methods lock"),
deoptimization_enabled_(false),
interpreter_handler_table_(kMainHandlerTable),
@@ -297,7 +304,9 @@ void Instrumentation::InstrumentThreadStack(Thread* thread) {
// Removes the instrumentation exit pc as the return PC for every quick frame.
static void InstrumentationRestoreStack(Thread* thread, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES(Locks::mutator_lock_) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+
struct RestoreStackVisitor FINAL : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
@@ -387,146 +396,151 @@ static bool HasEvent(Instrumentation::InstrumentationEvent expected, uint32_t ev
return (events & expected) != 0;
}
-void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
+static void PotentiallyAddListenerTo(Instrumentation::InstrumentationEvent event,
+ uint32_t events,
+ std::list<InstrumentationListener*>& list,
+ InstrumentationListener* listener,
+ bool* has_listener)
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- if (HasEvent(kMethodEntered, events)) {
- method_entry_listeners_.push_back(listener);
- have_method_entry_listeners_ = true;
- }
- if (HasEvent(kMethodExited, events)) {
- method_exit_listeners_.push_back(listener);
- have_method_exit_listeners_ = true;
- }
- if (HasEvent(kMethodUnwind, events)) {
- method_unwind_listeners_.push_back(listener);
- have_method_unwind_listeners_ = true;
- }
- if (HasEvent(kBackwardBranch, events)) {
- backward_branch_listeners_.push_back(listener);
- have_backward_branch_listeners_ = true;
- }
- if (HasEvent(kInvokeVirtualOrInterface, events)) {
- invoke_virtual_or_interface_listeners_.push_back(listener);
- have_invoke_virtual_or_interface_listeners_ = true;
- }
- if (HasEvent(kDexPcMoved, events)) {
- std::list<InstrumentationListener*>* modified;
- if (have_dex_pc_listeners_) {
- modified = new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
- } else {
- modified = new std::list<InstrumentationListener*>();
- }
- modified->push_back(listener);
- dex_pc_listeners_.reset(modified);
- have_dex_pc_listeners_ = true;
- }
- if (HasEvent(kFieldRead, events)) {
- std::list<InstrumentationListener*>* modified;
- if (have_field_read_listeners_) {
- modified = new std::list<InstrumentationListener*>(*field_read_listeners_.get());
- } else {
- modified = new std::list<InstrumentationListener*>();
- }
- modified->push_back(listener);
- field_read_listeners_.reset(modified);
- have_field_read_listeners_ = true;
- }
- if (HasEvent(kFieldWritten, events)) {
- std::list<InstrumentationListener*>* modified;
- if (have_field_write_listeners_) {
- modified = new std::list<InstrumentationListener*>(*field_write_listeners_.get());
- } else {
- modified = new std::list<InstrumentationListener*>();
- }
- modified->push_back(listener);
- field_write_listeners_.reset(modified);
- have_field_write_listeners_ = true;
+ if (!HasEvent(event, events)) {
+ return;
}
- if (HasEvent(kExceptionCaught, events)) {
- std::list<InstrumentationListener*>* modified;
- if (have_exception_caught_listeners_) {
- modified = new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
- } else {
- modified = new std::list<InstrumentationListener*>();
- }
- modified->push_back(listener);
- exception_caught_listeners_.reset(modified);
- have_exception_caught_listeners_ = true;
+ // If there is a free slot in the list, we insert the listener in that slot.
+ // Otherwise we add it to the end of the list.
+ auto it = std::find(list.begin(), list.end(), nullptr);
+ if (it != list.end()) {
+ *it = listener;
+ } else {
+ list.push_back(listener);
}
- UpdateInterpreterHandlerTable();
+ *has_listener = true;
}
-void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
+void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ PotentiallyAddListenerTo(kMethodEntered,
+ events,
+ method_entry_listeners_,
+ listener,
+ &have_method_entry_listeners_);
+ PotentiallyAddListenerTo(kMethodExited,
+ events,
+ method_exit_listeners_,
+ listener,
+ &have_method_exit_listeners_);
+ PotentiallyAddListenerTo(kMethodUnwind,
+ events,
+ method_unwind_listeners_,
+ listener,
+ &have_method_unwind_listeners_);
+ PotentiallyAddListenerTo(kBackwardBranch,
+ events,
+ backward_branch_listeners_,
+ listener,
+ &have_backward_branch_listeners_);
+ PotentiallyAddListenerTo(kInvokeVirtualOrInterface,
+ events,
+ invoke_virtual_or_interface_listeners_,
+ listener,
+ &have_invoke_virtual_or_interface_listeners_);
+ PotentiallyAddListenerTo(kDexPcMoved,
+ events,
+ dex_pc_listeners_,
+ listener,
+ &have_dex_pc_listeners_);
+ PotentiallyAddListenerTo(kFieldRead,
+ events,
+ field_read_listeners_,
+ listener,
+ &have_field_read_listeners_);
+ PotentiallyAddListenerTo(kFieldWritten,
+ events,
+ field_write_listeners_,
+ listener,
+ &have_field_write_listeners_);
+ PotentiallyAddListenerTo(kExceptionCaught,
+ events,
+ exception_caught_listeners_,
+ listener,
+ &have_exception_caught_listeners_);
+ UpdateInterpreterHandlerTable();
+}
- if (HasEvent(kMethodEntered, events) && have_method_entry_listeners_) {
- method_entry_listeners_.remove(listener);
- have_method_entry_listeners_ = !method_entry_listeners_.empty();
- }
- if (HasEvent(kMethodExited, events) && have_method_exit_listeners_) {
- method_exit_listeners_.remove(listener);
- have_method_exit_listeners_ = !method_exit_listeners_.empty();
- }
- if (HasEvent(kMethodUnwind, events) && have_method_unwind_listeners_) {
- method_unwind_listeners_.remove(listener);
- have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
- }
- if (HasEvent(kBackwardBranch, events) && have_backward_branch_listeners_) {
- backward_branch_listeners_.remove(listener);
- have_backward_branch_listeners_ = !backward_branch_listeners_.empty();
- }
- if (HasEvent(kInvokeVirtualOrInterface, events) && have_invoke_virtual_or_interface_listeners_) {
- invoke_virtual_or_interface_listeners_.remove(listener);
- have_invoke_virtual_or_interface_listeners_ = !invoke_virtual_or_interface_listeners_.empty();
- }
- if (HasEvent(kDexPcMoved, events) && have_dex_pc_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
- modified->remove(listener);
- have_dex_pc_listeners_ = !modified->empty();
- if (have_dex_pc_listeners_) {
- dex_pc_listeners_.reset(modified);
- } else {
- dex_pc_listeners_.reset();
- delete modified;
- }
+static void PotentiallyRemoveListenerFrom(Instrumentation::InstrumentationEvent event,
+ uint32_t events,
+ std::list<InstrumentationListener*>& list,
+ InstrumentationListener* listener,
+ bool* has_listener)
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ if (!HasEvent(event, events)) {
+ return;
}
- if (HasEvent(kFieldRead, events) && have_field_read_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*field_read_listeners_.get());
- modified->remove(listener);
- have_field_read_listeners_ = !modified->empty();
- if (have_field_read_listeners_) {
- field_read_listeners_.reset(modified);
- } else {
- field_read_listeners_.reset();
- delete modified;
- }
+ auto it = std::find(list.begin(), list.end(), listener);
+ if (it != list.end()) {
+ // Just update the entry, do not remove from the list. Removing entries in the list
+ // is unsafe when mutators are iterating over it.
+ *it = nullptr;
}
- if (HasEvent(kFieldWritten, events) && have_field_write_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*field_write_listeners_.get());
- modified->remove(listener);
- have_field_write_listeners_ = !modified->empty();
- if (have_field_write_listeners_) {
- field_write_listeners_.reset(modified);
- } else {
- field_write_listeners_.reset();
- delete modified;
- }
- }
- if (HasEvent(kExceptionCaught, events) && have_exception_caught_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
- modified->remove(listener);
- have_exception_caught_listeners_ = !modified->empty();
- if (have_exception_caught_listeners_) {
- exception_caught_listeners_.reset(modified);
- } else {
- exception_caught_listeners_.reset();
- delete modified;
+
+ // Check if the list contains any non-null listener, and update 'has_listener'.
+ for (InstrumentationListener* l : list) {
+ if (l != nullptr) {
+ *has_listener = true;
+ return;
}
}
+ *has_listener = false;
+}
+
+void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ PotentiallyRemoveListenerFrom(kMethodEntered,
+ events,
+ method_entry_listeners_,
+ listener,
+ &have_method_entry_listeners_);
+ PotentiallyRemoveListenerFrom(kMethodExited,
+ events,
+ method_exit_listeners_,
+ listener,
+ &have_method_exit_listeners_);
+ PotentiallyRemoveListenerFrom(kMethodUnwind,
+ events,
+ method_unwind_listeners_,
+ listener,
+ &have_method_unwind_listeners_);
+ PotentiallyRemoveListenerFrom(kBackwardBranch,
+ events,
+ backward_branch_listeners_,
+ listener,
+ &have_backward_branch_listeners_);
+ PotentiallyRemoveListenerFrom(kInvokeVirtualOrInterface,
+ events,
+ invoke_virtual_or_interface_listeners_,
+ listener,
+ &have_invoke_virtual_or_interface_listeners_);
+ PotentiallyRemoveListenerFrom(kDexPcMoved,
+ events,
+ dex_pc_listeners_,
+ listener,
+ &have_dex_pc_listeners_);
+ PotentiallyRemoveListenerFrom(kFieldRead,
+ events,
+ field_read_listeners_,
+ listener,
+ &have_field_read_listeners_);
+ PotentiallyRemoveListenerFrom(kFieldWritten,
+ events,
+ field_write_listeners_,
+ listener,
+ &have_field_write_listeners_);
+ PotentiallyRemoveListenerFrom(kExceptionCaught,
+ events,
+ exception_caught_listeners_,
+ listener,
+ &have_exception_caught_listeners_);
UpdateInterpreterHandlerTable();
}
@@ -594,9 +608,11 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir
empty = IsDeoptimizedMethodsEmpty(); // Avoid lock violation.
}
if (empty) {
- instrumentation_stubs_installed_ = false;
MutexLock mu(self, *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
+ // Only do this after restoring, as walking the stack when restoring will see
+ // the instrumentation exit pc.
+ instrumentation_stubs_installed_ = false;
}
}
}
@@ -861,28 +877,24 @@ const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, size_t pointer_s
void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
uint32_t dex_pc) const {
- auto it = method_entry_listeners_.begin();
- bool is_end = (it == method_entry_listeners_.end());
- // Implemented this way to prevent problems caused by modification of the list while iterating.
- while (!is_end) {
- InstrumentationListener* cur = *it;
- ++it;
- is_end = (it == method_entry_listeners_.end());
- cur->MethodEntered(thread, this_object, method, dex_pc);
+ if (HasMethodEntryListeners()) {
+ for (InstrumentationListener* listener : method_entry_listeners_) {
+ if (listener != nullptr) {
+ listener->MethodEntered(thread, this_object, method, dex_pc);
+ }
+ }
}
}
void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const {
- auto it = method_exit_listeners_.begin();
- bool is_end = (it == method_exit_listeners_.end());
- // Implemented this way to prevent problems caused by modification of the list while iterating.
- while (!is_end) {
- InstrumentationListener* cur = *it;
- ++it;
- is_end = (it == method_exit_listeners_.end());
- cur->MethodExited(thread, this_object, method, dex_pc, return_value);
+ if (HasMethodExitListeners()) {
+ for (InstrumentationListener* listener : method_exit_listeners_) {
+ if (listener != nullptr) {
+ listener->MethodExited(thread, this_object, method, dex_pc, return_value);
+ }
+ }
}
}
@@ -891,7 +903,9 @@ void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_obj
uint32_t dex_pc) const {
if (HasMethodUnwindListeners()) {
for (InstrumentationListener* listener : method_unwind_listeners_) {
- listener->MethodUnwind(thread, this_object, method, dex_pc);
+ if (listener != nullptr) {
+ listener->MethodUnwind(thread, this_object, method, dex_pc);
+ }
}
}
}
@@ -899,16 +913,19 @@ void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_obj
void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
uint32_t dex_pc) const {
- std::shared_ptr<std::list<InstrumentationListener*>> original(dex_pc_listeners_);
- for (InstrumentationListener* listener : *original.get()) {
- listener->DexPcMoved(thread, this_object, method, dex_pc);
+ for (InstrumentationListener* listener : dex_pc_listeners_) {
+ if (listener != nullptr) {
+ listener->DexPcMoved(thread, this_object, method, dex_pc);
+ }
}
}
void Instrumentation::BackwardBranchImpl(Thread* thread, ArtMethod* method,
int32_t offset) const {
for (InstrumentationListener* listener : backward_branch_listeners_) {
- listener->BackwardBranch(thread, method, offset);
+ if (listener != nullptr) {
+ listener->BackwardBranch(thread, method, offset);
+ }
}
}
@@ -918,25 +935,29 @@ void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread,
uint32_t dex_pc,
ArtMethod* callee) const {
for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) {
- listener->InvokeVirtualOrInterface(thread, this_object, caller, dex_pc, callee);
+ if (listener != nullptr) {
+ listener->InvokeVirtualOrInterface(thread, this_object, caller, dex_pc, callee);
+ }
}
}
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const {
- std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_);
- for (InstrumentationListener* listener : *original.get()) {
- listener->FieldRead(thread, this_object, method, dex_pc, field);
+ for (InstrumentationListener* listener : field_read_listeners_) {
+ if (listener != nullptr) {
+ listener->FieldRead(thread, this_object, method, dex_pc, field);
+ }
}
}
void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const {
- std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_);
- for (InstrumentationListener* listener : *original.get()) {
- listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
+ for (InstrumentationListener* listener : field_write_listeners_) {
+ if (listener != nullptr) {
+ listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
+ }
}
}
@@ -945,9 +966,10 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread,
if (HasExceptionCaughtListeners()) {
DCHECK_EQ(thread->GetException(), exception_object);
thread->ClearException();
- std::shared_ptr<std::list<InstrumentationListener*>> original(exception_caught_listeners_);
- for (InstrumentationListener* listener : *original.get()) {
- listener->ExceptionCaught(thread, exception_object);
+ for (InstrumentationListener* listener : exception_caught_listeners_) {
+ if (listener != nullptr) {
+ listener->ExceptionCaught(thread, exception_object);
+ }
}
thread->SetException(exception_object);
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 8dd2357e06..726cf1b461 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -517,20 +517,25 @@ class Instrumentation {
InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);
// The event listeners, written to with the mutator_lock_ exclusively held.
+ // Mutators must be able to iterate over these lists concurrently, that is, with listeners being
+  // added or removed while iterating. The modifying thread holds the exclusive lock,
+  // so other threads cannot iterate (i.e. read the data of the list) at the same time, but they
+  // do keep iterators that need to remain valid. This is the reason these listeners are std::list
+  // and not, for example, std::vector: the existing storage for a std::list does not move.
+ // Note that mutators cannot make a copy of these lists before iterating, as the instrumentation
+ // listeners can also be deleted concurrently.
+ // As a result, these lists are never trimmed. That's acceptable given the low number of
+ // listeners we have.
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> invoke_virtual_or_interface_listeners_
GUARDED_BY(Locks::mutator_lock_);
- std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
- GUARDED_BY(Locks::mutator_lock_);
- std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
- GUARDED_BY(Locks::mutator_lock_);
- std::shared_ptr<std::list<InstrumentationListener*>> field_write_listeners_
- GUARDED_BY(Locks::mutator_lock_);
- std::shared_ptr<std::list<InstrumentationListener*>> exception_caught_listeners_
- GUARDED_BY(Locks::mutator_lock_);
+ std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::list<InstrumentationListener*> exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_);
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
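
The comment above relies on a standard-library property: std::list::push_back never invalidates existing iterators, whereas std::vector may reallocate its storage. A tiny self-contained illustration (generic C++, nothing ART-specific):

    #include <cassert>
    #include <list>

    int main() {
      std::list<int> values = {1, 2, 3};
      std::list<int>::iterator it = values.begin();  // Points at the node holding 1.
      for (int i = 0; i < 1000; ++i) {
        values.push_back(i);  // List nodes never move; 'it' stays valid.
      }
      assert(*it == 1);
      return 0;
    }
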
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 8c495fc7bb..c8650c4238 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -157,11 +157,11 @@ static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method)
// Write out the 'Closure*' into vreg and vreg+1, as if it was a jlong.
static inline void WriteLambdaClosureIntoVRegs(ShadowFrame& shadow_frame,
- const lambda::Closure* lambda_closure,
+ const lambda::Closure& lambda_closure,
uint32_t vreg) {
// Split the method into a lo and hi 32 bits so we can encode them into 2 virtual registers.
- uint32_t closure_lo = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(lambda_closure));
- uint32_t closure_hi = static_cast<uint32_t>(reinterpret_cast<uint64_t>(lambda_closure)
+ uint32_t closure_lo = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&lambda_closure));
+ uint32_t closure_hi = static_cast<uint32_t>(reinterpret_cast<uint64_t>(&lambda_closure)
>> BitSizeOf<uint32_t>());
// Use uint64_t instead of uintptr_t to allow shifting past the max on 32-bit.
static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
@@ -193,6 +193,9 @@ static inline bool DoCreateLambda(Thread* self,
DCHECK(uninitialized_closure != nullptr);
DCHECK_ALIGNED(uninitialized_closure, alignof(lambda::Closure));
+ using lambda::ArtLambdaMethod;
+ using lambda::LeakingAllocator;
+
/*
* create-lambda is opcode 0x21c
* - vA is the target register where the closure will be stored into
@@ -214,12 +217,13 @@ static inline bool DoCreateLambda(Thread* self,
return false;
}
- lambda::ArtLambdaMethod* initialized_lambda_method;
+ ArtLambdaMethod* initialized_lambda_method;
// Initialize the ArtLambdaMethod with the right data.
{
- lambda::ArtLambdaMethod* uninitialized_lambda_method =
- reinterpret_cast<lambda::ArtLambdaMethod*>(
- lambda::LeakingAllocator::AllocateMemory(self, sizeof(lambda::ArtLambdaMethod)));
+ // Allocate enough memory to store a well-aligned ArtLambdaMethod.
+ // This is not the final type yet since the data starts out uninitialized.
+ LeakingAllocator::AlignedMemoryStorage<ArtLambdaMethod>* uninitialized_lambda_method =
+ LeakingAllocator::AllocateMemory<ArtLambdaMethod>(self);
std::string captured_variables_shorty = closure_builder->GetCapturedVariableShortyTypes();
std::string captured_variables_long_type_desc;
@@ -244,30 +248,28 @@ static inline bool DoCreateLambda(Thread* self,
// Copy strings to dynamically allocated storage. This leaks, but that's ok. Fix it later.
// TODO: Strings need to come from the DexFile, so they won't need their own allocations.
- char* captured_variables_type_desc = lambda::LeakingAllocator::MakeFlexibleInstance<char>(
+ char* captured_variables_type_desc = LeakingAllocator::MakeFlexibleInstance<char>(
self,
captured_variables_long_type_desc.size() + 1);
strcpy(captured_variables_type_desc, captured_variables_long_type_desc.c_str());
- char* captured_variables_shorty_copy = lambda::LeakingAllocator::MakeFlexibleInstance<char>(
+ char* captured_variables_shorty_copy = LeakingAllocator::MakeFlexibleInstance<char>(
self,
captured_variables_shorty.size() + 1);
strcpy(captured_variables_shorty_copy, captured_variables_shorty.c_str());
- new (uninitialized_lambda_method) lambda::ArtLambdaMethod(called_method,
- captured_variables_type_desc,
- captured_variables_shorty_copy,
- true); // innate lambda
- initialized_lambda_method = uninitialized_lambda_method;
+ // After initialization, the object at the storage is well-typed. Use strong type going forward.
+ initialized_lambda_method =
+ new (uninitialized_lambda_method) ArtLambdaMethod(called_method,
+ captured_variables_type_desc,
+ captured_variables_shorty_copy,
+ true); // innate lambda
}
// Write all the closure captured variables and the closure header into the closure.
- lambda::Closure* initialized_closure;
- {
- initialized_closure =
- closure_builder->CreateInPlace(uninitialized_closure, initialized_lambda_method);
- }
+ lambda::Closure* initialized_closure =
+ closure_builder->CreateInPlace(uninitialized_closure, initialized_lambda_method);
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, initialized_closure, vreg_dest_closure);
+ WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *initialized_closure, vreg_dest_closure);
return true;
}
@@ -928,7 +930,7 @@ static inline bool DoUnboxLambda(Thread* self,
}
DCHECK(unboxed_closure != nullptr);
- WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, unboxed_closure, vreg_target_closure);
+ WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *unboxed_closure, vreg_target_closure);
return true;
}
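
WriteLambdaClosureIntoVRegs stores a native pointer as two 32-bit halves. A standalone sketch of that encode/decode step, using plain out-parameters instead of shadow-frame vregs (the function names here are illustrative):

    #include <cassert>
    #include <cstdint>

    // Encode a pointer into two 32-bit values (lo first), as if storing it in a pair of
    // virtual registers. uint64_t keeps the shift well-defined on 32-bit targets.
    void EncodePointer(const void* ptr, uint32_t* lo, uint32_t* hi) {
      uint64_t bits = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }

    const void* DecodePointer(uint32_t lo, uint32_t hi) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      return reinterpret_cast<const void*>(static_cast<uintptr_t>(bits));
    }

    int main() {
      int x = 42;
      uint32_t lo = 0, hi = 0;
      EncodePointer(&x, &lo, &hi);
      assert(DecodePointer(lo, hi) == &x);
      return 0;
    }
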
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index cfccec87cf..4c7cb1e36a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -25,6 +25,7 @@
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
+#include "scoped_thread_state_change.h"
#include "thread_list.h"
namespace art {
@@ -369,6 +370,23 @@ class MarkCodeClosure FINAL : public Closure {
DCHECK(thread == Thread::Current() || thread->IsSuspended());
MarkCodeVisitor visitor(thread, code_cache_);
visitor.WalkStack();
+ if (kIsDebugBuild) {
+ // The stack walking code queries the side instrumentation stack if it
+ // sees an instrumentation exit pc, so the JIT code of methods in that stack
+ // must have been seen. We sanity check this below.
+ for (const instrumentation::InstrumentationStackFrame& frame
+ : *thread->GetInstrumentationStack()) {
+        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
+        // its stack frame; it is not the method that owns return_pc_. We just pass null to
+        // LookupMethodHeader: the method argument is only used for a debug-build check.
+ OatQuickMethodHeader* method_header =
+ code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
+ if (method_header != nullptr) {
+ const void* code = method_header->GetCode();
+ CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
+ }
+ }
+ }
barrier_->Pass(Thread::Current());
}
@@ -407,9 +425,17 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
// Run a checkpoint on all threads to mark the JIT compiled code they are running.
{
Barrier barrier(0);
- MarkCodeClosure closure(this, &barrier);
- size_t threads_running_checkpoint =
- Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ size_t threads_running_checkpoint = 0;
+ {
+      // Walking the stack requires the mutator lock.
+      // We only take the lock while requesting the checkpoint, not while waiting for it,
+      // so that once we go back to suspended we can execute checkpoints that were requested
+      // concurrently, and then wait for our own checkpoint to finish.
+ ScopedObjectAccess soa(self);
+ MarkCodeClosure closure(this, &barrier);
+ threads_running_checkpoint =
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ }
if (threads_running_checkpoint != 0) {
barrier.Increment(self, threads_running_checkpoint);
}
@@ -480,8 +506,10 @@ OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod*
if (!method_header->Contains(pc)) {
return nullptr;
}
- DCHECK_EQ(it->second, method)
- << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
+ if (kIsDebugBuild && method != nullptr) {
+ DCHECK_EQ(it->second, method)
+ << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
+ }
return method_header;
}
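
The GarbageCollectCache change above narrows the scope in which the lock is held: the checkpoint is requested inside a small scope and the barrier wait happens outside it. A rough sketch of that shape, where SimpleBarrier and the request callback are simplified stand-ins for the ART Barrier and RunCheckpoint:

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    // Simplified barrier: worker threads call Pass() when done, the requester waits
    // until 'expected' passes have happened.
    class SimpleBarrier {
     public:
      void Pass() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++passed_;
        cv_.notify_all();
      }
      void WaitFor(int expected) {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [&] { return passed_ >= expected; });
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int passed_ = 0;
    };

    // Shape of the calling code: the request runs inside its own scope (standing in for
    // ScopedObjectAccess), and the wait happens with no locks held, so checkpoints
    // requested against this thread can still run while it waits.
    void RunCheckpointAndWait(const std::function<int()>& request_checkpoint,
                              SimpleBarrier* barrier) {
      int threads_running_checkpoint = 0;
      {
        // Scoped access / mutator lock would be taken only for this block.
        threads_running_checkpoint = request_checkpoint();
      }
      if (threads_running_checkpoint != 0) {
        barrier->WaitFor(threads_running_checkpoint);
      }
    }
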
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 8aaa5fa304..7931306ff6 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -102,15 +102,13 @@ void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t
} else {
      // We failed to allocate. Instead of doing the collection on the Java thread, we push
      // an allocation task to a compiler thread, which will do the collection.
- thread_pool_->AddTask(self, new JitCompileTask(
- method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kAllocateProfile));
+ thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
thread_pool_->StartWorkers(self);
}
}
if (sample_count == hot_method_threshold_) {
- thread_pool_->AddTask(self, new JitCompileTask(
- method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kCompile));
+ thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
thread_pool_->StartWorkers(self);
}
}
diff --git a/runtime/lambda/leaking_allocator.cc b/runtime/lambda/leaking_allocator.cc
index 4910732a6c..22bb294d03 100644
--- a/runtime/lambda/leaking_allocator.cc
+++ b/runtime/lambda/leaking_allocator.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/bit_utils.h"
#include "lambda/leaking_allocator.h"
#include "linear_alloc.h"
#include "runtime.h"
@@ -21,9 +22,11 @@
namespace art {
namespace lambda {
-void* LeakingAllocator::AllocateMemory(Thread* self, size_t byte_size) {
+void* LeakingAllocator::AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size) {
// TODO: use GetAllocatorForClassLoader to allocate lambda ArtMethod data.
- return Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
+ void* mem = Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
+ DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(mem), align_size);
+ return mem;
}
} // namespace lambda
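
The DCHECK_ALIGNED_PARAM added above amounts to a power-of-two alignment test on the returned address. A minimal equivalent check (illustrative helper, not the ART macro):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // True if 'ptr' is aligned to 'align', where 'align' is a power of two.
    bool IsAligned(const void* ptr, size_t align) {
      return (reinterpret_cast<uintptr_t>(ptr) & (align - 1)) == 0;
    }

    int main() {
      alignas(16) char buffer[32];
      assert(IsAligned(buffer, 16));
      return 0;
    }
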
diff --git a/runtime/lambda/leaking_allocator.h b/runtime/lambda/leaking_allocator.h
index c3222d0485..cb5a1bf4c3 100644
--- a/runtime/lambda/leaking_allocator.h
+++ b/runtime/lambda/leaking_allocator.h
@@ -17,6 +17,7 @@
#define ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
#include <utility> // std::forward
+#include <type_traits> // std::aligned_storage
namespace art {
class Thread; // forward declaration
@@ -33,20 +34,36 @@ namespace lambda {
// TODO: do all of the above a/b for each callsite, and delete this class.
class LeakingAllocator {
public:
+  // An opaque type which is guaranteed to:
+  // * a) be large enough to hold T (e.g. for in-place new)
+  // * b) be well-aligned for T (so that reads/writes are well-defined)
+  // * c) be strict-aliasing compatible with T*
+  //
+  // Nominally used to allocate memory for not-yet-constructed instances of T.
+ template <typename T>
+ using AlignedMemoryStorage = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
+
// Allocate byte_size bytes worth of memory. Never freed.
- static void* AllocateMemory(Thread* self, size_t byte_size);
+ template <typename T>
+ static AlignedMemoryStorage<T>* AllocateMemory(Thread* self, size_t byte_size = sizeof(T)) {
+ return reinterpret_cast<AlignedMemoryStorage<T>*>(
+ AllocateMemoryImpl(self, byte_size, alignof(T)));
+ }
// Make a new instance of T, flexibly sized, in-place at newly allocated memory. Never freed.
template <typename T, typename... Args>
static T* MakeFlexibleInstance(Thread* self, size_t byte_size, Args&&... args) {
- return new (AllocateMemory(self, byte_size)) T(std::forward<Args>(args)...);
+ return new (AllocateMemory<T>(self, byte_size)) T(std::forward<Args>(args)...);
}
// Make a new instance of T in-place at newly allocated memory. Never freed.
template <typename T, typename... Args>
static T* MakeInstance(Thread* self, Args&&... args) {
- return new (AllocateMemory(self, sizeof(T))) T(std::forward<Args>(args)...);
+ return new (AllocateMemory<T>(self, sizeof(T))) T(std::forward<Args>(args)...);
}
+
+ private:
+ static void* AllocateMemoryImpl(Thread* self, size_t byte_size, size_t align_size);
};
} // namespace lambda
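
AlignedMemoryStorage is built on std::aligned_storage: raw storage sized and aligned for T, which placement new later turns into a live, well-typed object. A compact generic example of that allocate-then-construct pattern (the Payload type is made up for illustration; unlike the leaking allocator, this example also destroys the object):

    #include <new>
    #include <string>
    #include <type_traits>

    struct Payload {
      Payload(int id, std::string name) : id(id), name(std::move(name)) {}
      int id;
      std::string name;
    };

    int main() {
      // Raw storage that is correctly sized and aligned for a not-yet-constructed Payload.
      std::aligned_storage<sizeof(Payload), alignof(Payload)>::type storage;
      // Placement new constructs the object in that storage and yields a typed pointer.
      Payload* payload = new (&storage) Payload(7, "example");
      bool ok = (payload->id == 7);
      // A leaking allocator would skip this; here we destroy the object explicitly.
      payload->~Payload();
      return ok ? 0 : 1;
    }
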
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 022f31dc53..5c6520fcab 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -99,7 +99,7 @@ class MANAGED LOCKABLE Object {
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
+ ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
SHARED_REQUIRES(Locks::mutator_lock_);
void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 5b73557941..5337760fb8 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -270,7 +270,7 @@ inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) {
}
template<class T> template<typename Visitor>
-void ObjectArray<T>::VisitReferences(const Visitor& visitor) {
+inline void ObjectArray<T>::VisitReferences(const Visitor& visitor) {
const size_t length = static_cast<size_t>(GetLength());
for (size_t i = 0; i < length; ++i) {
visitor(this, OffsetOfElement(i), false);
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 8d5418d07d..99080f611c 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -662,23 +662,19 @@ bool OatFileAssistant::RelocateOatFile(const std::string* input_file,
bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
CHECK(error_msg != nullptr);
- if (OatFileName() == nullptr) {
+ Runtime* runtime = Runtime::Current();
+ if (!runtime->IsDex2OatEnabled()) {
*error_msg = "Generation of oat file for dex location " + dex_location_
- + " not attempted because the oat file name could not be determined.";
+ + " not attempted because dex2oat is disabled.";
return false;
}
- const std::string& oat_file_name = *OatFileName();
- Runtime* runtime = Runtime::Current();
- if (!runtime->IsDex2OatEnabled()) {
- *error_msg = "Generation of oat file " + oat_file_name
- + " not attempted because dex2oat is disabled";
+ if (OatFileName() == nullptr) {
+ *error_msg = "Generation of oat file for dex location " + dex_location_
+ + " not attempted because the oat file name could not be determined.";
return false;
}
-
- std::vector<std::string> args;
- args.push_back("--dex-file=" + dex_location_);
- args.push_back("--oat-file=" + oat_file_name);
+ const std::string& oat_file_name = *OatFileName();
// dex2oat ignores missing dex files and doesn't report an error.
// Check explicitly here so we can detect the error properly.
@@ -688,9 +684,36 @@ bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
return false;
}
+ std::unique_ptr<File> oat_file;
+ oat_file.reset(OS::CreateEmptyFile(oat_file_name.c_str()));
+ if (oat_file.get() == nullptr) {
+ *error_msg = "Generation of oat file " + oat_file_name
+ + " not attempted because the oat file could not be created.";
+ return false;
+ }
+
+ if (fchmod(oat_file->Fd(), 0644) != 0) {
+ *error_msg = "Generation of oat file " + oat_file_name
+ + " not attempted because the oat file could not be made world readable.";
+ oat_file->Erase();
+ return false;
+ }
+
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location_);
+ args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
+ args.push_back("--oat-location=" + oat_file_name);
+
if (!Dex2Oat(args, error_msg)) {
// Manually delete the file. This ensures there is no garbage left over if
// the process unexpectedly died.
+ oat_file->Erase();
+ TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ return false;
+ }
+
+ if (oat_file->FlushCloseOrErase() != 0) {
+ *error_msg = "Unable to close oat file " + oat_file_name;
TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
return false;
}
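
GenerateOatFile now creates the output file itself, makes it world readable, and passes the descriptor to dex2oat via --oat-fd, unlinking the path on failure. A rough POSIX sketch of that flow; the run_dex2oat callback and the function name are stand-ins, and only the three dex2oat flags are taken from the change above:

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #include <functional>
    #include <string>
    #include <vector>

    // Returns true on success; on failure the partially written file is removed.
    bool GenerateIntoFdSketch(const std::string& dex_location,
                              const std::string& oat_path,
                              const std::function<bool(const std::vector<std::string>&)>& run_dex2oat) {
      int fd = open(oat_path.c_str(), O_RDWR | O_CREAT | O_TRUNC, 0644);
      if (fd < 0) {
        return false;  // Could not create the output file (e.g. unwritable directory).
      }
      if (fchmod(fd, 0644) != 0) {  // Make sure the file ends up world readable.
        close(fd);
        unlink(oat_path.c_str());
        return false;
      }
      std::vector<std::string> args;
      args.push_back("--dex-file=" + dex_location);
      args.push_back("--oat-fd=" + std::to_string(fd));
      args.push_back("--oat-location=" + oat_path);
      bool ok = run_dex2oat(args);  // The child inherits and writes through 'fd'.
      if (close(fd) != 0) {
        ok = false;
      }
      if (!ok) {
        unlink(oat_path.c_str());  // Leave no partially written file behind.
      }
      return ok;
    }
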
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 2c81eddf39..c54d7f8761 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -849,6 +849,38 @@ TEST_F(OatFileAssistantTest, LoadDexNoAlternateOat) {
EXPECT_FALSE(ofm.OatFileExists());
}
+// Case: We have a DEX file but can't write the oat file.
+// Expect: We should fail to make the oat file up to date.
+TEST_F(OatFileAssistantTest, LoadDexUnwriteableAlternateOat) {
+ std::string dex_location = GetScratchDir() + "/LoadDexUnwriteableAlternateOat.jar";
+
+ // Make the oat location unwritable by inserting some non-existent
+ // intermediate directories.
+ std::string oat_location = GetScratchDir() + "/foo/bar/LoadDexUnwriteableAlternateOat.oat";
+
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(
+ dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true);
+ std::string error_msg;
+ ASSERT_FALSE(oat_file_assistant.MakeUpToDate(&error_msg));
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() == nullptr);
+}
+
+// Case: We don't have a DEX file and can't write the oat file.
+// Expect: We should fail to generate the oat file without crashing.
+TEST_F(OatFileAssistantTest, GenNoDex) {
+ std::string dex_location = GetScratchDir() + "/GenNoDex.jar";
+ std::string oat_location = GetScratchDir() + "/GenNoDex.oat";
+
+ OatFileAssistant oat_file_assistant(
+ dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true);
+ std::string error_msg;
+ ASSERT_FALSE(oat_file_assistant.GenerateOatFile(&error_msg));
+}
+
// Turn an absolute path into a path relative to the current working
// directory.
static std::string MakePathRelative(std::string target) {
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 4998a6a478..7de6c06f2b 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -19,7 +19,7 @@
#include "read_barrier.h"
-#include "gc/collector/concurrent_copying.h"
+#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b0727daa15..d7edfade15 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -958,26 +958,18 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
return runtime->GetRuntimeMethodFrameInfo(method);
}
- // For Proxy method we add special handling for the direct method case (there is only one
- // direct method - constructor). Direct method is cloned from original
- // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
- // quick compiled method without any stubs. So the frame info should be returned as it is a
- // quick method not a stub. However, if instrumentation stubs are installed, the
- // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
- // oat code pointer, thus we have to add a special case here.
if (method->IsProxyMethod()) {
- if (method->IsDirect()) {
- CHECK(method->IsConstructor());
- const void* code_pointer =
- EntryPointToCodePointer(method->GetEntryPointFromQuickCompiledCode());
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
- } else {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
+    // There is only one direct method of a proxy class: the constructor. A direct method is
+    // cloned from the original java.lang.reflect.Proxy and is executed as a usual quick-compiled
+    // method without any stubs, so it must have an OatQuickMethodHeader and is never seen here.
+    DCHECK(!method->IsDirect() && !method->IsConstructor())
+        << "Constructors of proxy classes must have an OatQuickMethodHeader";
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
}
- ClassLinker* class_linker = runtime->GetClassLinker();
+ // The only remaining case is if the method is native and uses the generic JNI stub.
DCHECK(method->IsNative());
+ ClassLinker* class_linker = runtime->GetClassLinker();
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*));
DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method);
// Generic JNI frame.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index dcf9601b4b..b09b87fb58 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -275,9 +275,6 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking && gAborting == 0) {
- CHECK_NE(self->GetState(), kRunnable);
- }
std::vector<Thread*> suspended_count_modified_threads;
size_t count = 0;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index e1d4160aac..2db79ab229 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -665,20 +665,22 @@ bool MethodVerifier::Verify() {
// Interfaces may always have static initializers for their fields. If we are running with
// default methods enabled we also allow other public, static, non-final methods to have code.
// Otherwise that is the only type of method allowed.
- if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
- if (IsInstanceConstructor()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor";
- return false;
- } else if (method_access_flags_ & kAccFinal) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods";
- return false;
- } else if (!(method_access_flags_ & kAccPublic)) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members";
+ if (!(IsConstructor() && IsStatic())) {
+ if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
+ if (IsInstanceConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor";
+ return false;
+ } else if (method_access_flags_ & kAccFinal) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods";
+ return false;
+ } else if (!(method_access_flags_ & kAccPublic)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members";
+ return false;
+ }
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
return false;
}
- } else if (!IsConstructor() || !IsStatic()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
- return false;
}
}
@@ -3662,8 +3664,15 @@ ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
<< PrettyMethod(res_method);
return nullptr;
}
- // Check that interface methods match interface classes.
- if (klass->IsInterface() && method_type != METHOD_INTERFACE) {
+ // Check that interface methods are static or match interface classes.
+  // We only allow statics if we have default methods enabled.
+ Runtime* runtime = Runtime::Current();
+ const bool default_methods_supported =
+ runtime == nullptr ||
+ runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods);
+ if (klass->IsInterface() &&
+ method_type != METHOD_INTERFACE &&
+ (!default_methods_supported || method_type != METHOD_STATIC)) {
Fail(VERIFY_ERROR_CLASS_CHANGE) << "non-interface method " << PrettyMethod(res_method)
<< " is in an interface class " << PrettyClass(klass);
return nullptr;
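
The new condition can be read as a small predicate: on an interface class, a non-interface invoke is rejected unless it is a static invoke and default methods are enabled. A hedged restatement in plain C++ (the enum values mirror the method_type constants used above):

    enum MethodType { METHOD_STATIC, METHOD_INTERFACE, METHOD_VIRTUAL };

    // Mirrors the verifier condition: returns true when resolution should be
    // rejected with a class-change error.
    bool RejectOnInterfaceClass(bool klass_is_interface, MethodType method_type,
                                bool default_methods_supported) {
      if (!klass_is_interface || method_type == METHOD_INTERFACE) {
        return false;  // Not an interface class, or a proper interface invoke.
      }
      // Static invokes on interfaces are tolerated only with default methods enabled.
      return !(default_methods_supported && method_type == METHOD_STATIC);
    }
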
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 34fb3f8a01..2dbd7e8126 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -49,7 +49,9 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
if (m_name.compare("f") == 0) {
CHECK_REGS_CONTAIN_REFS(0x03U, true, 8); // v8: this
CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
- CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+ CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ }
CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
@@ -66,9 +68,10 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
CHECK_REGS_CONTAIN_REFS(0x13U, false, 3); // v3: y
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
// v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1);
+ // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
// v5 is removed from the root set because there is a "merge" operation.
// See 0015: if-nez v2, 001f.
diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run
deleted file mode 100755
index 27c501da8d..0000000000
--- a/test/004-ThreadStress/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Be less agressive than the default debug option for the jit code cache
-# to avoid timeouts.
-exec ${RUN} "$@" --runtime-option -Xjitcodecachesize:1M
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 22829cddc8..ffeae7d9a2 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -624,12 +624,13 @@ public class Main {
constantIndexing2(new int[3]);
} catch (ArrayIndexOutOfBoundsException e) {
assertIsManaged(); // This is to ensure that single-frame deoptimization works.
- // Will need to be updated if constantIndexing2 is inlined.
+ // Will need to be updated if constantIndexing2 is inlined.
try {
// This will cause AIOOBE.
constantIndexingForward6(new int[3]);
} catch (ArrayIndexOutOfBoundsException e2) {
- assertIsManaged();
+ // Having deopted, we expect to be running interpreted at this point.
+ // Does not apply to debuggable, however, since we do not inline.
return 99;
}
}
diff --git a/test/542-inline-trycatch/expected.txt b/test/542-inline-trycatch/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/542-inline-trycatch/expected.txt
diff --git a/test/542-inline-trycatch/info.txt b/test/542-inline-trycatch/info.txt
new file mode 100644
index 0000000000..b3e50d3d61
--- /dev/null
+++ b/test/542-inline-trycatch/info.txt
@@ -0,0 +1 @@
+Tests inlining in the optimizing compiler under try/catch. \ No newline at end of file
diff --git a/test/542-inline-trycatch/src/Main.java b/test/542-inline-trycatch/src/Main.java
new file mode 100644
index 0000000000..5a6e06fa0c
--- /dev/null
+++ b/test/542-inline-trycatch/src/Main.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ // The following tests make sure that we inline methods used inside try and catch
+ // blocks, provided they meet other inlining criteria. To do that, we rely on
+ // the compiler recognizing and enforcing the $inline$ and $noinline$ markers.
+
+ // We expect a single block to always be inlined.
+
+ private static int $inline$SingleBlock(String str) throws NumberFormatException {
+ return Integer.parseInt(str);
+ }
+
+ // We expect a "simple" method with multiple blocks to always be inlined.
+
+ private static int $inline$MultipleBlocks(String str, boolean is_hex)
+ throws NumberFormatException {
+ return is_hex ? Integer.parseInt(str, 16) : Integer.parseInt(str);
+ }
+
+ // We expect methods with try/catch to not be inlined. Inlined try/catch
+ // blocks are not supported at the moment.
+
+ private static int $noinline$TryCatch(String str) {
+ try {
+ return Integer.parseInt(str);
+ } catch (NumberFormatException ex) {
+ return -1;
+ }
+ }
+
+ public static void testSingleBlockFromTry() {
+ int val = 0;
+
+ try {
+ val = $inline$SingleBlock("42");
+ } catch (NumberFormatException ex) {
+ unreachable();
+ }
+ assertEquals(42, val);
+
+ try {
+ $inline$SingleBlock("xyz");
+ unreachable();
+ } catch (NumberFormatException ex) {}
+ }
+
+ public static void testSingleBlockFromCatch() {
+ int val = 0;
+
+ try {
+ throwException();
+ } catch (Exception ex) {
+ val = $inline$SingleBlock("42");
+ }
+ assertEquals(42, val);
+ }
+
+ public static void testMultipleBlocksFromTry() {
+ int val = 0;
+
+ try {
+ val = $inline$MultipleBlocks("42", false);
+ } catch (NumberFormatException ex) {
+ unreachable();
+ }
+ assertEquals(42, val);
+
+ try {
+ val = $inline$MultipleBlocks("20", true);
+ } catch (NumberFormatException ex) {
+ unreachable();
+ }
+ assertEquals(32, val);
+
+ try {
+ $inline$MultipleBlocks("xyz", false);
+ unreachable();
+ } catch (NumberFormatException ex) {}
+
+ try {
+ $inline$MultipleBlocks("xyz", true);
+ unreachable();
+ } catch (NumberFormatException ex) {}
+ }
+
+ public static void testMultipleBlocksFromCatch() {
+ int val = 0;
+
+ try {
+ throwException();
+ } catch (Exception ex) {
+ val = $inline$MultipleBlocks("42", false);
+ }
+ assertEquals(42, val);
+
+ try {
+ throwException();
+ } catch (Exception ex) {
+ val = $inline$MultipleBlocks("20", true);
+ }
+ assertEquals(32, val);
+ }
+
+ public static void testTryCatchFromTry() {
+ int val = 0;
+
+ try {
+ val = $noinline$TryCatch("42");
+ } catch (NumberFormatException ex) {
+ unreachable();
+ }
+ assertEquals(42, val);
+
+ try {
+ val = $noinline$TryCatch("xyz");
+ } catch (NumberFormatException ex) {
+ unreachable();
+ }
+ assertEquals(-1, val);
+ }
+
+ public static void testTryCatchFromCatch() {
+ int val = 0;
+
+ try {
+ throwException();
+ } catch (Exception ex) {
+ val = $noinline$TryCatch("42");
+ }
+ assertEquals(42, val);
+
+ try {
+ throwException();
+ } catch (Exception ex) {
+ val = $noinline$TryCatch("xyz");
+ }
+ assertEquals(-1, val);
+ }
+
+ public static void main(String[] args) {
+ testSingleBlockFromTry();
+ testSingleBlockFromCatch();
+ testMultipleBlocksFromTry();
+ testMultipleBlocksFromCatch();
+ testTryCatchFromTry();
+ testTryCatchFromCatch();
+ }
+
+ private static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError("Wrong result: " + expected + " != " + actual);
+ }
+ }
+
+ private static void unreachable() {
+ throw new Error("Unreachable");
+ }
+
+ private static void throwException() throws Exception {
+ throw new Exception();
+ }
+}
diff --git a/test/542-unresolved-access-check/expected.txt b/test/542-unresolved-access-check/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/542-unresolved-access-check/expected.txt
diff --git a/test/542-unresolved-access-check/info.txt b/test/542-unresolved-access-check/info.txt
new file mode 100644
index 0000000000..30d45b8ec3
--- /dev/null
+++ b/test/542-unresolved-access-check/info.txt
@@ -0,0 +1 @@
+Test unresolved/access checks entry points with the JIT.
diff --git a/test/542-unresolved-access-check/src/Main.java b/test/542-unresolved-access-check/src/Main.java
new file mode 100644
index 0000000000..2bdf47f172
--- /dev/null
+++ b/test/542-unresolved-access-check/src/Main.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.List;
+import p1.InP1;
+import p1.PlaceHolder;
+
+
+// Custom class loader to prevent loading while verifying.
+class MyClassLoader extends ClassLoader {
+ MyClassLoader() throws Exception {
+ super(MyClassLoader.class.getClassLoader());
+
+ // Some magic to get access to the pathList field of BaseDexClassLoader.
+ ClassLoader loader = getClass().getClassLoader();
+ Class<?> baseDexClassLoader = loader.getClass().getSuperclass();
+ Field f = baseDexClassLoader.getDeclaredField("pathList");
+ f.setAccessible(true);
+ Object pathList = f.get(loader);
+
+    // Some magic to get access to the dexElements and dexFile fields.
+ f = pathList.getClass().getDeclaredField("dexElements");
+ f.setAccessible(true);
+ dexElements = (Object[]) f.get(pathList);
+ dexFileField = dexElements[0].getClass().getDeclaredField("dexFile");
+ dexFileField.setAccessible(true);
+ }
+
+ Object[] dexElements;
+ Field dexFileField;
+
+ protected Class<?> loadClass(String className, boolean resolve) throws ClassNotFoundException {
+ if (className.equals("p1.OtherInP1") && !p1.PlaceHolder.entered) {
+ // The request comes from the verifier. Return null to get the access check entry
+ // point in the compiled code.
+ return null;
+ }
+ // Mimic what DexPathList.findClass is doing.
+ try {
+      for (Object element : dexElements) {
+        Object dex = dexFileField.get(element);
+        if (dex != null) {
+          Method method = dex.getClass().getDeclaredMethod(
+              "loadClassBinaryName", String.class, ClassLoader.class, List.class);
+
+          Class clazz = (Class)method.invoke(dex, className, this, null);
+          if (clazz != null) {
+            return clazz;
+          }
+        }
+      }
+ } catch (Exception e) { /* Ignore */ }
+ return getParent().loadClass(className);
+ }
+}
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ MyClassLoader o = new MyClassLoader();
+ Class foo = o.loadClass("LoadedByMyClassLoader");
+ Method m = foo.getDeclaredMethod("main");
+ m.invoke(null);
+ }
+}
+
+class LoadedByMyClassLoader {
+ public static void main() throws Exception {
+ for (int i = 0; i < 10000; ++i) {
+ // Warm up the JIT.
+ doTheCall(i);
+ }
+ // Sleep a while to let the JIT compile things.
+ // TODO(ngeoffray): Remove the sleep. b/25414532
+ Thread.sleep(2000);
+ doTheCall(10001);
+ }
+
+ public static void doTheCall(int i) {
+ InP1.$inline$AllocateOtherInP1(i);
+ InP1.$inline$AllocateArrayOtherInP1(i);
+ InP1.$inline$UseStaticFieldOtherInP1(i);
+ InP1.$inline$SetStaticFieldOtherInP1(i);
+ InP1.$inline$UseInstanceFieldOtherInP1(i);
+ InP1.$inline$SetInstanceFieldOtherInP1(i);
+ InP1.$inline$LoadOtherInP1(i);
+ InP1.$inline$StaticCallOtherInP1(i);
+ InP1.$inline$InstanceCallOtherInP1(i);
+ }
+}
diff --git a/test/542-unresolved-access-check/src/p1/InP1.java b/test/542-unresolved-access-check/src/p1/InP1.java
new file mode 100644
index 0000000000..3516c7231b
--- /dev/null
+++ b/test/542-unresolved-access-check/src/p1/InP1.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package p1;
+
+public class InP1 {
+ public static Object $inline$AllocateOtherInP1(int i) {
+    // Let this method execute for a while to make sure the JIT sees it hot.
+ if (i <= 10000) {
+ return null;
+ }
+ // Set the flag that we have entered InP1 code to get OtherInP1 loaded.
+ PlaceHolder.entered = true;
+ return new OtherInP1();
+ }
+
+ public static Object $inline$AllocateArrayOtherInP1(int i) {
+ if (i <= 10000) {
+ return null;
+ }
+ return new OtherInP1[10];
+ }
+
+ public static Object $inline$UseStaticFieldOtherInP1(int i) {
+ if (i <= 10000) {
+ return null;
+ }
+ return OtherInP1.staticField;
+ }
+
+ public static void $inline$SetStaticFieldOtherInP1(int i) {
+ if (i <= 10000) {
+ return;
+ }
+ OtherInP1.staticField = new Object();
+ }
+
+ public static Object $inline$UseInstanceFieldOtherInP1(int i) {
+ if (i <= 10000) {
+ return null;
+ }
+ return $noinline$AllocateOtherInP1().instanceField;
+ }
+
+ public static void $inline$SetInstanceFieldOtherInP1(int i) {
+ if (i <= 10000) {
+ return;
+ }
+ $noinline$AllocateOtherInP1().instanceField = new Object();
+ }
+
+ public static OtherInP1 $noinline$AllocateOtherInP1() {
+ try {
+ return new OtherInP1();
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ public static Object $inline$LoadOtherInP1(int i) {
+ if (i <= 10000) {
+ return null;
+ }
+ return OtherInP1.class;
+ }
+
+ public static Object $inline$StaticCallOtherInP1(int i) {
+ if (i <= 10000) {
+ return null;
+ }
+ return OtherInP1.doTheStaticCall();
+ }
+
+ public static Object $inline$InstanceCallOtherInP1(int i) {
+ if (i <= 10000) {
+ return null;
+ }
+ return $noinline$AllocateOtherInP1().doTheInstanceCall();
+ }
+}
diff --git a/test/542-unresolved-access-check/src/p1/OtherInP1.java b/test/542-unresolved-access-check/src/p1/OtherInP1.java
new file mode 100644
index 0000000000..adc1ce1297
--- /dev/null
+++ b/test/542-unresolved-access-check/src/p1/OtherInP1.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package p1;
+
+class OtherInP1 {
+ OtherInP1() {
+ }
+ static Object staticField = new Object();
+ Object instanceField = new Object();
+
+ static Object doTheStaticCall() {
+ return null;
+ }
+
+ Object doTheInstanceCall() {
+ return null;
+ }
+}
diff --git a/test/542-unresolved-access-check/src/p1/PlaceHolder.java b/test/542-unresolved-access-check/src/p1/PlaceHolder.java
new file mode 100644
index 0000000000..2bf4bdf15f
--- /dev/null
+++ b/test/542-unresolved-access-check/src/p1/PlaceHolder.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package p1;
+
+// A separate class that holds the 'entered' marker. If we were to put the marker
+// in InP1 or in OtherInP1, the code in MyClassLoader that reads the marker would load
+// InP1 or OtherInP1 in the system class loader, and not in MyClassLoader.
+public class PlaceHolder {
+ public static boolean entered = false;
+}
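
The comment above relies on the fact that touching a static field initializes (and, if
needed, loads) the declaring class through the loader of the referencing code. A tiny
illustration of that pattern, with hypothetical names:

    class Holder {
      // Holder is only loaded and initialized the first time 'flag' is accessed.
      static boolean flag = false;
    }

    class User {
      static boolean peek() {
        return Holder.flag;  // first call triggers loading/initialization of Holder
      }
    }
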
diff --git a/test/545-tracing-and-jit/expected.txt b/test/545-tracing-and-jit/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/545-tracing-and-jit/expected.txt
diff --git a/test/545-tracing-and-jit/info.txt b/test/545-tracing-and-jit/info.txt
new file mode 100644
index 0000000000..34e654e646
--- /dev/null
+++ b/test/545-tracing-and-jit/info.txt
@@ -0,0 +1,2 @@
+Tests interaction between the JIT and the method tracing
+functionality.
diff --git a/test/545-tracing-and-jit/src/Main.java b/test/545-tracing-and-jit/src/Main.java
new file mode 100644
index 0000000000..a2d51d5a8c
--- /dev/null
+++ b/test/545-tracing-and-jit/src/Main.java
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+public class Main {
+ private static final String TEMP_FILE_NAME_PREFIX = "test";
+ private static final String TEMP_FILE_NAME_SUFFIX = ".trace";
+ private static File file;
+
+ public static void main(String[] args) throws Exception {
+ String name = System.getProperty("java.vm.name");
+ if (!"Dalvik".equals(name)) {
+ System.out.println("This test is not supported on " + name);
+ return;
+ }
+ file = createTempFile();
+ try {
+ new Main().ensureCaller(true, 0);
+ new Main().ensureCaller(false, 0);
+ } finally {
+ if (file != null) {
+ file.delete();
+ }
+ }
+ }
+
+ private static File createTempFile() throws Exception {
+ try {
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ } catch (IOException e) {
+ System.setProperty("java.io.tmpdir", "/data/local/tmp");
+ try {
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ } catch (IOException e2) {
+ System.setProperty("java.io.tmpdir", "/sdcard");
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ }
+ }
+ }
+
+  // We make sure 'doLoadsOfStuff' has a caller, because it is that caller that will be
+  // pushed on the side instrumentation stack.
+ public void ensureCaller(boolean warmup, int invocationCount) throws Exception {
+ doLoadsOfStuff(warmup, invocationCount);
+ }
+
+ // The number of recursive calls we are going to do in 'doLoadsOfStuff' to ensure
+ // the JIT sees it hot.
+ static final int NUMBER_OF_INVOCATIONS = 5;
+
+ public void doLoadsOfStuff(boolean warmup, int invocationCount) throws Exception {
+ // Warmup is to make sure the JIT gets a chance to compile 'doLoadsOfStuff'.
+ if (warmup) {
+ if (invocationCount < NUMBER_OF_INVOCATIONS) {
+ doLoadsOfStuff(warmup, ++invocationCount);
+ } else {
+        // Give the JIT a chance to compile.
+ Thread.sleep(1000);
+ }
+ } else {
+ if (invocationCount == 0) {
+        // When running the test in trace mode, there is already a trace running.
+ if (VMDebug.getMethodTracingMode() != 0) {
+ VMDebug.stopMethodTracing();
+ }
+ VMDebug.startMethodTracing(file.getPath(), 0, 0, false, 0);
+ }
+ fillJit();
+ if (invocationCount < NUMBER_OF_INVOCATIONS) {
+ doLoadsOfStuff(warmup, ++invocationCount);
+ } else {
+ VMDebug.stopMethodTracing();
+ }
+ }
+ }
+
+  // This method creates enough profiling data to fill the code cache and trigger
+  // a collection in debug mode (at the time of writing the test, 10KB of data space).
+  // We used to crash because the code cache collection did not look at the
+  // instrumentation stack and deleted JIT code that the instrumentation would later restore.
+ public static void fillJit() throws Exception {
+ Map map = new HashMap();
+ map.put("foo", "bar");
+ map.clear();
+ map.containsKey("foo");
+ map.containsValue("foo");
+ map.entrySet();
+ map.equals(map);
+ map.hashCode();
+ map.isEmpty();
+ map.keySet();
+ map.putAll(map);
+ map.remove("foo");
+ map.size();
+ map.put("bar", "foo");
+ map.values();
+
+ map = new LinkedHashMap();
+ map.put("foo", "bar");
+ map.clear();
+ map.containsKey("foo");
+ map.containsValue("foo");
+ map.entrySet();
+ map.equals(map);
+ map.hashCode();
+ map.isEmpty();
+ map.keySet();
+ map.putAll(map);
+ map.remove("foo");
+ map.size();
+ map.put("bar", "foo");
+ map.values();
+
+ map = new TreeMap();
+ map.put("foo", "bar");
+ map.clear();
+ map.containsKey("foo");
+ map.containsValue("foo");
+ map.entrySet();
+ map.equals(map);
+ map.hashCode();
+ map.isEmpty();
+ map.keySet();
+ map.putAll(map);
+ map.remove("foo");
+ map.size();
+ map.put("bar", "foo");
+ map.values();
+
+ map = new ConcurrentSkipListMap();
+ map.put("foo", "bar");
+ map.clear();
+ map.containsKey("foo");
+ map.containsValue("foo");
+ map.entrySet();
+ map.equals(map);
+ map.hashCode();
+ map.isEmpty();
+ map.keySet();
+ map.putAll(map);
+ map.remove("foo");
+ map.size();
+ map.put("bar", "foo");
+ map.values();
+
+ Set set = new HashSet();
+ set.add("foo");
+ set.addAll(set);
+ set.clear();
+ set.contains("foo");
+ set.containsAll(set);
+ set.equals(set);
+ set.hashCode();
+ set.isEmpty();
+ set.iterator();
+ set.remove("foo");
+ set.removeAll(set);
+ set.retainAll(set);
+ set.size();
+ set.add("foo");
+ set.toArray();
+
+ set = new LinkedHashSet();
+ set.add("foo");
+ set.addAll(set);
+ set.clear();
+ set.contains("foo");
+ set.containsAll(set);
+ set.equals(set);
+ set.hashCode();
+ set.isEmpty();
+ set.iterator();
+ set.remove("foo");
+ set.removeAll(set);
+ set.retainAll(set);
+ set.size();
+ set.add("foo");
+ set.toArray();
+
+ set = new TreeSet();
+ set.add("foo");
+ set.addAll(set);
+ set.clear();
+ set.contains("foo");
+ set.containsAll(set);
+ set.equals(set);
+ set.hashCode();
+ set.isEmpty();
+ set.iterator();
+ set.remove("foo");
+ set.removeAll(set);
+ set.retainAll(set);
+ set.size();
+ set.add("foo");
+ set.toArray();
+ }
+
+ private static class VMDebug {
+ private static final Method startMethodTracingMethod;
+ private static final Method stopMethodTracingMethod;
+ private static final Method getMethodTracingModeMethod;
+ static {
+ try {
+ Class c = Class.forName("dalvik.system.VMDebug");
+ startMethodTracingMethod = c.getDeclaredMethod("startMethodTracing", String.class,
+ Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
+ stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
+ getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static void startMethodTracing(String filename, int bufferSize, int flags,
+ boolean samplingEnabled, int intervalUs) throws Exception {
+ startMethodTracingMethod.invoke(null, filename, bufferSize, flags, samplingEnabled,
+ intervalUs);
+ }
+ public static void stopMethodTracing() throws Exception {
+ stopMethodTracingMethod.invoke(null);
+ }
+ public static int getMethodTracingMode() throws Exception {
+ return (int) getMethodTracingModeMethod.invoke(null);
+ }
+ }
+}
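
The VMDebug wrapper above goes through reflection so the test also compiles against a
plain JDK. For code built against the platform, the equivalent calls would look roughly
like the sketch below (dalvik.system.VMDebug is not public API, so this is illustrative
only):

    import dalvik.system.VMDebug;

    class TracingSketch {
      static void traceOnce(String traceFile) throws Exception {
        // Same arguments the reflective wrapper forwards: file name, buffer size,
        // flags, sampling enabled, sampling interval in microseconds.
        VMDebug.startMethodTracing(traceFile, 0, 0, false, 0);
        // ... run the code to be traced ...
        VMDebug.stopMethodTracing();
      }
    }
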
diff --git a/test/962-iface-static/smali/Displayer.smali b/test/962-iface-static/smali/Displayer.smali
index 06bec16432..ed4c013d3b 100644
--- a/test/962-iface-static/smali/Displayer.smali
+++ b/test/962-iface-static/smali/Displayer.smali
@@ -27,7 +27,7 @@
.class public LDisplayer;
.super Ljava/lang/Object;
-.method public static <clinit>()V
+.method static constructor <clinit>()V
.locals 3
sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
const-string v0, "init"
diff --git a/test/962-iface-static/smali/iface.smali b/test/962-iface-static/smali/iface.smali
index 441aae669e..5b9c03ec46 100644
--- a/test/962-iface-static/smali/iface.smali
+++ b/test/962-iface-static/smali/iface.smali
@@ -27,7 +27,7 @@
.field public final static f:LDisplayer;
-.method public static <clinit>()V
+.method static constructor <clinit>()V
.locals 3
new-instance v1, LDisplayer;
invoke-direct {v1}, LDisplayer;-><init>()V
diff --git a/test/964-default-iface-init-generated/util-src/generate_smali.py b/test/964-default-iface-init-generated/util-src/generate_smali.py
index be2d3ba563..3c138abf26 100755
--- a/test/964-default-iface-init-generated/util-src/generate_smali.py
+++ b/test/964-default-iface-init-generated/util-src/generate_smali.py
@@ -334,7 +334,7 @@ class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin,
# public static final Displayer field = new Displayer("{tree}");
.field public final static field:LDisplayer;
-.method public static constructor <clinit>()V
+.method static constructor <clinit>()V
.locals 3
const-string v2, "{tree}"
new-instance v1, LDisplayer;
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 6ce3d9472c..8744674a30 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -59,6 +59,9 @@ define define-build-art-run-test
else
run_test_options += --build-with-javac-dx
endif
+ ifeq ($(ART_TEST_QUIET),true)
+ run_test_options += --quiet
+ endif
$$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
@@ -442,7 +445,6 @@ TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
441-checker-inliner \
510-checker-try-catch \
- 521-checker-array-set-null \
536-checker-intrinsic-optimization \
ifeq (mips,$(TARGET_ARCH))
@@ -858,6 +860,9 @@ define define-test-art-run-test
ifneq ($(ART_TEST_ANDROID_ROOT),)
run_test_options := --android-root $(ART_TEST_ANDROID_ROOT) $$(run_test_options)
endif
+ ifeq ($(ART_TEST_QUIET),true)
+ run_test_options += --quiet
+ endif
$$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
$$(run_test_rule_name): PRIVATE_JACK_CLASSPATH := $$(jack_classpath)
.PHONY: $$(run_test_rule_name)
diff --git a/test/run-test b/test/run-test
index f2bbaa7747..9b0261ed31 100755
--- a/test/run-test
+++ b/test/run-test
@@ -110,6 +110,7 @@ testlib="arttestd"
run_args="--quiet"
build_args=""
+quiet="no"
debuggable="no"
prebuild_mode="yes"
target_mode="yes"
@@ -142,6 +143,9 @@ while true; do
DEX_LOCATION=$tmp_dir
run_args="${run_args} --host"
shift
+ elif [ "x$1" = "x--quiet" ]; then
+ quiet="yes"
+ shift
elif [ "x$1" = "x--use-java-home" ]; then
if [ -n "${JAVA_HOME}" ]; then
export JAVA="${JAVA_HOME}/bin/java"
@@ -351,6 +355,29 @@ while true; do
fi
done
+# Allocate file descriptor real_stderr and redirect it to the shell's error
+# output (fd 2).
+if [ ${BASH_VERSINFO[0]} -gt 4 ] || { [ ${BASH_VERSINFO[0]} -eq 4 ] && [ ${BASH_VERSINFO[1]} -ge 1 ]; }; then
+ exec {real_stderr}>&2
+else
+ # In bash before version 4.1 we need to do a manual search for free file
+ # descriptors.
+ FD=3
+ while [ -e /dev/fd/$FD ]; do FD=$((FD + 1)); done
+ real_stderr=$FD
+ eval "exec ${real_stderr}>&2"
+fi
+if [ "$quiet" = "yes" ]; then
+ # Force the default standard output and error to go to /dev/null so we will
+ # not print them.
+ exec 1>/dev/null
+ exec 2>/dev/null
+fi
+
+function err_echo() {
+ echo "$@" 1>&${real_stderr}
+}
+
# tmp_dir may be relative, resolve.
#
# Cannot use realpath, as it does not exist on Mac.
@@ -383,7 +410,7 @@ if [ "$trace" = "true" ]; then
run_args="${run_args} --runtime-option -Xmethod-trace-file:${DEX_LOCATION}/trace.bin"
fi
elif [ "$trace_stream" = "true" ]; then
- echo "Cannot use --stream without --trace."
+ err_echo "Cannot use --stream without --trace."
exit 1
fi
@@ -410,7 +437,7 @@ function guess_host_arch_name() {
if [ "$target_mode" = "no" ]; then
if [ "$runtime" = "jvm" ]; then
if [ "$prebuild_mode" = "yes" ]; then
- echo "--prebuild with --jvm is unsupported";
+ err_echo "--prebuild with --jvm is unsupported"
exit 1;
fi
fi
@@ -462,7 +489,7 @@ fi
if [ "$have_image" = "no" ]; then
if [ "$runtime" != "art" ]; then
- echo "--no-image is only supported on the art runtime"
+ err_echo "--no-image is only supported on the art runtime"
exit 1
fi
if [ "$target_mode" = "no" ]; then
@@ -485,7 +512,12 @@ if [ "$have_image" = "no" ]; then
fi
if [ "$dev_mode" = "yes" -a "$update_mode" = "yes" ]; then
- echo "--dev and --update are mutually exclusive" 1>&2
+ err_echo "--dev and --update are mutually exclusive"
+ usage="yes"
+fi
+
+if [ "$dev_mode" = "yes" -a "$quiet" = "yes" ]; then
+ err_echo "--dev and --quiet are mutually exclusive"
usage="yes"
fi
@@ -499,7 +531,7 @@ if [ "$usage" = "no" ]; then
if [ '!' -d "$test_dir" ]; then
td2=`echo ${test_dir}-*`
if [ '!' -d "$td2" ]; then
- echo "${test_dir}: no such test directory" 1>&2
+ err_echo "${test_dir}: no such test directory"
usage="yes"
fi
test_dir="$td2"
@@ -580,7 +612,8 @@ if [ "$usage" = "yes" ]; then
echo " --pic-image Use an image compiled with position independent code for the"
echo " boot class path."
echo " --pic-test Compile the test code position independent."
- ) 1>&2
+ echo " --quiet Don't print anything except failure messages"
+ ) 1>&2 # Direct to stderr so usage is not printed if --quiet is set.
exit 1
fi
@@ -591,12 +624,12 @@ td_info="${test_dir}/${info}"
td_expected="${test_dir}/${expected}"
if [ ! -r $td_info ]; then
- echo "${test_dir}: missing file $td_info" 1>&2
+ err_echo "${test_dir}: missing file $td_info"
exit 1
fi
if [ ! -r $td_expected ]; then
- echo "${test_dir}: missing file $td_expected" 1>&2
+ err_echo "${test_dir}: missing file $td_expected"
exit 1
fi
@@ -691,7 +724,7 @@ fi
if [ ${USE_JACK} = "false" ]; then
# Set ulimit if we build with dx only, Jack can generate big temp files.
if ! ulimit -S "$build_file_size_limit"; then
- echo "ulimit file size setting failed"
+ err_echo "ulimit file size setting failed"
fi
fi
@@ -704,7 +737,7 @@ if [ "$dev_mode" = "yes" ]; then
echo "build exit status: $build_exit" 1>&2
if [ "$build_exit" = '0' ]; then
if ! ulimit -S "$run_file_size_limit"; then
- echo "ulimit file size setting failed"
+ err_echo "ulimit file size setting failed"
fi
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" 2>&1
@@ -720,7 +753,7 @@ if [ "$dev_mode" = "yes" ]; then
if [ "$checker_exit" = "0" ]; then
good="yes"
fi
- echo "checker exit status: $checker_exit" 1>&2
+ err_echo "checker exit status: $checker_exit"
else
good="yes"
fi
@@ -732,7 +765,7 @@ elif [ "$update_mode" = "yes" ]; then
build_exit="$?"
if [ "$build_exit" = '0' ]; then
if ! ulimit -S "$run_file_size_limit"; then
- echo "ulimit file size setting failed"
+ err_echo "ulimit file size setting failed"
fi
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" >"$output" 2>&1
@@ -745,8 +778,8 @@ elif [ "$update_mode" = "yes" ]; then
sed -e 's/[[:cntrl:]]$//g' < "$output" >"${td_expected}"
good="yes"
else
- cat "$build_output" 1>&2
- echo "build exit status: $build_exit" 1>&2
+        cat "$build_output" 1>&${real_stderr}
+ err_echo "build exit status: $build_exit"
fi
elif [ "$build_only" = "yes" ]; then
good="yes"
@@ -758,7 +791,7 @@ elif [ "$build_only" = "yes" ]; then
diff --strip-trailing-cr -q "$expected" "$output" >/dev/null
if [ "$?" '!=' "0" ]; then
good="no"
- echo "BUILD FAILED For ${TEST_NAME}"
+ err_echo "BUILD FAILED For ${TEST_NAME}"
fi
fi
# Clean up extraneous files that are not used by tests.
@@ -769,13 +802,13 @@ else
build_exit="$?"
if [ "$build_exit" = '0' ]; then
if ! ulimit -S "$run_file_size_limit"; then
- echo "ulimit file size setting failed"
+ err_echo "ulimit file size setting failed"
fi
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" >"$output" 2>&1
run_exit="$?"
if [ "$run_exit" != "0" ]; then
- echo "run exit status: $run_exit" 1>&2
+ err_echo "run exit status: $run_exit"
good_run="no"
elif [ "$run_checker" = "yes" ]; then
if [ "$target_mode" = "yes" ]; then
@@ -784,7 +817,7 @@ else
"$checker" -q $checker_args "$cfg_output" "$tmp_dir" >> "$output" 2>&1
checker_exit="$?"
if [ "$checker_exit" != "0" ]; then
- echo "checker exit status: $checker_exit" 1>&2
+ err_echo "checker exit status: $checker_exit"
good_run="no"
else
good_run="yes"
@@ -831,7 +864,7 @@ fi
echo ' '
fi
-) 1>&2
+) 2>&${real_stderr} 1>&2
# Clean up test files.
if [ "$always_clean" = "yes" -o "$good" = "yes" ] && [ "$never_clean" = "no" ]; then
@@ -859,6 +892,6 @@ fi
fi
fi
-) 1>&2
+) 2>&${real_stderr} 1>&2
exit 1
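
The quiet-mode change above follows a simple pattern: save a handle to the real stderr
before silencing the default output streams, then route failure messages through the
saved handle. A rough Java analogue of the same idea (purely illustrative; the script
itself does this with bash file descriptors and err_echo):

    import java.io.OutputStream;
    import java.io.PrintStream;

    class QuietMode {
      private static PrintStream realStderr;

      static void enable() {
        // Keep a reference to the real stderr, then discard normal output.
        realStderr = System.err;
        PrintStream devNull = new PrintStream(new OutputStream() {
          @Override public void write(int b) { /* discard */ }
        });
        System.setOut(devNull);
        System.setErr(devNull);
      }

      // Counterpart of err_echo: failures still reach the real stderr.
      static void errEcho(String message) {
        realStderr.println(message);
      }
    }
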
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 5615f8f409..d6f55aae16 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -28,8 +28,6 @@ TODO:
- Use consistent order for heap columns.
Sometimes I see "app" first, sometimes last (from one heap dump to
another) How about, always sort by name?
- * For long strings, limit the string length shown in the summary view to
- something reasonable. Say 50 chars, then add a "..." at the end.
* For HeapTable with single heap shown, the heap name isn't centered?
* Consistently document functions.
* Should help be part of an AhatHandler, that automatically gets the menu and
@@ -70,6 +68,7 @@ Things to Test:
showing all the instances.
* That InstanceUtils.asString properly takes into account "offset" and
"count" fields, if they are present.
+ * InstanceUtils.getDexCacheLocation
Reported Issues:
* Request to be able to sort tables by size.
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index eb9e363d8c..7fa53c78b5 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -60,9 +60,21 @@ class InstanceUtils {
}
- // Read the string value from an hprof Instance.
- // Returns null if the object can't be interpreted as a string.
+ /**
+ * Read the string value from an hprof Instance.
+ * Returns null if the object can't be interpreted as a string.
+ */
public static String asString(Instance inst) {
+ return asString(inst, -1);
+ }
+
+ /**
+ * Read the string value from an hprof Instance.
+ * Returns null if the object can't be interpreted as a string.
+ * The returned string is truncated to maxChars characters.
+ * If maxChars is negative, the returned string is not truncated.
+ */
+ public static String asString(Instance inst, int maxChars) {
if (!isInstanceOfClass(inst, "java.lang.String")) {
return null;
}
@@ -81,13 +93,15 @@ class InstanceUtils {
// array, we should use that here.
int numChars = chars.getValues().length;
int count = getIntField(inst, "count", numChars);
- int offset = getIntField(inst, "offset", 0);
- int end = offset + count - 1;
-
if (count == 0) {
return "";
}
+ if (0 <= maxChars && maxChars < count) {
+ count = maxChars;
+ }
+ int offset = getIntField(inst, "offset", 0);
+ int end = offset + count - 1;
if (offset >= 0 && offset < numChars && end >= 0 && end < numChars) {
return new String(chars.asCharArray(offset, count));
}
@@ -230,16 +244,36 @@ class InstanceUtils {
return null;
}
+ private static boolean isJavaLangRefReference(Instance inst) {
+ ClassObj cls = (inst == null) ? null : inst.getClassObj();
+ while (cls != null) {
+ if ("java.lang.ref.Reference".equals(cls.getClassName())) {
+ return true;
+ }
+ cls = cls.getSuperClassObj();
+ }
+ return false;
+ }
+
+ public static Instance getReferent(Instance inst) {
+ if (isJavaLangRefReference(inst)) {
+ return getRefField(inst, "referent");
+ }
+ return null;
+ }
+
/**
* Assuming inst represents a DexCache object, return the dex location for
* that dex cache. Returns null if the given instance doesn't represent a
* DexCache object or the location could not be found.
+ * If maxChars is non-negative, the returned location is truncated to
+ * maxChars in length.
*/
- public static String getDexCacheLocation(Instance inst) {
+ public static String getDexCacheLocation(Instance inst, int maxChars) {
if (isInstanceOfClass(inst, "java.lang.DexCache")) {
Instance location = getRefField(inst, "location");
if (location != null) {
- return asString(location);
+ return asString(location, maxChars);
}
}
return null;
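
A short usage sketch of the new helpers (the helper method and variable names here are
hypothetical; 'inst' stands for a perflib heap Instance taken from a parsed dump, and
only the InstanceUtils method names come from this change):

    // Hypothetical summarizer; 'inst' is a perflib heap Instance from a parsed dump.
    static String summarize(Instance inst) {
      String text = InstanceUtils.asString(inst, 50);       // truncated to 50 chars; negative max means no limit
      if (text != null) {
        return "\"" + text + "\"";
      }
      String dex = InstanceUtils.getDexCacheLocation(inst, 50);
      if (dex != null) {
        return "DexCache for " + dex;
      }
      Instance referent = InstanceUtils.getReferent(inst);  // null unless inst is a java.lang.ref.Reference
      return referent != null ? "reference to " + referent : inst.toString();
    }
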
diff --git a/tools/ahat/src/Value.java b/tools/ahat/src/Value.java
index 9b483faa32..7c969b3645 100644
--- a/tools/ahat/src/Value.java
+++ b/tools/ahat/src/Value.java
@@ -25,6 +25,10 @@ import java.net.URI;
*/
class Value {
+ // For string literals, we limit the number of characters we show to
+ // kMaxChars in case the string is really long.
+  private static final int kMaxChars = 200;
+
/**
* Create a DocString representing a summary of the given instance.
*/
@@ -41,21 +45,36 @@ class Value {
}
link.append(inst.toString());
+ URI objTarget = DocString.formattedUri("object?id=%d", inst.getId());
+ DocString formatted = DocString.link(objTarget, link);
// Annotate Strings with their values.
- String stringValue = InstanceUtils.asString(inst);
+ String stringValue = InstanceUtils.asString(inst, kMaxChars);
if (stringValue != null) {
- link.appendFormat("\"%s\"", stringValue);
+ formatted.appendFormat(" \"%s", stringValue);
+ formatted.append(kMaxChars == stringValue.length() ? "..." : "\"");
+ }
+
+ // Annotate Reference with its referent
+ Instance referent = InstanceUtils.getReferent(inst);
+ if (referent != null) {
+ formatted.append(" for ");
+
+ // It should not be possible for a referent to refer back to the
+ // reference object, even indirectly, so there shouldn't be any issues
+ // with infinite recursion here.
+ formatted.append(renderInstance(referent));
}
// Annotate DexCache with its location.
- String dexCacheLocation = InstanceUtils.getDexCacheLocation(inst);
+ String dexCacheLocation = InstanceUtils.getDexCacheLocation(inst, kMaxChars);
if (dexCacheLocation != null) {
- link.append(" for " + dexCacheLocation);
+ formatted.appendFormat(" for %s", dexCacheLocation);
+ if (kMaxChars == dexCacheLocation.length()) {
+ formatted.append("...");
+ }
}
- URI objTarget = DocString.formattedUri("object?id=%d", inst.getId());
- DocString formatted = DocString.link(objTarget, link);
// Annotate bitmaps with a thumbnail.
Instance bitmap = InstanceUtils.getAssociatedBitmapInstance(inst);
diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt
index 7efb1a770f..421de1715a 100644
--- a/tools/ahat/src/manifest.txt
+++ b/tools/ahat/src/manifest.txt
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 0.2
+Implementation-Version: 0.3
Main-Class: com.android.ahat.Main
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
index cea1dc179e..7b8774a34d 100644
--- a/tools/ahat/test-dump/Main.java
+++ b/tools/ahat/test-dump/Main.java
@@ -16,6 +16,9 @@
import dalvik.system.VMDebug;
import java.io.IOException;
+import java.lang.ref.PhantomReference;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.WeakReference;
/**
* Program used to create a heap dump for test purposes.
@@ -33,6 +36,9 @@ public class Main {
public String basicString = "hello, world";
public String nullString = null;
public Object anObject = new Object();
+ public ReferenceQueue<Object> referenceQueue = new ReferenceQueue<Object>();
+ public PhantomReference aPhantomReference = new PhantomReference(anObject, referenceQueue);
+ public WeakReference aWeakReference = new WeakReference(anObject, referenceQueue);
}
public static void main(String[] args) throws IOException {
diff --git a/tools/ahat/test/InstanceUtilsTest.java b/tools/ahat/test/InstanceUtilsTest.java
index 7613df4994..32f48ce560 100644
--- a/tools/ahat/test/InstanceUtilsTest.java
+++ b/tools/ahat/test/InstanceUtilsTest.java
@@ -25,24 +25,67 @@ import org.junit.Test;
public class InstanceUtilsTest {
@Test
- public void basicString() throws IOException {
+ public void asStringBasic() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance str = (Instance)dump.getDumpedThing("basicString");
assertEquals("hello, world", InstanceUtils.asString(str));
}
@Test
- public void nullString() throws IOException {
+ public void asStringTruncated() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("basicString");
+ assertEquals("hello", InstanceUtils.asString(str, 5));
+ }
+
+ @Test
+ public void asStringExactMax() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("basicString");
+ assertEquals("hello, world", InstanceUtils.asString(str, 12));
+ }
+
+ @Test
+ public void asStringNotTruncated() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("basicString");
+ assertEquals("hello, world", InstanceUtils.asString(str, 50));
+ }
+
+ @Test
+ public void asStringNegativeMax() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("basicString");
+ assertEquals("hello, world", InstanceUtils.asString(str, -3));
+ }
+
+ @Test
+ public void asStringNull() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance obj = (Instance)dump.getDumpedThing("nullString");
assertNull(InstanceUtils.asString(obj));
}
@Test
- public void notString() throws IOException {
+ public void asStringNotString() throws IOException {
TestDump dump = TestDump.getTestDump();
Instance obj = (Instance)dump.getDumpedThing("anObject");
assertNotNull(obj);
assertNull(InstanceUtils.asString(obj));
}
+
+ @Test
+ public void basicReference() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+
+ Instance pref = (Instance)dump.getDumpedThing("aPhantomReference");
+ Instance wref = (Instance)dump.getDumpedThing("aWeakReference");
+ Instance referent = (Instance)dump.getDumpedThing("anObject");
+ assertNotNull(pref);
+ assertNotNull(wref);
+ assertNotNull(referent);
+ assertEquals(referent, InstanceUtils.getReferent(pref));
+ assertEquals(referent, InstanceUtils.getReferent(wref));
+ assertNull(InstanceUtils.getReferent(referent));
+ }
}
diff --git a/tools/cpplint.py b/tools/cpplint.py
index 4f063d931c..308dd8c479 100755
--- a/tools/cpplint.py
+++ b/tools/cpplint.py
@@ -90,6 +90,7 @@ import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed]
+ [--quiet]
<file> [file] ...
The style guidelines this tries to follow are those in
@@ -115,6 +116,9 @@ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
+ quiet
+ Don't print anything if no errors are found.
+
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
@@ -558,6 +562,9 @@ class _CppLintState(object):
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
+ # BEGIN android-added
+ self.quiet = False # global setting.
+ # END android-added
# output format:
# "emacs" - format that emacs can parse (default)
@@ -568,6 +575,14 @@ class _CppLintState(object):
"""Sets the output format for errors."""
self.output_format = output_format
+ # BEGIN android-added
+ def SetQuiet(self, level):
+ """Sets the module's quiet setting, and returns the previous setting."""
+ last_quiet = self.quiet
+ self.quiet = level
+ return last_quiet
+ # END android-added
+
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
@@ -638,6 +653,17 @@ def _SetOutputFormat(output_format):
_cpplint_state.SetOutputFormat(output_format)
+# BEGIN android-added
+def _Quiet():
+ """Returns the module's quiet setting."""
+ return _cpplint_state.quiet
+
+
+def _SetQuiet(level):
+ """Sets the module's quiet status, and returns the previous setting."""
+ return _cpplint_state.SetQuiet(level)
+# END android-added
+
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
@@ -3888,6 +3914,9 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""
_SetVerboseLevel(vlevel)
+# BEGIN android-added
+ old_errors = _cpplint_state.error_count
+# END android-added
try:
# Support the UNIX convention of using "-" for stdin. Note that
@@ -3938,8 +3967,11 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]):
'One or more unexpected \\r (^M) found;'
'better to use only a \\n')
- sys.stderr.write('Done processing %s\n' % filename)
-
+# BEGIN android-changed
+ # sys.stderr.write('Done processing %s\n' % filename)
+ if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
+ sys.stderr.write('Done processing %s\n' % filename)
+# END android-changed
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
@@ -3977,6 +4009,9 @@ def ParseArguments(args):
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'stdout', # TODO(enh): added --stdout
+ # BEGIN android-added
+ 'quiet',
+ # END android-added
'counting=',
'filter=',
'root='])
@@ -3987,6 +4022,9 @@ def ParseArguments(args):
output_format = _OutputFormat()
output_stream = sys.stderr # TODO(enh): added --stdout
filters = ''
+ # BEGIN android-added
+ quiet = _Quiet()
+ # END android-added
counting_style = ''
for (opt, val) in opts:
@@ -3994,6 +4032,10 @@ def ParseArguments(args):
PrintUsage(None)
elif opt == '--stdout': # TODO(enh): added --stdout
output_stream = sys.stdout # TODO(enh): added --stdout
+ # BEGIN android-added
+ elif opt == '--quiet':
+ quiet = True
+ # END android-added
elif opt == '--output':
if not val in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
@@ -4019,6 +4061,9 @@ def ParseArguments(args):
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
+ # BEGIN android-added
+ _SetQuiet(quiet)
+ # END android-added
sys.stderr = output_stream # TODO(enh): added --stdout
return filenames
@@ -4037,7 +4082,11 @@ def main():
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
- _cpplint_state.PrintErrorCounts()
+ # BEGIN android-changed
+ # _cpplint_state.PrintErrorCounts()
+ if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
+ _cpplint_state.PrintErrorCounts()
+ # END android-changed
sys.exit(_cpplint_state.error_count > 0)
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 67a79838ee..4b5a5ca76d 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -57,7 +57,6 @@ working_packages=("dalvik.system"
"org.apache.harmony.luni"
"org.apache.harmony.nio"
"org.apache.harmony.regex"
- "org.apache.harmony.security"
"org.apache.harmony.testframework"
"org.apache.harmony.tests.java.io"
"org.apache.harmony.tests.java.lang"
@@ -68,6 +67,10 @@ working_packages=("dalvik.system"
"tests.java.lang.String"
"jsr166")
+# List of packages we could run, but for which we don't have the rights to
+# revert changes in case of failures.
+# "org.apache.harmony.security"
+
vogar_args=$@
while true; do
if [[ "$1" == "--mode=device" ]]; then