-rw-r--r--  build/apex/Android.bp | 1
-rw-r--r--  build/apex/file_contexts | 13
-rw-r--r--  build/codegen.go | 5
-rw-r--r--  compiler/Android.bp | 24
-rw-r--r--  compiler/driver/compiler_options.cc | 2
-rw-r--r--  compiler/driver/compiler_options.h | 16
-rw-r--r--  compiler/jit/jit_compiler.cc | 11
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 29
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 1
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 41
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 1
-rw-r--r--  compiler/optimizing/inliner.cc | 24
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 21
-rw-r--r--  compiler/optimizing/optimizing_compiler.h | 7
-rw-r--r--  dex2oat/Android.bp | 12
-rw-r--r--  dex2oat/dex2oat.cc | 11
-rw-r--r--  dex2oat/linker/image_writer.cc | 10
-rw-r--r--  dex2oat/linker/image_writer.h | 1
-rw-r--r--  disassembler/Android.bp | 6
-rw-r--r--  libartbase/Android.bp | 21
-rw-r--r--  libartbase/libartbase.map | 15
-rw-r--r--  oatdump/Android.bp | 6
-rw-r--r--  openjdkjvmti/deopt_manager.cc | 3
-rw-r--r--  openjdkjvmti/events.cc | 100
-rw-r--r--  openjdkjvmti/events.h | 2
-rw-r--r--  runtime/Android.bp | 7
-rw-r--r--  runtime/base/mutex-inl.h | 40
-rw-r--r--  runtime/base/mutex.h | 8
-rw-r--r--  runtime/cha.cc | 6
-rw-r--r--  runtime/class_linker.cc | 2
-rw-r--r--  runtime/class_loader_context.cc | 5
-rw-r--r--  runtime/class_loader_context_test.cc | 11
-rw-r--r--  runtime/gc/space/image_space.cc | 4
-rw-r--r--  runtime/interpreter/mterp/arm/other.S | 6
-rw-r--r--  runtime/interpreter/mterp/arm64/other.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/invoke.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/main.S | 5
-rw-r--r--  runtime/interpreter/mterp/x86/other.S | 14
-rw-r--r--  runtime/interpreter/mterp/x86_64/invoke.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86_64/main.S | 5
-rw-r--r--  runtime/interpreter/mterp/x86_64/other.S | 14
-rw-r--r--  runtime/mirror/array-inl.h | 12
-rw-r--r--  runtime/mirror/array.h | 8
-rw-r--r--  runtime/mirror/class-inl.h | 35
-rw-r--r--  runtime/mirror/class.h | 35
-rw-r--r--  runtime/mirror/class_ext-inl.h | 4
-rw-r--r--  runtime/mirror/object-inl.h | 95
-rw-r--r--  runtime/mirror/object.h | 30
-rw-r--r--  runtime/monitor.cc | 188
-rw-r--r--  runtime/monitor.h | 5
-rw-r--r--  runtime/native/java_lang_System.cc | 28
-rw-r--r--  runtime/native/sun_misc_Unsafe.cc | 24
-rw-r--r--  runtime/oat.h | 1
-rw-r--r--  runtime/runtime.cc | 15
-rw-r--r--  runtime/runtime.h | 12
-rw-r--r--  runtime/runtime_callbacks.cc | 20
-rw-r--r--  runtime/runtime_callbacks.h | 20
-rw-r--r--  runtime/thread.cc | 10
-rw-r--r--  runtime/well_known_classes.cc | 3
-rw-r--r--  runtime/well_known_classes.h | 1
-rw-r--r--  simulator/Android.bp | 4
-rw-r--r--  test/004-UnsafeTest/src/Main.java | 49
-rw-r--r--  test/004-UnsafeTest/unsafe_test.cc | 13
-rw-r--r--  test/1931-monitor-events/check | 22
-rw-r--r--  test/1931-monitor-events/expected.txt | 3
-rw-r--r--  test/1931-monitor-events/jvm-expected.patch | 3
-rw-r--r--  test/1931-monitor-events/src/art/Test1931.java | 12
-rw-r--r--  test/411-checker-hdiv-hrem-pow2/src/RemTest.java | 116
-rw-r--r--  test/911-get-stack-trace/src/art/PrintThread.java | 2
-rw-r--r--  test/Android.bp | 3
-rwxr-xr-x  tools/buildbot-build.sh | 2
-rw-r--r--  tools/hiddenapi/hiddenapi.cc | 1
-rw-r--r--  tools/libcore_gcstress_failures.txt | 10
73 files changed, 939 insertions, 373 deletions
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index bca29598e2..8bddb5dfe7 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -76,7 +76,6 @@ apex {
name: "com.android.runtime",
compile_multilib: "both",
manifest: "manifest.json",
- file_contexts: "file_contexts",
native_shared_libs: art_runtime_base_native_shared_libs
+ art_runtime_fake_native_shared_libs
+ art_runtime_debug_native_shared_libs,
diff --git a/build/apex/file_contexts b/build/apex/file_contexts
deleted file mode 100644
index 4d0df809e8..0000000000
--- a/build/apex/file_contexts
+++ /dev/null
@@ -1,13 +0,0 @@
-#############################
-# APEX module manifest.
-#
-/manifest\.json u:object_r:system_file:s0
-
-#############################
-# System files
-#
-(/.*)? u:object_r:system_file:s0
-/bin/dex2oat(d)? u:object_r:dex2oat_exec:s0
-/bin/dexoptanalyzer(d)? u:object_r:dexoptanalyzer_exec:s0
-/bin/profman(d)? u:object_r:profman_exec:s0
-/lib(64)?(/.*)? u:object_r:system_lib_file:s0
diff --git a/build/codegen.go b/build/codegen.go
index 8526bf192b..d0db78e571 100644
--- a/build/codegen.go
+++ b/build/codegen.go
@@ -107,8 +107,9 @@ func codegen(ctx android.LoadHookContext, c *codegenProperties, library bool) {
}
type CodegenCommonArchProperties struct {
- Srcs []string
- Cflags []string
+ Srcs []string
+ Cflags []string
+ Cppflags []string
}
type CodegenLibraryArchProperties struct {
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 30a65b280a..0d92b05593 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -183,7 +183,6 @@ art_cc_defaults {
shared_libs: [
"libbase",
"libcutils", // for atrace.
- "liblzma",
],
include_dirs: ["art/disassembler"],
header_libs: [
@@ -199,7 +198,6 @@ cc_defaults {
static_libs: [
"libbase",
"libcutils",
- "liblzma",
],
}
@@ -233,12 +231,12 @@ art_cc_library {
// VIXL assembly support for ARM targets.
static: {
whole_static_libs: [
- "libvixl-arm",
+ "libvixl",
],
},
shared: {
shared_libs: [
- "libvixl-arm",
+ "libvixl",
],
},
},
@@ -246,12 +244,12 @@ art_cc_library {
// VIXL assembly support for ARM64 targets.
static: {
whole_static_libs: [
- "libvixl-arm64",
+ "libvixl",
],
},
shared: {
shared_libs: [
- "libvixl-arm64",
+ "libvixl",
],
},
},
@@ -295,12 +293,12 @@ art_cc_library {
// VIXL assembly support for ARM targets.
static: {
whole_static_libs: [
- "libvixld-arm",
+ "libvixld",
],
},
shared: {
shared_libs: [
- "libvixld-arm",
+ "libvixld",
],
},
},
@@ -308,12 +306,12 @@ art_cc_library {
// VIXL assembly support for ARM64 targets.
static: {
whole_static_libs: [
- "libvixld-arm64",
+ "libvixld",
],
},
shared: {
shared_libs: [
- "libvixld-arm64",
+ "libvixld",
],
},
},
@@ -454,8 +452,7 @@ art_cc_test {
"libprofiled",
"libartd-compiler",
"libartd-simulator-container",
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
"libbacktrace",
"libnativeloader",
@@ -512,7 +509,6 @@ art_cc_test {
},
shared_libs: [
"libartd-compiler",
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
],
}
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index be8e10e41e..685cde338b 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -45,8 +45,8 @@ CompilerOptions::CompilerOptions()
dex_files_for_oat_file_(),
image_classes_(),
boot_image_(false),
- core_image_(false),
app_image_(false),
+ compiling_with_core_image_(false),
baseline_(false),
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 77f84820e5..2f4e5428ea 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -198,13 +198,6 @@ class CompilerOptions final {
return baseline_;
}
- // Are we compiling a core image (small boot image only used for ART testing)?
- bool IsCoreImage() const {
- // Ensure that `core_image_` => `boot_image_`.
- DCHECK(!core_image_ || boot_image_);
- return core_image_;
- }
-
// Are we compiling an app image?
bool IsAppImage() const {
return app_image_;
@@ -214,6 +207,13 @@ class CompilerOptions final {
app_image_ = false;
}
+ // Returns whether we are compiling against a "core" image, which
+ // is an indication that we are running tests. The compiler will use that
+ // information for checking invariants.
+ bool CompilingWithCoreImage() const {
+ return compiling_with_core_image_;
+ }
+
// Should the code be compiled as position independent?
bool GetCompilePic() const {
return compile_pic_;
@@ -357,8 +357,8 @@ class CompilerOptions final {
HashSet<std::string> image_classes_;
bool boot_image_;
- bool core_image_;
bool app_image_;
+ bool compiling_with_core_image_;
bool baseline_;
bool debuggable_;
bool generate_debug_info_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index f22f61fa21..bb35065921 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -90,10 +90,11 @@ JitCompiler::JitCompiler() {
// Special case max code units for inlining, whose default is "unset" (implicitly
// meaning no limit). Do this before parsing the actual passed options.
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
+ Runtime* runtime = Runtime::Current();
{
std::string error_msg;
- if (!compiler_options_->ParseCompilerOptions(Runtime::Current()->GetCompilerOptions(),
- true /* ignore_unrecognized */,
+ if (!compiler_options_->ParseCompilerOptions(runtime->GetCompilerOptions(),
+ /*ignore_unrecognized=*/ true,
&error_msg)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
@@ -103,7 +104,7 @@ JitCompiler::JitCompiler() {
compiler_options_->SetNonPic();
// Set debuggability based on the runtime value.
- compiler_options_->SetDebuggable(Runtime::Current()->IsJavaDebuggable());
+ compiler_options_->SetDebuggable(runtime->IsJavaDebuggable());
const InstructionSet instruction_set = compiler_options_->GetInstructionSet();
if (kRuntimeISA == InstructionSet::kArm) {
@@ -112,7 +113,7 @@ JitCompiler::JitCompiler() {
DCHECK_EQ(instruction_set, kRuntimeISA);
}
std::unique_ptr<const InstructionSetFeatures> instruction_set_features;
- for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ for (const StringPiece option : runtime->GetCompilerOptions()) {
VLOG(compiler) << "JIT compiler option " << option;
std::string error_msg;
if (option.starts_with("--instruction-set-variant=")) {
@@ -144,6 +145,8 @@ JitCompiler::JitCompiler() {
instruction_set_features = InstructionSetFeatures::FromCppDefines();
}
compiler_options_->instruction_set_features_ = std::move(instruction_set_features);
+ compiler_options_->compiling_with_core_image_ =
+ CompilerDriver::IsCoreImageFilename(runtime->GetImageLocation());
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(),
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 7dcf28952d..fba4da63cc 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3497,6 +3497,27 @@ void InstructionCodeGeneratorX86::DivRemOneOrMinusOne(HBinaryOperation* instruct
}
}
+void InstructionCodeGeneratorX86::RemByPowerOfTwo(HRem* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+
+ Register out = locations->Out().AsRegister<Register>();
+ Register numerator = locations->InAt(0).AsRegister<Register>();
+
+ int32_t imm = Int64FromConstant(second.GetConstant());
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
+
+ Register tmp = locations->GetTemp(0).AsRegister<Register>();
+ NearLabel done;
+ __ movl(out, numerator);
+ __ andl(out, Immediate(abs_imm-1));
+ __ j(Condition::kZero, &done);
+ __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm-1))));
+ __ testl(numerator, numerator);
+ __ cmovl(Condition::kLess, out, tmp);
+ __ Bind(&done);
+}
void InstructionCodeGeneratorX86::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
@@ -3610,8 +3631,12 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
// Do not generate anything for 0. DivZeroCheck would forbid any generated code.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (is_div && IsPowerOfTwo(AbsOrMin(imm))) {
- DivByPowerOfTwo(instruction->AsDiv());
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ if (is_div) {
+ DivByPowerOfTwo(instruction->AsDiv());
+ } else {
+ RemByPowerOfTwo(instruction->AsRem());
+ }
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
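The new RemByPowerOfTwo path above (and the analogous x86-64 change below) follows the usual branch-light sequence for a signed remainder by a power-of-two divisor. A minimal C++ sketch of the value-level computation the emitted code performs, illustrative only and not ART code (handling of the minimum divisor value is simplified here):

    #include <cstdint>

    // Sketch of n % d for d = +/-2^k with truncated-division semantics,
    // mirroring what the leal/cmov (32-bit) and sarq/shlq/orq (64-bit)
    // sequences compute without a divide instruction.
    int64_t RemByPowerOfTwoSketch(int64_t n, int64_t d) {
      // Magnitude of the divisor; negating through uint64_t also covers INT64_MIN.
      uint64_t abs_d = d < 0 ? -static_cast<uint64_t>(d) : static_cast<uint64_t>(d);
      int64_t masked = n & static_cast<int64_t>(abs_d - 1);  // low k bits of n
      if (masked == 0) {
        return 0;  // exact multiples (and n == 0) need no sign fixup
      }
      // For negative dividends, pull the result back into (-|d|, 0); the
      // generated code does this branchlessly via cmov or a shifted sign mask.
      return n < 0 ? masked - static_cast<int64_t>(abs_d) : masked;
    }

The 32-bit path realizes the fixup with leal/cmov, while the 64-bit path ORs in a shifted sign mask, since the mask may not fit in an immediate.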
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1e49403402..deeef888e2 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -218,6 +218,7 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivByPowerOfTwo(HDiv* instruction);
+ void RemByPowerOfTwo(HRem* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateRemFP(HRem* rem);
void HandleCondition(HCondition* condition);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d8253907fc..14cff05f58 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3560,7 +3560,40 @@ void InstructionCodeGeneratorX86_64::DivRemOneOrMinusOne(HBinaryOperation* instr
LOG(FATAL) << "Unexpected type for div by (-)1 " << instruction->GetResultType();
}
}
+void InstructionCodeGeneratorX86_64::RemByPowerOfTwo(HRem* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister numerator = locations->InAt(0).AsRegister<CpuRegister>();
+ int64_t imm = Int64FromConstant(second.GetConstant());
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint64_t abs_imm = AbsOrMin(imm);
+ CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ if (instruction->GetResultType() == DataType::Type::kInt32) {
+ NearLabel done;
+ __ movl(out, numerator);
+ __ andl(out, Immediate(abs_imm-1));
+ __ j(Condition::kZero, &done);
+ __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm-1))));
+ __ testl(numerator, numerator);
+ __ cmov(Condition::kLess, out, tmp, false);
+ __ Bind(&done);
+
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
+ codegen_->Load64BitValue(tmp, abs_imm - 1);
+ NearLabel done;
+ __ movq(out, numerator);
+ __ andq(out, tmp);
+ __ j(Condition::kZero, &done);
+ __ movq(tmp, numerator);
+ __ sarq(tmp, Immediate(63));
+ __ shlq(tmp, Immediate(WhichPowerOf2(abs_imm)));
+ __ orq(out, tmp);
+ __ Bind(&done);
+ }
+}
void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location second = locations->InAt(1);
@@ -3737,8 +3770,12 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
// Do not generate anything. DivZeroCheck would prevent any code to be executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (instruction->IsDiv() && IsPowerOfTwo(AbsOrMin(imm))) {
- DivByPowerOfTwo(instruction->AsDiv());
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ if (is_div) {
+ DivByPowerOfTwo(instruction->AsDiv());
+ } else {
+ RemByPowerOfTwo(instruction->AsRem());
+ }
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 72c4fd499d..f74e130702 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -215,6 +215,7 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
void GenerateRemFP(HRem* rem);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivByPowerOfTwo(HDiv* instruction);
+ void RemByPowerOfTwo(HRem* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleCondition(HCondition* condition);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index c1daf95727..d85bfd5564 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -39,7 +39,6 @@
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
#include "nodes.h"
-#include "optimizing_compiler.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "scoped_thread_state_change-inl.h"
@@ -151,13 +150,13 @@ bool HInliner::Run() {
// If we're compiling with a core image (which is only used for
// test purposes), honor inlining directives in method names:
- // - if a method's name contains the substring "$inline$", ensure
- // that this method is actually inlined;
// - if a method's name contains the substring "$noinline$", do not
- // inline that method.
+ // inline that method;
+ // - if a method's name contains the substring "$inline$", ensure
+ // that this method is actually inlined.
// We limit the latter to AOT compilation, as the JIT may or may not inline
// depending on the state of classes at runtime.
- const bool honor_noinline_directives = IsCompilingWithCoreImage();
+ const bool honor_noinline_directives = codegen_->GetCompilerOptions().CompilingWithCoreImage();
const bool honor_inline_directives =
honor_noinline_directives && Runtime::Current()->IsAotCompiler();
@@ -1737,6 +1736,21 @@ static inline Handle<T> NewHandleIfDifferent(T* object,
return (object != hint.Get()) ? handles->NewHandle(object) : hint;
}
+static bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!Runtime::Current()->IsAotCompiler()) {
+ // JIT can always encode methods in stack maps.
+ return true;
+ }
+ if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
+ return true;
+ }
+ // TODO(ngeoffray): Support more AOT cases for inlining:
+ // - methods in multidex
+ // - methods in boot image for on-device non-PIC compilation.
+ return false;
+}
+
bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
ReferenceTypeInfo receiver_type,
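For context on the honor_noinline_directives flag changed above: the directives are plain substrings embedded in test method names. A hypothetical sketch of such a name-based check (helper and parameter names are illustrative, not the exact HInliner code):

    #include <string>

    // Illustrative only: when compiling against a core (test) image, a
    // "$noinline$" marker in the callee's pretty name forbids inlining it,
    // while "$inline$" (honored for AOT only) asserts that inlining succeeds.
    bool NameForbidsInlining(const std::string& callee_pretty_name,
                             bool compiling_with_core_image) {
      if (!compiling_with_core_image) {
        return false;  // Directives are only honored for test builds.
      }
      return callee_pretty_name.find("$noinline$") != std::string::npos;
    }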
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c9c1194e5a..fe6abd4999 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1136,7 +1136,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
if (kIsDebugBuild &&
- IsCompilingWithCoreImage() &&
+ compiler_driver->GetCompilerOptions().CompilingWithCoreImage() &&
IsInstructionSetSupported(compiler_driver->GetCompilerOptions().GetInstructionSet())) {
// For testing purposes, we put a special marker on method names
// that should be compiled with this compiler (when the
@@ -1234,30 +1234,11 @@ Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
return new OptimizingCompiler(driver);
}
-bool IsCompilingWithCoreImage() {
- const std::string& image = Runtime::Current()->GetImageLocation();
- return CompilerDriver::IsCoreImageFilename(image);
-}
-
bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
// Note: the runtime is null only for unit testing.
return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
}
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
- if (!Runtime::Current()->IsAotCompiler()) {
- // JIT can always encode methods in stack maps.
- return true;
- }
- if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
- return true;
- }
- // TODO(ngeoffray): Support more AOT cases for inlining:
- // - methods in multidex
- // - methods in boot image for on-device non-PIC compilation.
- return false;
-}
-
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index 6ee9c70fdb..f5279e83eb 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -29,14 +29,7 @@ class DexFile;
Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
-// Returns whether we are compiling against a "core" image, which
-// is an indicative we are running tests. The compiler will use that
-// information for checking invariants.
-bool IsCompilingWithCoreImage();
-
bool EncodeArtMethodInInlineInfo(ArtMethod* method);
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index fd5f3cd9e4..8ce96a4c16 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -71,11 +71,8 @@ art_cc_defaults {
generated_sources: ["art_dex2oat_operator_srcs"],
shared_libs: [
"libbase",
- "liblz4",
- "liblzma",
],
include_dirs: [
- "external/lz4/lib",
"external/zlib",
],
export_include_dirs: ["."],
@@ -98,8 +95,6 @@ cc_defaults {
},
static_libs: [
"libbase",
- "liblz4",
- "liblzma",
],
}
@@ -247,7 +242,6 @@ art_cc_binary {
"libdexfile",
"libartbase",
"libbase",
- "liblz4",
"libsigchain",
],
static_libs: [
@@ -285,7 +279,6 @@ art_cc_binary {
"libdexfiled",
"libartbased",
"libbase",
- "liblz4",
"libsigchain",
],
static_libs: [
@@ -315,7 +308,6 @@ cc_defaults {
],
static_libs: [
"libbase",
- "liblz4",
"libsigchain_dummy",
],
}
@@ -411,11 +403,11 @@ art_cc_test {
"external/zlib",
],
shared_libs: [
- "libprofiled",
+ "libartbased",
"libartd-compiler",
"libartd-dexlayout",
"libbase",
- "liblz4",
+ "libprofiled",
"libsigchain",
"libziparchive",
],
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d901c01fc8..dc123e489b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -732,11 +732,6 @@ class Dex2Oat final {
compiler_options_->boot_image_ = !image_filenames_.empty();
compiler_options_->app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
- if (IsBootImage() && image_filenames_.size() == 1) {
- const std::string& boot_image_filename = image_filenames_[0];
- compiler_options_->core_image_ = CompilerDriver::IsCoreImageFilename(boot_image_filename);
- }
-
if (IsAppImage() && IsBootImage()) {
Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
}
@@ -951,6 +946,9 @@ class Dex2Oat final {
}
}
compiler_options_->passes_to_run_ = passes_to_run_.get();
+ compiler_options_->compiling_with_core_image_ =
+ !boot_image_filename_.empty() &&
+ CompilerDriver::IsCoreImageFilename(boot_image_filename_);
}
static bool SupportsDeterministicCompilation() {
@@ -1055,9 +1053,6 @@ class Dex2Oat final {
oss << argv[i];
}
key_value_store_->Put(OatHeader::kDex2OatCmdLineKey, oss.str());
- oss.str(""); // Reset.
- oss << kRuntimeISA;
- key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
}
key_value_store_->Put(
OatHeader::kDebuggableKey,
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 5ca7f0733d..2b2b02941a 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -2670,10 +2670,8 @@ void ImageWriter::CopyAndFixupObjects() {
void ImageWriter::FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
- mirror::Class* klass,
Bin array_type) {
- CHECK(klass->IsArrayClass());
- CHECK(arr->IsIntArray() || arr->IsLongArray()) << klass->PrettyClass() << " " << arr;
+ CHECK(arr->IsIntArray() || arr->IsLongArray()) << arr->GetClass()->PrettyClass() << " " << arr;
// Fixup int and long pointers for the ArtMethod or ArtField arrays.
const size_t num_elements = arr->GetLength();
CopyAndFixupReference(
@@ -2879,13 +2877,12 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
if (kUseBakerReadBarrier) {
orig->AssertReadBarrierState();
}
- auto* klass = orig->GetClass();
- if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
+ if (orig->IsIntArray() || orig->IsLongArray()) {
// Is this a native pointer array?
auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
if (it != pointer_arrays_.end()) {
// Should only need to fixup every pointer array exactly once.
- FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second);
+ FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), it->second);
pointer_arrays_.erase(it);
return;
}
@@ -2895,6 +2892,7 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
} else {
ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
Runtime::Current()->GetClassLinker()->GetClassRoots();
+ ObjPtr<mirror::Class> klass = orig->GetClass();
if (klass == GetClassRoot<mirror::Method>(class_roots) ||
klass == GetClassRoot<mirror::Constructor>(class_roots)) {
// Need to go update the ArtMethod.
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 06c694c793..ccd529aa13 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -541,7 +541,6 @@ class ImageWriter final {
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
- mirror::Class* klass,
Bin array_type)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/disassembler/Android.bp b/disassembler/Android.bp
index a7c1802515..5aa159e98b 100644
--- a/disassembler/Android.bp
+++ b/disassembler/Android.bp
@@ -69,8 +69,7 @@ art_cc_library {
defaults: ["libart-disassembler-defaults"],
shared_libs: [
// For disassembler_arm*.
- "libvixl-arm",
- "libvixl-arm64",
+ "libvixl",
],
}
@@ -82,7 +81,6 @@ art_cc_library {
],
shared_libs: [
// For disassembler_arm*.
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
],
}
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 5010f683d7..6a667bc7a7 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -54,6 +54,10 @@ cc_defaults {
"libziparchive",
"libz",
],
+ // Exclude the version script from Darwin host since it's not
+ // supported by the linker there. That means ASan checks on Darwin
+ // might trigger ODR violations.
+ version_script: "libartbase.map",
},
host: {
shared_libs: [
@@ -61,16 +65,31 @@ cc_defaults {
"libz",
],
},
+ linux_glibc: {
+ version_script: "libartbase.map",
+ },
+ windows: {
+ version_script: "libartbase.map",
+ },
},
generated_sources: ["art_libartbase_operator_srcs"],
cflags: ["-DBUILDING_LIBART=1"],
shared_libs: [
"liblog",
- // For ashmem.
+ // For ashmem.
"libcutils",
// For common macros.
"libbase",
],
+
+ // Utilities used by various ART libs and tools are linked in statically
+ // here to avoid shared lib dependencies outside the ART APEX. No target
+ // there should depend on these separately.
+ whole_static_libs: [
+ "liblz4",
+ "liblzma",
+ ],
+
export_include_dirs: ["."],
// ART's macros.h depends on libbase's macros.h.
// Note: runtime_options.h depends on cmdline. But we don't really want to export this
diff --git a/libartbase/libartbase.map b/libartbase/libartbase.map
new file mode 100644
index 0000000000..6249930dc1
--- /dev/null
+++ b/libartbase/libartbase.map
@@ -0,0 +1,15 @@
+# This is used only to hide data symbols that get imported through
+# whole_static_libs, or else they might trigger the ASan odr-violation check.
+# Before adding symbols here, please make sure that they don't give rise to a
+# real ODR problem. All these symbols are either in .rodata or .data.rel.ro
+# sections.
+LIBARTBASE {
+ local:
+ PPMD7_kExpEscape;
+ XZ_SIG;
+ g_AlignedAlloc;
+ g_Alloc;
+ g_BigAlloc;
+ g_MidAlloc;
+ k7zSignature;
+};
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
index 596a94664d..45f853b06e 100644
--- a/oatdump/Android.bp
+++ b/oatdump/Android.bp
@@ -100,8 +100,7 @@ art_cc_binary {
],
static_libs: [
"libart-disassembler",
- "libvixl-arm",
- "libvixl-arm64",
+ "libvixl",
],
}
@@ -124,8 +123,7 @@ art_cc_binary {
},
static_libs: [
"libartd-disassembler",
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
],
group_static_libs: true,
}
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index 8bac38a355..d456d83368 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -139,6 +139,9 @@ void DeoptManager::FinishSetup() {
// OnLoad since the runtime hasn't started up sufficiently. This is only expected to happen
// on userdebug/eng builds.
LOG(INFO) << "Attempting to start jit for openjdkjvmti plugin.";
+ // Note: use rwx allowed = true, because if this is the system server, we will not be
+ // allowed to allocate any JIT code cache, anyways.
+ runtime->CreateJitCodeCache(/*rwx_memory_allowed=*/true);
runtime->CreateJit();
if (runtime->GetJit() == nullptr) {
LOG(WARNING) << "Could not start jit for openjdkjvmti plugin. This process might be "
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 48df53a143..a96436e95a 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -32,6 +32,7 @@
#include "events-inl.h"
#include <array>
+#include <sys/time.h>
#include "art_field-inl.h"
#include "art_jvmti.h"
@@ -56,6 +57,7 @@
#include "thread-inl.h"
#include "thread_list.h"
#include "ti_phase.h"
+#include "well_known_classes.h"
namespace openjdkjvmti {
@@ -410,14 +412,103 @@ class JvmtiMonitorListener : public art::MonitorCallback {
EventHandler* handler_;
};
-static void SetupMonitorListener(art::MonitorCallback* listener, bool enable) {
+class JvmtiParkListener : public art::ParkCallback {
+ public:
+ explicit JvmtiParkListener(EventHandler* handler) : handler_(handler) {}
+
+ void ThreadParkStart(bool is_absolute, int64_t timeout)
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
+ art::Thread* self = art::Thread::Current();
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ art::ArtField* parkBlockerField = art::jni::DecodeArtField(
+ art::WellKnownClasses::java_lang_Thread_parkBlocker);
+ art::ObjPtr<art::mirror::Object> blocker_obj = parkBlockerField->GetObj(self->GetPeer());
+ if (blocker_obj.IsNull()) {
+ blocker_obj = self->GetPeer();
+ }
+ int64_t timeout_ms;
+ if (!is_absolute) {
+ if (timeout == 0) {
+ timeout_ms = 0;
+ } else {
+ timeout_ms = timeout / 1000000;
+ if (timeout_ms == 0) {
+ // If we were instructed to park for a nonzero number of nanoseconds, but not enough
+ // to be a full millisecond, round up to 1 ms. A nonzero park() call will return
+ // soon, but a 0 wait or park call will wait indefinitely.
+ timeout_ms = 1;
+ }
+ }
+ } else {
+ struct timeval tv;
+ gettimeofday(&tv, (struct timezone *) nullptr);
+ int64_t now = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
+ if (now < timeout) {
+ timeout_ms = timeout - now;
+ } else {
+ // Waiting for 0 ms is an indefinite wait; parking until a time in
+ // the past or the current time will return immediately, so emulate
+ // the shortest possible wait event.
+ timeout_ms = 1;
+ }
+ }
+ ScopedLocalRef<jobject> blocker(jnienv, AddLocalRef<jobject>(jnienv, blocker_obj.Ptr()));
+ RunEventCallback<ArtJvmtiEvent::kMonitorWait>(
+ handler_,
+ self,
+ jnienv,
+ blocker.get(),
+ static_cast<jlong>(timeout_ms));
+ }
+ }
+
+
+ // Our interpretation of the spec is that the JVMTI_EVENT_MONITOR_WAITED will be sent immediately
+ // after a thread has woken up from a sleep caused by a call to Object#wait. If the thread will
+ // never go to sleep (due to not having the lock, having bad arguments, or having an exception
+ // propagated from JVMTI_EVENT_MONITOR_WAIT) we will not send this event.
+ //
+ // This does not fully match the RI semantics. Specifically, we will not send the
+ // JVMTI_EVENT_MONITOR_WAITED event in one situation where the RI would: there was an exception in
+ // the JVMTI_EVENT_MONITOR_WAIT event but otherwise the call was fine. In that case the RI would
+ // send this event and return without going to sleep.
+ //
+ // See b/65558434 for more discussion.
+ void ThreadParkFinished(bool timeout) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
+ art::Thread* self = art::Thread::Current();
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ art::ArtField* parkBlockerField = art::jni::DecodeArtField(
+ art::WellKnownClasses::java_lang_Thread_parkBlocker);
+ art::ObjPtr<art::mirror::Object> blocker_obj = parkBlockerField->GetObj(self->GetPeer());
+ if (blocker_obj.IsNull()) {
+ blocker_obj = self->GetPeer();
+ }
+ ScopedLocalRef<jobject> blocker(jnienv, AddLocalRef<jobject>(jnienv, blocker_obj.Ptr()));
+ RunEventCallback<ArtJvmtiEvent::kMonitorWaited>(
+ handler_,
+ self,
+ jnienv,
+ blocker.get(),
+ static_cast<jboolean>(timeout));
+ }
+ }
+
+ private:
+ EventHandler* handler_;
+};
+
+static void SetupMonitorListener(art::MonitorCallback* monitor_listener, art::ParkCallback* park_listener, bool enable) {
// We must not hold the mutator lock here, but if we're in FastJNI, for example, we might. For
// now, do a workaround: (possibly) acquire and release.
art::ScopedObjectAccess soa(art::Thread::Current());
if (enable) {
- art::Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(listener);
+ art::Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(monitor_listener);
+ art::Runtime::Current()->GetRuntimeCallbacks()->AddParkCallback(park_listener);
} else {
- art::Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(listener);
+ art::Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(monitor_listener);
+ art::Runtime::Current()->GetRuntimeCallbacks()->RemoveParkCallback(park_listener);
}
}
@@ -1053,7 +1144,7 @@ void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
case ArtJvmtiEvent::kMonitorWait:
case ArtJvmtiEvent::kMonitorWaited:
if (!OtherMonitorEventsEnabledAnywhere(event)) {
- SetupMonitorListener(monitor_listener_.get(), enable);
+ SetupMonitorListener(monitor_listener_.get(), park_listener_.get(), enable);
}
return;
default:
@@ -1204,6 +1295,7 @@ EventHandler::EventHandler()
gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
method_trace_listener_.reset(new JvmtiMethodTraceListener(this));
monitor_listener_.reset(new JvmtiMonitorListener(this));
+ park_listener_.reset(new JvmtiParkListener(this));
}
EventHandler::~EventHandler() {
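The timeout handling in ThreadParkStart above converts the two park() flavours into the millisecond value the JVMTI MonitorWait callback expects. A small C++ sketch of that conversion, illustrative only, with the current time passed in rather than read via gettimeofday:

    #include <cstdint>

    // Relative parks pass a nanosecond duration; absolute parks pass a
    // millisecond deadline. JVMTI wants milliseconds, where 0 means "wait
    // indefinitely", so finite waits are rounded up to at least 1 ms.
    int64_t ParkTimeoutToJvmtiMs(bool is_absolute, int64_t timeout, int64_t now_ms) {
      if (!is_absolute) {
        if (timeout == 0) {
          return 0;                      // park() with no timeout: indefinite
        }
        int64_t ms = timeout / 1000000;  // ns -> ms
        return ms == 0 ? 1 : ms;         // sub-millisecond waits become 1 ms
      }
      // Absolute deadline: report the remaining time, or the shortest
      // possible wait if the deadline is already in the past.
      return now_ms < timeout ? timeout - now_ms : 1;
    }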
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index 9f91a08b8b..abb15cc329 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -35,6 +35,7 @@ class JvmtiDdmChunkListener;
class JvmtiGcPauseListener;
class JvmtiMethodTraceListener;
class JvmtiMonitorListener;
+class JvmtiParkListener;
// an enum for ArtEvents. This differs from the JVMTI events only in that we distinguish between
// retransformation capable and incapable loading
@@ -331,6 +332,7 @@ class EventHandler {
std::unique_ptr<JvmtiGcPauseListener> gc_pause_listener_;
std::unique_ptr<JvmtiMethodTraceListener> method_trace_listener_;
std::unique_ptr<JvmtiMonitorListener> monitor_listener_;
+ std::unique_ptr<JvmtiParkListener> park_listener_;
// True if frame pop has ever been enabled. Since we store pointers to stack frames we need to
// continue to listen to this event even if it has been disabled.
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 86ee952022..410901ea47 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -377,7 +377,6 @@ libart_cc_defaults {
include_dirs: [
"art/sigchainlib",
"external/icu/icu4c/source/common",
- "external/lz4/lib",
"external/zlib",
],
header_libs: [
@@ -390,7 +389,6 @@ libart_cc_defaults {
"libnativebridge",
"libnativeloader",
"libbacktrace",
- "liblz4",
"liblog",
// For atrace, properties, ashmem, set_sched_policy.
"libcutils",
@@ -422,8 +420,6 @@ libart_static_cc_defaults {
"libbase",
"libcutils",
"liblog",
- "liblz4",
- "liblzma",
"libnativebridge",
"libnativeloader",
"libunwindstack",
@@ -661,8 +657,7 @@ art_cc_test {
],
shared_libs: [
"libartd-compiler",
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
],
}
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index e775fe4505..5daead9901 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -91,6 +91,15 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
CheckUnattachedThread(level_);
return;
}
+ LockLevel level = level_;
+ // It would be nice to avoid this condition checking in the non-debug case,
+ // but that would make the various methods that check if a mutex is held not
+ // work properly for thread wait locks. Since the vast majority of lock
+ // acquisitions are not thread wait locks, this check should not be too
+ // expensive.
+ if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) {
+ level = kThreadWaitWakeLock;
+ }
if (kDebugLocking) {
// Check if a bad Mutex of this level or lower is held.
bool bad_mutexes_held = false;
@@ -98,13 +107,13 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
// mutator_lock_ exclusive. This is because suspending when holding locks at this level is
// not allowed and if we hold the mutator_lock_ exclusive we must unsuspend stuff eventually
// so there are no deadlocks.
- if (level_ == kTopLockLevel &&
+ if (level == kTopLockLevel &&
Locks::mutator_lock_->IsSharedHeld(self) &&
!Locks::mutator_lock_->IsExclusiveHeld(self)) {
LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" "
<< "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock)
<< ") non-exclusive while locking \"" << name_ << "\" "
- << "(level " << level_ << " - " << static_cast<int>(level_) << ") a top level"
+ << "(level " << level << " - " << static_cast<int>(level) << ") a top level"
<< "mutex. This is not allowed.";
bad_mutexes_held = true;
} else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) {
@@ -113,10 +122,10 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
<< "not allowed.";
bad_mutexes_held = true;
}
- for (int i = level_; i >= 0; --i) {
+ for (int i = level; i >= 0; --i) {
LockLevel lock_level_i = static_cast<LockLevel>(i);
BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i);
- if (level_ == kTopLockLevel &&
+ if (level == kTopLockLevel &&
lock_level_i == kMutatorLock &&
Locks::mutator_lock_->IsExclusiveHeld(self)) {
// This is checked above.
@@ -125,7 +134,7 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
<< "(level " << lock_level_i << " - " << i
<< ") while locking \"" << name_ << "\" "
- << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
+ << "(level " << level << " - " << static_cast<int>(level) << ")";
if (lock_level_i > kAbortLock) {
// Only abort in the check below if this is more than abort level lock.
bad_mutexes_held = true;
@@ -138,8 +147,8 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
}
// Don't record monitors as they are outside the scope of analysis. They may be inspected off of
// the monitor list.
- if (level_ != kMonitorLock) {
- self->SetHeldMutex(level_, this);
+ if (level != kMonitorLock) {
+ self->SetHeldMutex(level, this);
}
}
@@ -149,10 +158,17 @@ inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
return;
}
if (level_ != kMonitorLock) {
+ auto level = level_;
+ if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitWakeLock) == this) {
+ level = kThreadWaitWakeLock;
+ }
if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
- CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
+ if (level == kThreadWaitWakeLock) {
+ CHECK(self->GetHeldMutex(kThreadWaitLock) != nullptr) << "Held " << kThreadWaitWakeLock << " without " << kThreadWaitLock;
+ }
+ CHECK(self->GetHeldMutex(level) == this) << "Unlocking on unacquired mutex: " << name_;
}
- self->SetHeldMutex(level_, nullptr);
+ self->SetHeldMutex(level, nullptr);
}
}
@@ -214,7 +230,11 @@ inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
if (kDebugLocking) {
// Sanity debug check that if we think it is locked we have it in our held mutexes.
if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
- CHECK_EQ(self->GetHeldMutex(level_), this);
+ if (level_ == kThreadWaitLock && self->GetHeldMutex(kThreadWaitLock) != this) {
+ CHECK_EQ(self->GetHeldMutex(kThreadWaitWakeLock), this);
+ } else {
+ CHECK_EQ(self->GetHeldMutex(level_), this);
+ }
}
}
return result;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 7711be9c90..0c8fe58252 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -68,6 +68,14 @@ enum LockLevel : uint8_t {
// A generic lock level for mutexes that should not allow any additional mutexes to be gained after
// acquiring it.
kGenericBottomLock,
+ // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
+ // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
+ // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
+ // the second lock acquisition does not result in deadlock. This is implemented in the lock
+ // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
+ // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock.
+ kThreadWaitWakeLock,
+ kThreadWaitLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
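The two new levels above exist because a thread waiting in Monitor::Wait holds its own wait mutex while another thread briefly grabs a second mutex of the same level; RegisterAsLocked() now books that second acquisition as kThreadWaitWakeLock so the lock-level checker still sees a strict ordering. A minimal sketch of the pattern being legalized (hypothetical function, not the actual Monitor code; Mutex, MutexLock, and Thread are the ART types):

    // Illustrative: the first lock is recorded at kThreadWaitLock; the nested
    // acquisition of another mutex at the same declared level is recorded at
    // kThreadWaitWakeLock instead of triggering a lock-order violation.
    void NotifySketch(Mutex* own_wait_lock, Mutex* target_wait_lock, Thread* self) {
      MutexLock own(self, *own_wait_lock);        // booked as kThreadWaitLock
      MutexLock target(self, *target_wait_lock);  // booked as kThreadWaitWakeLock
      // ... wake the target thread ...
    }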
diff --git a/runtime/cha.cc b/runtime/cha.cc
index de4aebed36..8e06fdae54 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -115,7 +115,7 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
// if they have SingleImplementations methods defined by 'klass'.
// Skip all virtual methods that do not override methods from super class since they cannot be
// SingleImplementations for anything.
- int32_t vtbl_size = super->GetVTableLength<kDefaultVerifyFlags, kWithoutReadBarrier>();
+ int32_t vtbl_size = super->GetVTableLength<kDefaultVerifyFlags>();
ObjPtr<mirror::ClassLoader> loader =
klass->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
for (int vtbl_index = 0; vtbl_index < vtbl_size; ++vtbl_index) {
@@ -131,7 +131,7 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
// so start with a superclass and move up looking into a corresponding vtbl slot.
for (ObjPtr<mirror::Class> super_it = super;
super_it != nullptr &&
- super_it->GetVTableLength<kDefaultVerifyFlags, kWithoutReadBarrier>() > vtbl_index;
+ super_it->GetVTableLength<kDefaultVerifyFlags>() > vtbl_index;
super_it = super_it->GetSuperClass<kDefaultVerifyFlags, kWithoutReadBarrier>()) {
// Skip superclasses that are also going to be unloaded.
ObjPtr<mirror::ClassLoader> super_loader = super_it->
@@ -158,7 +158,7 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
// Check all possible interface methods too.
ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kDefaultVerifyFlags, kWithoutReadBarrier>();
- const size_t ifcount = klass->GetIfTableCount<kDefaultVerifyFlags, kWithoutReadBarrier>();
+ const size_t ifcount = klass->GetIfTableCount<kDefaultVerifyFlags>();
for (size_t i = 0; i < ifcount; ++i) {
ObjPtr<mirror::Class> interface =
iftable->GetInterface<kDefaultVerifyFlags, kWithoutReadBarrier>(i);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 67513f9ce6..cc4f56cc06 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3112,7 +3112,7 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void*
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
- if (runtime->IsNativeDebuggable()) {
+ if (runtime->IsNativeDebuggableZygoteOK()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
// since we want to JIT it (at first use) with extra stackmaps for native
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index de9fe221ff..0bae60a886 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -338,6 +338,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
// no dex files. So that we can distinguish the real failures...
const ArtDexFileLoader dex_file_loader;
std::vector<ClassLoaderInfo*> work_list;
+ CHECK(class_loader_chain_ != nullptr);
work_list.push_back(class_loader_chain_.get());
while (!work_list.empty()) {
ClassLoaderInfo* info = work_list.back();
@@ -908,7 +909,9 @@ ClassLoaderContext::VerificationResult ClassLoaderContext::VerifyClassLoaderCont
// collision check.
if (expected_context.special_shared_library_) {
// Special case where we are the only entry in the class path.
- if (class_loader_chain_->parent == nullptr && class_loader_chain_->classpath.size() == 0) {
+ if (class_loader_chain_ != nullptr &&
+ class_loader_chain_->parent == nullptr &&
+ class_loader_chain_->classpath.size() == 0) {
return VerificationResult::kVerifies;
}
return VerificationResult::kForcedToSkipChecks;
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index cb3dc6506f..f3e2ac00ba 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -735,6 +735,17 @@ TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatch) {
ClassLoaderContext::VerificationResult::kMismatch);
}
+TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchSpecial) {
+ std::string context_spec = "&";
+ std::unique_ptr<ClassLoaderContext> context = ParseContextWithChecksums(context_spec);
+ // Pretend that we successfully open the dex files to pass the DCHECKS.
+ // (as it's much easier to test all the corner cases without relying on actual dex files).
+ PretendContextOpenedDexFiles(context.get());
+
+ ASSERT_EQ(context->VerifyClassLoaderContextMatch(context_spec),
+ ClassLoaderContext::VerificationResult::kForcedToSkipChecks);
+}
+
TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchWithSL) {
std::string context_spec =
"PCL[a.dex*123:b.dex*456]{PCL[d.dex*321];PCL[e.dex*654]#PCL[f.dex*098:g.dex*999]}"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index b46abfbf6e..0766999c02 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -832,7 +832,7 @@ class ImageSpace::Loader {
reinterpret_cast<uintptr_t>(array) + kObjectAlignment);
// If the bit is not set then the contents have not yet been updated.
if (!visited_->Test(contents_bit)) {
- array->Fixup<kVerifyNone, kWithoutReadBarrier>(array, pointer_size_, visitor);
+ array->Fixup<kVerifyNone>(array, pointer_size_, visitor);
visited_->Set(contents_bit);
}
}
@@ -1884,7 +1884,7 @@ class ImageSpace::BootImageLoader {
}
auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
if (iftable != nullptr) {
- int32_t ifcount = klass->GetIfTableCount<kVerifyNone, kWithoutReadBarrier>();
+ int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
for (int32_t i = 0; i != ifcount; ++i) {
mirror::PointerArray* unpatched_ifarray =
iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S
index 340038c83e..fcdde1e72a 100644
--- a/runtime/interpreter/mterp/arm/other.S
+++ b/runtime/interpreter/mterp/arm/other.S
@@ -159,6 +159,9 @@
cmp r0, #0
bne MterpException
FETCH_ADVANCE_INST 1
+ ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
+ cmp r0, #0
+ beq MterpFallback
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -179,6 +182,9 @@
cmp r0, #0 @ failed?
bne MterpException
FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
+ ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
+ cmp r0, #0
+ beq MterpFallback
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
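The added loads of THREAD_USE_MTERP_OFFSET here (and the matching arm64/x86/x86-64 changes later in this diff) re-check, after the monitor-enter/exit runtime call returns, whether the thread may keep running in mterp. A C++ sketch of that control flow, illustrative only with hypothetical names:

    // After the runtime call, the thread's "use mterp" flag may have been
    // cleared while we were blocked on the lock (e.g. instrumentation was
    // enabled), so the handler bails out to the switch interpreter instead
    // of dispatching the next opcode.
    enum class NextStep { kDispatchNext, kMterpException, kMterpFallback };

    NextStep AfterMonitorOp(bool call_failed, bool thread_still_uses_mterp) {
      if (call_failed) {
        return NextStep::kMterpException;
      }
      // The PC has already been advanced past the monitor instruction here.
      return thread_still_uses_mterp ? NextStep::kDispatchNext
                                     : NextStep::kMterpFallback;
    }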
diff --git a/runtime/interpreter/mterp/arm64/other.S b/runtime/interpreter/mterp/arm64/other.S
index 024a5c8a07..f1d0ef34dc 100644
--- a/runtime/interpreter/mterp/arm64/other.S
+++ b/runtime/interpreter/mterp/arm64/other.S
@@ -146,6 +146,8 @@
bl artLockObjectFromCode
cbnz w0, MterpException
FETCH_ADVANCE_INST 1
+ ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+ cbz w0, MterpFallback
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
@@ -165,6 +167,8 @@
bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
cbnz w0, MterpException
FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
+ ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+ cbz w0, MterpFallback
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index cfb9c7c719..06cd904756 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -18,8 +18,7 @@
jz MterpException
ADVANCE_PC 3
movl rSELF, %eax
- movb THREAD_USE_MTERP_OFFSET(%eax), %al
- testb %al, %al
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
jz MterpFallback
RESTORE_IBASE
FETCH_INST
@@ -45,8 +44,7 @@
jz MterpException
ADVANCE_PC 4
movl rSELF, %eax
- movb THREAD_USE_MTERP_OFFSET(%eax), %al
- testb %al, %al
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
jz MterpFallback
RESTORE_IBASE
FETCH_INST
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
index b233f2c522..0621fb468a 100644
--- a/runtime/interpreter/mterp/x86/main.S
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -91,6 +91,8 @@ unspecified registers or condition codes.
#include "asm_support.h"
#include "interpreter/cfi_asm_support.h"
+#define LITERAL(value) $$(value)
+
/*
* Handle mac compiler specific
*/
@@ -561,8 +563,7 @@ MterpException:
movl rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
movl rSELF, %eax
- movb THREAD_USE_MTERP_OFFSET(%eax), %al
- testb %al, %al
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
jz MterpFallback
/* resume execution at catch block */
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/other.S b/runtime/interpreter/mterp/x86/other.S
index 5de33813b8..270ccb688c 100644
--- a/runtime/interpreter/mterp/x86/other.S
+++ b/runtime/interpreter/mterp/x86/other.S
@@ -132,7 +132,12 @@
RESTORE_IBASE
testb %al, %al
jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+ ADVANCE_PC 1
+ movl rSELF, %eax
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
+ jz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
%def op_monitor_exit():
/*
@@ -152,7 +157,12 @@
RESTORE_IBASE
testb %al, %al
jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+ ADVANCE_PC 1
+ movl rSELF, %eax
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
+ jz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
%def op_move(is_object="0"):
/* for move, move-object, long-to-int */
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
index f727915265..15b48c91dc 100644
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -16,8 +16,7 @@
jz MterpException
ADVANCE_PC 3
movq rSELF, %rax
- movb THREAD_USE_MTERP_OFFSET(%rax), %al
- testb %al, %al
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
jz MterpFallback
FETCH_INST
GOTO_NEXT
@@ -40,8 +39,7 @@
jz MterpException
ADVANCE_PC 4
movq rSELF, %rax
- movb THREAD_USE_MTERP_OFFSET(%rax), %al
- testb %al, %al
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
jz MterpFallback
FETCH_INST
GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
index 75eb00c461..4609067a5a 100644
--- a/runtime/interpreter/mterp/x86_64/main.S
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -87,6 +87,8 @@ unspecified registers or condition codes.
#include "asm_support.h"
#include "interpreter/cfi_asm_support.h"
+#define LITERAL(value) $$(value)
+
/*
* Handle mac compiler specific
*/
@@ -527,8 +529,7 @@ MterpException:
movq rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
movq rSELF, %rax
- movb THREAD_USE_MTERP_OFFSET(%rax), %al
- testb %al, %al
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
jz MterpFallback
/* resume execution at catch block */
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86_64/other.S b/runtime/interpreter/mterp/x86_64/other.S
index 849155ca8f..412389f3ed 100644
--- a/runtime/interpreter/mterp/x86_64/other.S
+++ b/runtime/interpreter/mterp/x86_64/other.S
@@ -108,7 +108,12 @@
call SYMBOL(artLockObjectFromCode) # (object, self)
testq %rax, %rax
jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+ ADVANCE_PC 1
+ movq rSELF, %rax
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
+ jz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
%def op_monitor_exit():
/*
@@ -125,7 +130,12 @@
call SYMBOL(artUnlockObjectFromCode) # (object, self)
testq %rax, %rax
jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+ ADVANCE_PC 1
+ movq rSELF, %rax
+ cmpb LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
+ jz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
%def op_move(is_object="0"):
/* for move, move-object, long-to-int */
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 442733234b..a6a5ba298c 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -224,16 +224,14 @@ inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos,
}
}
-template<typename T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<typename T, VerifyObjectFlags kVerifyFlags>
inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
// C style casts here since we sometimes have T be a pointer, or sometimes an integer
// (for stack traces).
if (ptr_size == PointerSize::k64) {
- return (T)static_cast<uintptr_t>(
- AsLongArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
+ return (T)static_cast<uintptr_t>(AsLongArray<kVerifyFlags>()->GetWithoutChecks(idx));
}
- return (T)static_cast<uintptr_t>(static_cast<uint32_t>(
- AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx)));
+ return (T)static_cast<uintptr_t>(AsIntArray<kVerifyFlags>()->GetWithoutChecks(idx));
}
template<bool kTransactionActive, bool kUnchecked>
@@ -255,12 +253,12 @@ inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, PointerSiz
ptr_size);
}
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
+template <VerifyObjectFlags kVerifyFlags, typename Visitor>
inline void PointerArray::Fixup(mirror::PointerArray* dest,
PointerSize pointer_size,
const Visitor& visitor) {
for (size_t i = 0, count = GetLength(); i < count; ++i) {
- void* ptr = GetElementPtrSize<void*, kVerifyFlags, kReadBarrierOption>(i, pointer_size);
+ void* ptr = GetElementPtrSize<void*, kVerifyFlags>(i, pointer_size);
void* new_ptr = visitor(ptr);
if (ptr != new_ptr) {
dest->SetElementPtrSize<false, true>(i, new_ptr, pointer_size);
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 7211f30baa..8816c619df 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -193,9 +193,7 @@ extern template class PrimitiveArray<int16_t>; // ShortArray
// Either an IntArray or a LongArray.
class PointerArray : public Array {
public:
- template<typename T,
- VerifyObjectFlags kVerifyFlags = kVerifyNone,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<typename T, VerifyObjectFlags kVerifyFlags = kVerifyNone>
T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -216,9 +214,7 @@ class PointerArray : public Array {
// Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
// to dest if visitor(source_ptr) != source_ptr.
- template <VerifyObjectFlags kVerifyFlags = kVerifyNone,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
- typename Visitor>
+ template <VerifyObjectFlags kVerifyFlags = kVerifyNone, typename Visitor>
void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 9a4130d0d5..6b9ba8c8a2 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -294,16 +294,20 @@ inline void Class::SetVTable(ObjPtr<PointerArray> new_vtable) {
}
inline bool Class::HasVTable() {
- return GetVTable() != nullptr || ShouldHaveEmbeddedVTable();
+ // No read barrier is needed for comparing with null.
+ return GetVTable<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr ||
+ ShouldHaveEmbeddedVTable();
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline int32_t Class::GetVTableLength() {
if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
return GetEmbeddedVTableLength();
}
- return GetVTable<kVerifyFlags, kReadBarrierOption>() != nullptr ?
- GetVTable<kVerifyFlags, kReadBarrierOption>()->GetLength() : 0;
+ // We do not need a read barrier here as the length is constant,
+ // both from-space and to-space vtables shall yield the same result.
+ ObjPtr<PointerArray> vtable = GetVTable<kVerifyFlags, kWithoutReadBarrier>();
+ return vtable != nullptr ? vtable->GetLength() : 0;
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -311,10 +315,9 @@ inline ArtMethod* Class::GetVTableEntry(uint32_t i, PointerSize pointer_size) {
if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
return GetEmbeddedVTableEntry(i, pointer_size);
}
- auto* vtable = GetVTable<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<PointerArray> vtable = GetVTable<kVerifyFlags, kReadBarrierOption>();
DCHECK(vtable != nullptr);
- return vtable->template GetElementPtrSize<ArtMethod*, kVerifyFlags, kReadBarrierOption>(
- i, pointer_size);
+ return vtable->GetElementPtrSize<ArtMethod*, kVerifyFlags>(i, pointer_size);
}
template<VerifyObjectFlags kVerifyFlags>
@@ -624,9 +627,11 @@ inline IfTable* Class::GetIfTable() {
return ret.Ptr();
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline int32_t Class::GetIfTableCount() {
- return GetIfTable<kVerifyFlags, kReadBarrierOption>()->Count();
+ // We do not need a read barrier here as the length is constant,
+ // both from-space and to-space iftables shall yield the same result.
+ return GetIfTable<kVerifyFlags, kWithoutReadBarrier>()->Count();
}
inline void Class::SetIfTable(ObjPtr<IfTable> new_iftable) {
@@ -970,7 +975,17 @@ inline bool Class::IsObjectArrayClass() {
// We do not need a read barrier here as the primitive type is constant,
// both from-space and to-space component type classes shall yield the same result.
ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kWithoutReadBarrier>();
- return component_type != nullptr && !component_type->IsPrimitive<kVerifyFlags>();
+ constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+ return component_type != nullptr && !component_type->IsPrimitive<kNewFlags>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+bool Class::IsPrimitiveArray() {
+ // We do not need a read barrier here as the primitive type is constant,
+ // both from-space and to-space component type classes shall yield the same result.
+ ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kWithoutReadBarrier>();
+ constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+ return component_type != nullptr && component_type->IsPrimitive<kNewFlags>();
}
inline bool Class::IsAssignableFrom(ObjPtr<Class> src) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 74fca549ea..bb54b3d33f 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -422,13 +422,6 @@ class MANAGED Class final : public Object {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_) {
- return IsArrayClass<kVerifyFlags>() &&
- GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
- IsPrimitive();
- }
-
// Depth of class from java.lang.Object
uint32_t Depth() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -466,7 +459,8 @@ class MANAGED Class final : public Object {
}
bool IsObjectClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- return !IsPrimitive() && GetSuperClass() == nullptr;
+ // No read barrier is needed for comparing with null.
+ return !IsPrimitive() && GetSuperClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr;
}
bool IsInstantiableNonArray() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -485,18 +479,7 @@ class MANAGED Class final : public Object {
ALWAYS_INLINE bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIntArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- auto* component_type = GetComponentType<kVerifyFlags>();
- return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
- }
-
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLongArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- auto* component_type = GetComponentType<kVerifyFlags>();
- return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
- }
+ bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_);
// Creates a raw object instance but does not invoke the default constructor.
template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
@@ -633,7 +616,8 @@ class MANAGED Class final : public Object {
void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetSuperClass() != nullptr;
+ // No read barrier is needed for comparing with null.
+ return GetSuperClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr;
}
static constexpr MemberOffset SuperClassOffset() {
@@ -812,8 +796,7 @@ class MANAGED Class final : public Object {
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
int32_t GetVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -948,8 +931,7 @@ class MANAGED Class final : public Object {
return (GetAccessFlags() & kAccRecursivelyInitialized) != 0;
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetIfTableCount() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -1216,7 +1198,8 @@ class MANAGED Class final : public Object {
// Returns true if the class loader is null, ie the class loader is the boot strap class loader.
bool IsBootStrapClassLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetClassLoader() == nullptr;
+ // No read barrier is needed for comparing with null.
+ return GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr;
}
static size_t ImTableEntrySize(PointerSize pointer_size) {
diff --git a/runtime/mirror/class_ext-inl.h b/runtime/mirror/class_ext-inl.h
index feaac8580a..8d68dc92f9 100644
--- a/runtime/mirror/class_ext-inl.h
+++ b/runtime/mirror/class_ext-inl.h
@@ -32,9 +32,7 @@ void ClassExt::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
}
int32_t len = arr->GetLength();
for (int32_t i = 0; i < len; i++) {
- ArtMethod* method = arr->GetElementPtrSize<ArtMethod*,
- kDefaultVerifyFlags,
- kReadBarrierOption>(i, pointer_size);
+ ArtMethod* method = arr->GetElementPtrSize<ArtMethod*, kDefaultVerifyFlags>(i, pointer_size);
if (method != nullptr) {
method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
}
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 8ae79a8c66..2c2ad9b9b0 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -156,7 +156,7 @@ template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsObjectArray() {
// We do not need a read barrier here as the primitive type is constant,
// both from-space and to-space component type classes shall yield the same result.
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
+ constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
return IsArrayInstance<kVerifyFlags>() &&
!GetClass<kNewFlags, kWithoutReadBarrier>()->
template GetComponentType<kNewFlags, kWithoutReadBarrier>()->IsPrimitive();
@@ -192,113 +192,102 @@ inline Array* Object::AsArray() {
return down_cast<Array*>(this);
}
+template<VerifyObjectFlags kVerifyFlags, Primitive::Type kType>
+ALWAYS_INLINE bool Object::IsSpecificPrimitiveArray() {
+ // We do not need a read barrier here as the primitive type is constant,
+ // both from-space and to-space component type classes shall yield the same result.
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kWithoutReadBarrier>();
+ constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
+ ObjPtr<Class> const component_type = klass->GetComponentType<kNewFlags, kWithoutReadBarrier>();
+ return component_type != nullptr &&
+ component_type->GetPrimitiveType<kNewFlags>() == kType;
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsBooleanArray() {
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimBoolean>();
+}
+
template<VerifyObjectFlags kVerifyFlags>
inline BooleanArray* Object::AsBooleanArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->GetComponentType()->IsPrimitiveBoolean());
+ DCHECK(IsBooleanArray<kVerifyFlags>());
return down_cast<BooleanArray*>(this);
}
template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsByteArray() {
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimByte>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
inline ByteArray* Object::AsByteArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte());
+ DCHECK(IsByteArray<kVerifyFlags>());
return down_cast<ByteArray*>(this);
}
template<VerifyObjectFlags kVerifyFlags>
-inline ByteArray* Object::AsByteSizedArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte() ||
- GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveBoolean());
- return down_cast<ByteArray*>(this);
+inline bool Object::IsCharArray() {
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimChar>();
}
template<VerifyObjectFlags kVerifyFlags>
inline CharArray* Object::AsCharArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
+ DCHECK(IsCharArray<kVerifyFlags>());
return down_cast<CharArray*>(this);
}
template<VerifyObjectFlags kVerifyFlags>
-inline ShortArray* Object::AsShortArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort());
- return down_cast<ShortArray*>(this);
+inline bool Object::IsShortArray() {
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimShort>();
}
template<VerifyObjectFlags kVerifyFlags>
-inline ShortArray* Object::AsShortSizedArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort() ||
- GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
+inline ShortArray* Object::AsShortArray() {
+ DCHECK(IsShortArray<kVerifyFlags>());
return down_cast<ShortArray*>(this);
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsIntArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
- return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimInt>();
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline IntArray* Object::AsIntArray() {
- DCHECK((IsIntArray<kVerifyFlags, kReadBarrierOption>()));
+ DCHECK((IsIntArray<kVerifyFlags>()));
return down_cast<IntArray*>(this);
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsLongArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
- return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimLong>();
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline LongArray* Object::AsLongArray() {
- DCHECK((IsLongArray<kVerifyFlags, kReadBarrierOption>()));
+ DCHECK((IsLongArray<kVerifyFlags>()));
return down_cast<LongArray*>(this);
}
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsFloatArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
- return component_type != nullptr && component_type->template IsPrimitiveFloat<kNewFlags>();
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimFloat>();
}
template<VerifyObjectFlags kVerifyFlags>
inline FloatArray* Object::AsFloatArray() {
DCHECK(IsFloatArray<kVerifyFlags>());
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
return down_cast<FloatArray*>(this);
}
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsDoubleArray() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
- return component_type != nullptr && component_type->template IsPrimitiveDouble<kNewFlags>();
+ return IsSpecificPrimitiveArray<kVerifyFlags, Primitive::kPrimDouble>();
}
template<VerifyObjectFlags kVerifyFlags>
inline DoubleArray* Object::AsDoubleArray() {
DCHECK(IsDoubleArray<kVerifyFlags>());
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
return down_cast<DoubleArray*>(this);
}
@@ -351,7 +340,7 @@ inline size_t Object::SizeOf() {
// values is OK because of that.
static constexpr ReadBarrierOption kRBO = kWithoutReadBarrier;
size_t result;
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
+ constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
if (IsArrayInstance<kVerifyFlags>()) {
result = AsArray<kNewFlags>()->template SizeOf<kNewFlags, kRBO>();
} else if (IsClass<kNewFlags>()) {
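The object-inl.h change above collapses the per-type IsIntArray/IsLongArray/IsFloatArray/... predicates into a single IsSpecificPrimitiveArray<kVerifyFlags, kType> helper that compares the component class's primitive type against a template parameter, skipping the read barrier because the primitive type is immutable. Below is a minimal standalone model of that pattern; PrimKind, FakeClass and FakeObject are illustrative stand-ins, not ART's mirror types.

enum class PrimKind { kNot, kBoolean, kByte, kChar, kShort, kInt, kLong, kFloat, kDouble };

struct FakeClass {
  const FakeClass* component_type = nullptr;  // null for non-array classes
  PrimKind primitive = PrimKind::kNot;        // kNot for reference types
};

struct FakeObject {
  const FakeClass* klass;

  // One predicate keyed on the primitive kind replaces the per-type checks.
  template <PrimKind kType>
  bool IsSpecificPrimitiveArray() const {
    const FakeClass* component = klass->component_type;
    return component != nullptr && component->primitive == kType;
  }

  bool IsIntArray() const { return IsSpecificPrimitiveArray<PrimKind::kInt>(); }
  bool IsFloatArray() const { return IsSpecificPrimitiveArray<PrimKind::kFloat>(); }
};

int main() {
  FakeClass int_class;
  int_class.primitive = PrimKind::kInt;
  FakeClass int_array_class;
  int_array_class.component_type = &int_class;
  FakeObject arr{&int_array_class};
  return arr.IsIntArray() && !arr.IsFloatArray() ? 0 : 1;  // 0 on success
}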
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 11e8ccadc4..bca7511489 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -21,6 +21,7 @@
#include "base/casts.h"
#include "base/enums.h"
#include "base/globals.h"
+#include "dex/primitive.h"
#include "obj_ptr.h"
#include "object_reference.h"
#include "offsets.h"
@@ -199,31 +200,33 @@ class MANAGED LOCKABLE Object {
Array* AsArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
BooleanArray* AsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ ByteArray* AsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
CharArray* AsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
+ ShortArray* AsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
IntArray* AsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
LongArray* AsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -757,6 +760,9 @@ class MANAGED LOCKABLE Object {
size_t num_bytes)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags, Primitive::Type kType>
+ bool IsSpecificPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_);
+
static Atomic<uint32_t> hash_code_seed;
// The Class representing the type of the object.
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0f0a378142..df2a8e29cb 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -97,6 +97,7 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
wait_set_(nullptr),
+ wake_set_(nullptr),
hash_code_(hash_code),
locking_method_(nullptr),
locking_dex_pc_(0),
@@ -120,6 +121,7 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
wait_set_(nullptr),
+ wake_set_(nullptr),
hash_code_(hash_code),
locking_method_(nullptr),
locking_dex_pc_(0),
@@ -226,7 +228,8 @@ Monitor::~Monitor() {
}
void Monitor::AppendToWaitSet(Thread* thread) {
- DCHECK(owner_ == Thread::Current());
+ // Not checking that the owner is equal to this thread, since we've released
+ // the monitor by the time this method is called.
DCHECK(thread != nullptr);
DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
if (wait_set_ == nullptr) {
@@ -245,24 +248,29 @@ void Monitor::AppendToWaitSet(Thread* thread) {
void Monitor::RemoveFromWaitSet(Thread *thread) {
DCHECK(owner_ == Thread::Current());
DCHECK(thread != nullptr);
- if (wait_set_ == nullptr) {
- return;
- }
- if (wait_set_ == thread) {
- wait_set_ = thread->GetWaitNext();
- thread->SetWaitNext(nullptr);
- return;
- }
-
- Thread* t = wait_set_;
- while (t->GetWaitNext() != nullptr) {
- if (t->GetWaitNext() == thread) {
- t->SetWaitNext(thread->GetWaitNext());
- thread->SetWaitNext(nullptr);
- return;
+ auto remove = [&](Thread*& set){
+ if (set != nullptr) {
+ if (set == thread) {
+ set = thread->GetWaitNext();
+ thread->SetWaitNext(nullptr);
+ return true;
+ }
+ Thread* t = set;
+ while (t->GetWaitNext() != nullptr) {
+ if (t->GetWaitNext() == thread) {
+ t->SetWaitNext(thread->GetWaitNext());
+ thread->SetWaitNext(nullptr);
+ return true;
+ }
+ t = t->GetWaitNext();
+ }
}
- t = t->GetWaitNext();
+ return false;
+ };
+ if (remove(wait_set_)) {
+ return;
}
+ remove(wake_set_);
}
void Monitor::SetObject(mirror::Object* object) {
@@ -699,33 +707,81 @@ void Monitor::FailedUnlock(mirror::Object* o,
bool Monitor::Unlock(Thread* self) {
DCHECK(self != nullptr);
uint32_t owner_thread_id = 0u;
- {
- MutexLock mu(self, monitor_lock_);
- Thread* owner = owner_;
- if (owner != nullptr) {
- owner_thread_id = owner->GetThreadId();
- }
- if (owner == self) {
- // We own the monitor, so nobody else can be in here.
- AtraceMonitorUnlock();
- if (lock_count_ == 0) {
- owner_ = nullptr;
- locking_method_ = nullptr;
- locking_dex_pc_ = 0;
- // Wake a contender.
- monitor_contenders_.Signal(self);
- } else {
- --lock_count_;
- }
+ DCHECK(!monitor_lock_.IsExclusiveHeld(self));
+ monitor_lock_.Lock(self);
+ Thread* owner = owner_;
+ if (owner != nullptr) {
+ owner_thread_id = owner->GetThreadId();
+ }
+ if (owner == self) {
+ // We own the monitor, so nobody else can be in here.
+ AtraceMonitorUnlock();
+ if (lock_count_ == 0) {
+ owner_ = nullptr;
+ locking_method_ = nullptr;
+ locking_dex_pc_ = 0;
+ SignalContendersAndReleaseMonitorLock(self);
+ return true;
+ } else {
+ --lock_count_;
+ monitor_lock_.Unlock(self);
return true;
}
}
// We don't own this, so we're not allowed to unlock it.
// The JNI spec says that we should throw IllegalMonitorStateException in this case.
FailedUnlock(GetObject(), self->GetThreadId(), owner_thread_id, this);
+ monitor_lock_.Unlock(self);
return false;
}
+void Monitor::SignalContendersAndReleaseMonitorLock(Thread* self) {
+ // We want to signal one thread to wake up, to acquire the monitor that
+ // we are releasing. This could either be a Thread waiting on its own
+ // ConditionVariable, or a thread waiting on monitor_contenders_.
+ while (wake_set_ != nullptr) {
+ // No risk of waking ourselves here; since monitor_lock_ is not released until we're ready to
+ // return, notify can't move the current thread from wait_set_ to wake_set_ until this
+ // method is done checking wake_set_.
+ Thread* thread = wake_set_;
+ wake_set_ = thread->GetWaitNext();
+ thread->SetWaitNext(nullptr);
+
+ // Check to see if the thread is still waiting.
+ {
+ // In the case of wait(), we'll be acquiring another thread's GetWaitMutex with
+ // self's GetWaitMutex held. This does not risk deadlock, because we only acquire this lock
+ // for threads in the wake_set_. A thread can only enter wake_set_ from Notify or NotifyAll,
+ // and those hold monitor_lock_. Thus, the threads whose wait mutexes we acquire here must
+ // have already been released from wait(), since we have not released monitor_lock_ until
+ // after we've chosen our thread to wake, so there is no risk of the following lock ordering
+ // leading to deadlock:
+ // Thread 1 waits
+ // Thread 2 waits
+ // Thread 3 moves threads 1 and 2 from wait_set_ to wake_set_
+ // Thread 1 enters this block, and attempts to acquire Thread 2's GetWaitMutex to wake it
+ // Thread 2 enters this block, and attempts to acquire Thread 1's GetWaitMutex to wake it
+ //
+ // Since monitor_lock_ is not released until the thread-to-be-woken-up's GetWaitMutex is
+ // acquired, two threads cannot attempt to acquire each other's GetWaitMutex while holding
+ // their own and cause deadlock.
+ MutexLock wait_mu(self, *thread->GetWaitMutex());
+ if (thread->GetWaitMonitor() != nullptr) {
+ // Release the lock, so that a potentially awakened thread will not
+ // immediately contend on it. The lock ordering here is:
+ // monitor_lock_, self->GetWaitMutex, thread->GetWaitMutex
+ monitor_lock_.Unlock(self);
+ thread->GetWaitConditionVariable()->Signal(self);
+ return;
+ }
+ }
+ }
+ // If we didn't wake any threads that were originally waiting on us,
+ // wake a contender.
+ monitor_contenders_.Signal(self);
+ monitor_lock_.Unlock(self);
+}
+
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
DCHECK(self != nullptr);
@@ -755,17 +811,9 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
}
/*
- * Add ourselves to the set of threads waiting on this monitor, and
- * release our hold. We need to let it go even if we're a few levels
+ * Release our hold - we need to let it go even if we're a few levels
* deep in a recursive lock, and we need to restore that later.
- *
- * We append to the wait set ahead of clearing the count and owner
- * fields so the subroutine can check that the calling thread owns
- * the monitor. Aside from that, the order of member updates is
- * not order sensitive as we hold the pthread mutex.
*/
- AppendToWaitSet(self);
- ++num_waiters_;
int prev_lock_count = lock_count_;
lock_count_ = 0;
owner_ = nullptr;
@@ -790,6 +838,17 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
// Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
MutexLock mu(self, *self->GetWaitMutex());
+ /*
+ * Add ourselves to the set of threads waiting on this monitor.
+ * It's important that we are only added to the wait set after
+ * acquiring our GetWaitMutex, so that calls to Notify() that occur after we
+ * have released monitor_lock_ will not move us from wait_set_ to wake_set_
+ * until we've signalled contenders on this monitor.
+ */
+ AppendToWaitSet(self);
+ ++num_waiters_;
+
+
// Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
// non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
// up.
@@ -797,8 +856,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
self->SetWaitMonitor(this);
// Release the monitor lock.
- monitor_contenders_.Signal(self);
- monitor_lock_.Unlock(self);
+ SignalContendersAndReleaseMonitorLock(self);
// Handle the case where the thread was interrupted before we called wait().
if (self->IsInterrupted()) {
@@ -874,18 +932,12 @@ void Monitor::Notify(Thread* self) {
ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
return;
}
- // Signal the first waiting thread in the wait set.
- while (wait_set_ != nullptr) {
- Thread* thread = wait_set_;
- wait_set_ = thread->GetWaitNext();
- thread->SetWaitNext(nullptr);
-
- // Check to see if the thread is still waiting.
- MutexLock wait_mu(self, *thread->GetWaitMutex());
- if (thread->GetWaitMonitor() != nullptr) {
- thread->GetWaitConditionVariable()->Signal(self);
- return;
- }
+ // Move one thread from waiters to wake set
+ Thread* to_move = wait_set_;
+ if (to_move != nullptr) {
+ wait_set_ = to_move->GetWaitNext();
+ to_move->SetWaitNext(wake_set_);
+ wake_set_ = to_move;
}
}
@@ -897,12 +949,20 @@ void Monitor::NotifyAll(Thread* self) {
ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
return;
}
- // Signal all threads in the wait set.
- while (wait_set_ != nullptr) {
- Thread* thread = wait_set_;
- wait_set_ = thread->GetWaitNext();
- thread->SetWaitNext(nullptr);
- thread->Notify();
+
+ // Move all threads from waiters to wake set
+ Thread* to_move = wait_set_;
+ if (to_move != nullptr) {
+ wait_set_ = nullptr;
+ Thread* move_to = wake_set_;
+ if (move_to == nullptr) {
+ wake_set_ = to_move;
+ return;
+ }
+ while (move_to->GetWaitNext() != nullptr) {
+ move_to = move_to->GetWaitNext();
+ }
+ move_to->SetWaitNext(to_move);
}
}
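The monitor.cc rewrite above splits the old single wait list into wait_set_ and wake_set_: Notify()/NotifyAll() now only move threads between two intrusive lists while holding monitor_lock_, and the actual signalling is deferred to the thread that releases the monitor in SignalContendersAndReleaseMonitorLock(). The list handoff itself can be modelled in plain C++ as below; FakeThread and FakeMonitor are simplified stand-ins, with no locking or condition-variable signalling.

#include <cassert>

struct FakeThread {
  FakeThread* wait_next = nullptr;
};

struct FakeMonitor {
  FakeThread* wait_set = nullptr;   // threads currently in wait()
  FakeThread* wake_set = nullptr;   // notified threads not yet signalled

  void AppendToWaitSet(FakeThread* t) {
    t->wait_next = nullptr;
    FakeThread** tail = &wait_set;
    while (*tail != nullptr) tail = &(*tail)->wait_next;
    *tail = t;
  }

  void Notify() {  // move one waiter to the wake set
    if (wait_set != nullptr) {
      FakeThread* t = wait_set;
      wait_set = t->wait_next;
      t->wait_next = wake_set;
      wake_set = t;
    }
  }

  void NotifyAll() {  // splice the whole wait list onto the end of the wake list
    if (wait_set == nullptr) return;
    FakeThread* moved = wait_set;
    wait_set = nullptr;
    if (wake_set == nullptr) { wake_set = moved; return; }
    FakeThread* tail = wake_set;
    while (tail->wait_next != nullptr) tail = tail->wait_next;
    tail->wait_next = moved;
  }

  FakeThread* PopWakeCandidate() {  // done by the thread releasing the monitor
    FakeThread* t = wake_set;
    if (t != nullptr) { wake_set = t->wait_next; t->wait_next = nullptr; }
    return t;
  }
};

int main() {
  FakeMonitor m;
  FakeThread a, b;
  m.AppendToWaitSet(&a);
  m.AppendToWaitSet(&b);
  m.NotifyAll();
  assert(m.PopWakeCandidate() == &a && m.PopWakeCandidate() == &b);
}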
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 6b7604ec8a..c1f84e92bf 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -181,6 +181,8 @@ class Monitor {
// this routine.
void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);
+ void SignalContendersAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);
+
// Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
// calling thread must own the lock or the owner must be suspended. There's a race with other
// threads inflating the lock, installing hash codes and spurious failures. The caller should
@@ -306,6 +308,9 @@ class Monitor {
// Threads currently waiting on this monitor.
Thread* wait_set_ GUARDED_BY(monitor_lock_);
+ // Threads that were waiting on this monitor, but are now contending on it.
+ Thread* wake_set_ GUARDED_BY(monitor_lock_);
+
// Stored object hash code, generated lazily by GetHashCode.
AtomicInteger hash_code_;
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 2c4184c285..e4bc8ce5a2 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -101,32 +101,36 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 1U);
- dstArray->AsByteSizedArray()->Memmove(dstPos, srcArray->AsByteSizedArray(), srcPos, count);
+ // Note: Treating BooleanArray as ByteArray.
+ ObjPtr<mirror::ByteArray>::DownCast(dstArray)->Memmove(
+ dstPos, ObjPtr<mirror::ByteArray>::DownCast(srcArray), srcPos, count);
return;
case Primitive::kPrimChar:
case Primitive::kPrimShort:
DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 2U);
- dstArray->AsShortSizedArray()->Memmove(dstPos, srcArray->AsShortSizedArray(), srcPos, count);
+ // Note: Treating CharArray as ShortArray.
+ ObjPtr<mirror::ShortArray>::DownCast(dstArray)->Memmove(
+ dstPos, ObjPtr<mirror::ShortArray>::DownCast(srcArray), srcPos, count);
return;
case Primitive::kPrimInt:
- DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U);
- dstArray->AsIntArray()->Memmove(dstPos, srcArray->AsIntArray(), srcPos, count);
- return;
case Primitive::kPrimFloat:
DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U);
- dstArray->AsFloatArray()->Memmove(dstPos, srcArray->AsFloatArray(), srcPos, count);
+ // Note: Treating FloatArray as IntArray.
+ ObjPtr<mirror::IntArray>::DownCast(dstArray)->Memmove(
+ dstPos, ObjPtr<mirror::IntArray>::DownCast(srcArray), srcPos, count);
return;
case Primitive::kPrimLong:
- DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U);
- dstArray->AsLongArray()->Memmove(dstPos, srcArray->AsLongArray(), srcPos, count);
- return;
case Primitive::kPrimDouble:
DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U);
- dstArray->AsDoubleArray()->Memmove(dstPos, srcArray->AsDoubleArray(), srcPos, count);
+ // Note: Treating DoubleArray as LongArray.
+ ObjPtr<mirror::LongArray>::DownCast(dstArray)->Memmove(
+ dstPos, ObjPtr<mirror::LongArray>::DownCast(srcArray), srcPos, count);
return;
case Primitive::kPrimNot: {
- mirror::ObjectArray<mirror::Object>* dstObjArray = dstArray->AsObjectArray<mirror::Object>();
- mirror::ObjectArray<mirror::Object>* srcObjArray = srcArray->AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* dstObjArray =
+ dstArray->AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* srcObjArray =
+ srcArray->AsObjectArray<mirror::Object>();
dstObjArray->AssignableMemmove(dstPos, srcObjArray, srcPos, count);
return;
}
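The arraycopy hunk above drops the AsByteSizedArray()/AsShortSizedArray() helpers and instead groups the primitive cases by component size (boolean with byte, char with short, float with int, double with long), since a Memmove of raw elements only cares about element width. A rough standalone sketch of that size-based dispatch, with illustrative names rather than ART's mirror arrays:

#include <cstddef>
#include <cstring>

enum class Prim { kBoolean, kByte, kChar, kShort, kInt, kFloat, kLong, kDouble };

void PrimitiveArrayCopy(Prim type, void* dst, const void* src, size_t count) {
  size_t component_size = 0;
  switch (type) {
    case Prim::kBoolean:
    case Prim::kByte:   component_size = 1; break;
    case Prim::kChar:
    case Prim::kShort:  component_size = 2; break;
    case Prim::kInt:
    case Prim::kFloat:  component_size = 4; break;  // float copied as 4 raw bytes
    case Prim::kLong:
    case Prim::kDouble: component_size = 8; break;  // double copied as 8 raw bytes
  }
  std::memmove(dst, src, count * component_size);
}

int main() {
  float src[4] = {0.5f, 1.5f, 2.5f, 3.5f};
  float dst[4] = {};
  PrimitiveArrayCopy(Prim::kFloat, dst, src, 4);  // moved as raw 4-byte units
}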
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index a739c2d16e..5014f340cd 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -366,13 +366,17 @@ static void Unsafe_copyMemoryToPrimitiveArray(JNIEnv *env,
ObjPtr<mirror::Object> dst = soa.Decode<mirror::Object>(dstObj);
ObjPtr<mirror::Class> component_type = dst->GetClass()->GetComponentType();
if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
- copyToArray(srcAddr, MakeObjPtr(dst->AsByteSizedArray()), dst_offset, sz);
+ // Note: Treating BooleanArray as ByteArray.
+ copyToArray(srcAddr, ObjPtr<mirror::ByteArray>::DownCast(dst), dst_offset, sz);
} else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
- copyToArray(srcAddr, MakeObjPtr(dst->AsShortSizedArray()), dst_offset, sz);
+ // Note: Treating CharArray as ShortArray.
+ copyToArray(srcAddr, ObjPtr<mirror::ShortArray>::DownCast(dst), dst_offset, sz);
} else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
- copyToArray(srcAddr, MakeObjPtr(dst->AsIntArray()), dst_offset, sz);
+ // Note: Treating FloatArray as IntArray.
+ copyToArray(srcAddr, ObjPtr<mirror::IntArray>::DownCast(dst), dst_offset, sz);
} else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
- copyToArray(srcAddr, MakeObjPtr(dst->AsLongArray()), dst_offset, sz);
+ // Note: Treating DoubleArray as LongArray.
+ copyToArray(srcAddr, ObjPtr<mirror::LongArray>::DownCast(dst), dst_offset, sz);
} else {
ThrowIllegalAccessException("not a primitive array");
}
@@ -397,13 +401,17 @@ static void Unsafe_copyMemoryFromPrimitiveArray(JNIEnv *env,
ObjPtr<mirror::Object> src = soa.Decode<mirror::Object>(srcObj);
ObjPtr<mirror::Class> component_type = src->GetClass()->GetComponentType();
if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
- copyFromArray(dstAddr, MakeObjPtr(src->AsByteSizedArray()), src_offset, sz);
+ // Note: Treating BooleanArray as ByteArray.
+ copyFromArray(dstAddr, ObjPtr<mirror::ByteArray>::DownCast(src), src_offset, sz);
} else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
- copyFromArray(dstAddr, MakeObjPtr(src->AsShortSizedArray()), src_offset, sz);
+ // Note: Treating CharArray as ShortArray.
+ copyFromArray(dstAddr, ObjPtr<mirror::ShortArray>::DownCast(src), src_offset, sz);
} else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
- copyFromArray(dstAddr, MakeObjPtr(src->AsIntArray()), src_offset, sz);
+ // Note: Treating FloatArray as IntArray.
+ copyFromArray(dstAddr, ObjPtr<mirror::IntArray>::DownCast(src), src_offset, sz);
} else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
- copyFromArray(dstAddr, MakeObjPtr(src->AsLongArray()), src_offset, sz);
+ // Note: Treating DoubleArray as LongArray.
+ copyFromArray(dstAddr, ObjPtr<mirror::LongArray>::DownCast(src), src_offset, sz);
} else {
ThrowIllegalAccessException("not a primitive array");
}
diff --git a/runtime/oat.h b/runtime/oat.h
index b07294adeb..3d6415e2a0 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -35,7 +35,6 @@ class PACKED(4) OatHeader {
static constexpr uint8_t kOatVersion[] = { '1', '6', '3', '\0' };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
- static constexpr const char* kDex2OatHostKey = "dex2oat-host";
static constexpr const char* kDebuggableKey = "debuggable";
static constexpr const char* kNativeDebuggableKey = "native-debuggable";
static constexpr const char* kCompilerFilter = "compiler-filter";
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e5772666d1..3dfa0c4b6a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -786,7 +786,7 @@ bool Runtime::Start() {
// TODO(calin): We use the JIT class as a proxy for JIT compilation and for
// recoding profiles. Maybe we should consider changing the name to be more clear it's
// not only about compiling. b/28295073.
- if (!safe_mode_ && (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo())) {
+ if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
// Try to load compiler pre zygote to reduce PSS. b/27744947
std::string error_msg;
if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
@@ -2490,7 +2490,7 @@ void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {
DCHECK(!jit_options_->UseJitCompilation());
}
- if (safe_mode_ || (!jit_options_->UseJitCompilation() && !jit_options_->GetSaveProfilingInfo())) {
+ if (!jit_options_->UseJitCompilation() && !jit_options_->GetSaveProfilingInfo()) {
return;
}
@@ -2511,7 +2511,16 @@ void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {
}
void Runtime::CreateJit() {
+ DCHECK(jit_ == nullptr);
if (jit_code_cache_.get() == nullptr) {
+ if (!IsSafeMode()) {
+ LOG(WARNING) << "Missing code cache, cannot create JIT.";
+ }
+ return;
+ }
+ if (IsSafeMode()) {
+ LOG(INFO) << "Not creating JIT because of SafeMode.";
+ jit_code_cache_.reset();
return;
}
@@ -2520,7 +2529,7 @@ void Runtime::CreateJit() {
if (jit == nullptr) {
LOG(WARNING) << "Failed to allocate JIT";
// Release JIT code cache resources (several MB of memory).
- jit_code_cache_.reset(nullptr);
+ jit_code_cache_.reset();
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index be5b3c119c..ad4d3bb0d7 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -650,6 +650,13 @@ class Runtime {
void DeoptimizeBootImage();
bool IsNativeDebuggable() const {
+ CHECK(!is_zygote_ || IsAotCompiler());
+ return is_native_debuggable_;
+ }
+
+ // Note: prefer not to use this method, but the checked version above. The separation exists
+ // as the runtime state may change for a zygote child.
+ bool IsNativeDebuggableZygoteOK() const {
return is_native_debuggable_;
}
@@ -706,6 +713,11 @@ class Runtime {
double GetHashTableMinLoadFactor() const;
double GetHashTableMaxLoadFactor() const;
+ bool IsSafeMode() const {
+ CHECK(!is_zygote_);
+ return safe_mode_;
+ }
+
void SetSafeMode(bool mode) {
safe_mode_ = mode;
}
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index 758917cf7e..bf74816f24 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -151,6 +151,26 @@ void RuntimeCallbacks::RemoveMonitorCallback(MonitorCallback* cb) {
Remove(cb, &monitor_callbacks_);
}
+void RuntimeCallbacks::ThreadParkStart(bool is_absolute, int64_t timeout) {
+ for (ParkCallback * cb : park_callbacks_) {
+ cb->ThreadParkStart(is_absolute, timeout);
+ }
+}
+
+void RuntimeCallbacks::ThreadParkFinished(bool timeout) {
+ for (ParkCallback * cb : park_callbacks_) {
+ cb->ThreadParkFinished(timeout);
+ }
+}
+
+void RuntimeCallbacks::AddParkCallback(ParkCallback* cb) {
+ park_callbacks_.push_back(cb);
+}
+
+void RuntimeCallbacks::RemoveParkCallback(ParkCallback* cb) {
+ Remove(cb, &park_callbacks_);
+}
+
void RuntimeCallbacks::RemoveThreadLifecycleCallback(ThreadLifecycleCallback* cb) {
Remove(cb, &thread_callbacks_);
}
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index 9f0410d102..4cce15ed52 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -115,6 +115,19 @@ class MonitorCallback {
virtual ~MonitorCallback() {}
};
+class ParkCallback {
+ public:
+ // Called on entry to the Unsafe#park method
+ virtual void ThreadParkStart(bool is_absolute, int64_t millis_timeout)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+ // Called just after the thread has woken up from going to sleep for a park(). This will only be
+ // called for Unsafe.park() calls where the thread did (or at least could have) gone to sleep.
+ virtual void ThreadParkFinished(bool timed_out) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+ virtual ~ParkCallback() {}
+};
+
// A callback to let parts of the runtime note that they are currently relying on a particular
// method remaining in it's current state. Users should not rely on always being called. If multiple
// callbacks are added the runtime will short-circuit when the first one returns 'true'.
@@ -193,6 +206,11 @@ class RuntimeCallbacks {
void AddMonitorCallback(MonitorCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveMonitorCallback(MonitorCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadParkStart(bool is_absolute, int64_t timeout) REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadParkFinished(bool timed_out) REQUIRES_SHARED(Locks::mutator_lock_);
+ void AddParkCallback(ParkCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
+ void RemoveParkCallback(ParkCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Returns true if some MethodInspectionCallback indicates the method is being inspected/depended
// on by some code.
bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -243,6 +261,8 @@ class RuntimeCallbacks {
GUARDED_BY(Locks::mutator_lock_);
std::vector<MonitorCallback*> monitor_callbacks_
GUARDED_BY(Locks::mutator_lock_);
+ std::vector<ParkCallback*> park_callbacks_
+ GUARDED_BY(Locks::mutator_lock_);
std::vector<MethodInspectionCallback*> method_inspection_callbacks_
GUARDED_BY(Locks::mutator_lock_);
std::vector<DdmCallback*> ddm_callbacks_
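The ParkCallback additions above follow the same broadcast pattern as the existing monitor callbacks: listeners register with RuntimeCallbacks, and ThreadParkStart/ThreadParkFinished fan the event out to every registered callback. A self-contained model of that plumbing is sketched below; ParkListener, ParkCallbackRegistry and LoggingListener are made-up names for illustration, not ART or JVMTI types.

#include <cstdint>
#include <iostream>
#include <vector>

class ParkListener {
 public:
  virtual ~ParkListener() = default;
  virtual void ThreadParkStart(bool is_absolute, int64_t timeout) = 0;
  virtual void ThreadParkFinished(bool timed_out) = 0;
};

class ParkCallbackRegistry {
 public:
  void Add(ParkListener* cb) { callbacks_.push_back(cb); }
  void ThreadParkStart(bool is_absolute, int64_t timeout) {
    for (ParkListener* cb : callbacks_) cb->ThreadParkStart(is_absolute, timeout);
  }
  void ThreadParkFinished(bool timed_out) {
    for (ParkListener* cb : callbacks_) cb->ThreadParkFinished(timed_out);
  }
 private:
  std::vector<ParkListener*> callbacks_;
};

// Example listener that just logs, the way a JVMTI agent might surface the events.
class LoggingListener : public ParkListener {
 public:
  void ThreadParkStart(bool is_absolute, int64_t timeout) override {
    std::cout << "park start, absolute=" << is_absolute << " timeout=" << timeout << "\n";
  }
  void ThreadParkFinished(bool timed_out) override {
    std::cout << "park finished, timed_out=" << timed_out << "\n";
  }
};

int main() {
  ParkCallbackRegistry registry;
  LoggingListener logger;
  registry.Add(&logger);
  registry.ThreadParkStart(/*is_absolute=*/false, /*timeout=*/0);
  registry.ThreadParkFinished(/*timed_out=*/false);
}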
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 66e852a216..e9fed76d6f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -302,8 +302,9 @@ void Thread::Park(bool is_absolute, int64_t time) {
int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
if (old_state == kNoPermit) {
// no permit was available. block thread until later.
- // TODO: Call to signal jvmti here
+ Runtime::Current()->GetRuntimeCallbacks()->ThreadParkStart(is_absolute, time);
int result = 0;
+ bool timed_out = false;
if (!is_absolute && time == 0) {
// Thread.getState() is documented to return waiting for untimed parks.
ScopedThreadSuspension sts(this, ThreadState::kWaiting);
@@ -351,8 +352,10 @@ void Thread::Park(bool is_absolute, int64_t time) {
}
if (result == -1) {
switch (errno) {
- case EAGAIN:
case ETIMEDOUT:
+ timed_out = true;
+ FALLTHROUGH_INTENDED;
+ case EAGAIN:
case EINTR: break; // park() is allowed to spuriously return
default: PLOG(FATAL) << "Failed to park";
}
@@ -360,6 +363,7 @@ void Thread::Park(bool is_absolute, int64_t time) {
// Mark as no longer waiting, and consume permit if there is one.
tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
// TODO: Call to signal jvmti here
+ Runtime::Current()->GetRuntimeCallbacks()->ThreadParkFinished(timed_out);
} else {
// the fetch_add has consumed the permit. immediately return.
DCHECK_EQ(old_state, kPermitAvailable);
@@ -2250,7 +2254,7 @@ Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
is_runtime_thread_(false) {
- wait_mutex_ = new Mutex("a thread wait mutex");
+ wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
tlsPtr_.name = new std::string(kThreadNameDuringStartup);
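The Thread::Park() hunk above wires the new park callbacks around the blocking wait and starts distinguishing a real timeout from a spurious return: only ETIMEDOUT sets timed_out before it is forwarded to ThreadParkFinished(), while EAGAIN and EINTR remain treated as spurious wakeups. A standalone sketch of that errno handling follows; the wait function is a stand-in, not the real futex path.

#include <cerrno>
#include <cstdio>
#include <cstdlib>

bool WaitAndReportTimeout(int (*do_wait)()) {
  bool timed_out = false;
  int result = do_wait();                 // returns -1 and sets errno on failure
  if (result == -1) {
    switch (errno) {
      case ETIMEDOUT:
        timed_out = true;
        [[fallthrough]];
      case EAGAIN:
      case EINTR:
        break;                            // park() is allowed to spuriously return
      default:
        std::perror("Failed to park");
        std::abort();
    }
  }
  return timed_out;                       // forwarded to ThreadParkFinished()
}

int main() {
  // Fake wait that reports a timeout, standing in for the real timed wait.
  bool timed_out = WaitAndReportTimeout([]() { errno = ETIMEDOUT; return -1; });
  std::printf("timed_out=%d\n", timed_out);
}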
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 94faa626f6..65039bc6d4 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -122,6 +122,7 @@ jfieldID WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList;
jfieldID WellKnownClasses::dalvik_system_DexPathList_dexElements;
jfieldID WellKnownClasses::dalvik_system_DexPathList__Element_dexFile;
jfieldID WellKnownClasses::dalvik_system_VMRuntime_nonSdkApiUsageConsumer;
+jfieldID WellKnownClasses::java_lang_Thread_parkBlocker;
jfieldID WellKnownClasses::java_lang_Thread_daemon;
jfieldID WellKnownClasses::java_lang_Thread_group;
jfieldID WellKnownClasses::java_lang_Thread_lock;
@@ -371,6 +372,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
dalvik_system_DexPathList_dexElements = CacheField(env, dalvik_system_DexPathList, false, "dexElements", "[Ldalvik/system/DexPathList$Element;");
dalvik_system_DexPathList__Element_dexFile = CacheField(env, dalvik_system_DexPathList__Element, false, "dexFile", "Ldalvik/system/DexFile;");
dalvik_system_VMRuntime_nonSdkApiUsageConsumer = CacheField(env, dalvik_system_VMRuntime, true, "nonSdkApiUsageConsumer", "Ljava/util/function/Consumer;");
+ java_lang_Thread_parkBlocker = CacheField(env, java_lang_Thread, false, "parkBlocker", "Ljava/lang/Object;");
java_lang_Thread_daemon = CacheField(env, java_lang_Thread, false, "daemon", "Z");
java_lang_Thread_group = CacheField(env, java_lang_Thread, false, "group", "Ljava/lang/ThreadGroup;");
java_lang_Thread_lock = CacheField(env, java_lang_Thread, false, "lock", "Ljava/lang/Object;");
@@ -518,6 +520,7 @@ void WellKnownClasses::Clear() {
dalvik_system_DexPathList_dexElements = nullptr;
dalvik_system_DexPathList__Element_dexFile = nullptr;
dalvik_system_VMRuntime_nonSdkApiUsageConsumer = nullptr;
+ java_lang_Thread_parkBlocker = nullptr;
java_lang_Thread_daemon = nullptr;
java_lang_Thread_group = nullptr;
java_lang_Thread_lock = nullptr;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 8c85228dfc..130747c026 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -131,6 +131,7 @@ struct WellKnownClasses {
static jfieldID dalvik_system_DexPathList_dexElements;
static jfieldID dalvik_system_DexPathList__Element_dexFile;
static jfieldID dalvik_system_VMRuntime_nonSdkApiUsageConsumer;
+ static jfieldID java_lang_Thread_parkBlocker;
static jfieldID java_lang_Thread_daemon;
static jfieldID java_lang_Thread_group;
static jfieldID java_lang_Thread_lock;
diff --git a/simulator/Android.bp b/simulator/Android.bp
index 8690426466..223c891da0 100644
--- a/simulator/Android.bp
+++ b/simulator/Android.bp
@@ -45,7 +45,7 @@ art_cc_library {
shared_libs: [
"libart",
"libartbase",
- "libvixl-arm64",
+ "libvixl",
],
}
@@ -58,7 +58,7 @@ art_cc_library {
shared_libs: [
"libartd",
"libartbased",
- "libvixld-arm64",
+ "libvixld",
],
}
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index d43d374a42..9176e89aaf 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -32,6 +32,20 @@ public class Main {
}
}
+ private static void check(float actual, float expected, String msg) {
+ if (actual != expected) {
+ System.out.println(msg + " : " + actual + " != " + expected);
+ System.exit(1);
+ }
+ }
+
+ private static void check(double actual, double expected, String msg) {
+ if (actual != expected) {
+ System.out.println(msg + " : " + actual + " != " + expected);
+ System.exit(1);
+ }
+ }
+
private static void check(Object actual, Object expected, String msg) {
if (actual != expected) {
System.out.println(msg + " : " + actual + " != " + expected);
@@ -54,6 +68,7 @@ public class Main {
testArrayIndexScale(unsafe);
testGetAndPutAndCAS(unsafe);
testGetAndPutVolatile(unsafe);
+ testCopyMemoryPrimitiveArrays(unsafe);
}
private static void testArrayBaseOffset(Unsafe unsafe) {
@@ -237,6 +252,38 @@ public class Main {
"Unsafe.getObjectVolatile(Object, long)");
}
+ // Regression test for "copyMemory" operations hitting a DCHECK() for float/double arrays.
+ private static void testCopyMemoryPrimitiveArrays(Unsafe unsafe) {
+ int size = 4 * 1024;
+ long memory = unsafeTestMalloc(size);
+
+ int floatSize = 4;
+ float[] inputFloats = new float[size / floatSize];
+ for (int i = 0; i != inputFloats.length; ++i) {
+ inputFloats[i] = ((float)i) + 0.5f;
+ }
+ float[] outputFloats = new float[size / floatSize];
+ unsafe.copyMemoryFromPrimitiveArray(inputFloats, 0, memory, size);
+ unsafe.copyMemoryToPrimitiveArray(memory, outputFloats, 0, size);
+ for (int i = 0; i != inputFloats.length; ++i) {
+ check(inputFloats[i], outputFloats[i], "unsafe.copyMemory/float");
+ }
+
+ int doubleSize = 8;
+ double[] inputDoubles = new double[size / doubleSize];
+ for (int i = 0; i != inputDoubles.length; ++i) {
+ inputDoubles[i] = ((double)i) + 0.5;
+ }
+ double[] outputDoubles = new double[size / doubleSize];
+ unsafe.copyMemoryFromPrimitiveArray(inputDoubles, 0, memory, size);
+ unsafe.copyMemoryToPrimitiveArray(memory, outputDoubles, 0, size);
+ for (int i = 0; i != inputDoubles.length; ++i) {
+ check(inputDoubles[i], outputDoubles[i], "unsafe.copyMemory/double");
+ }
+
+ unsafeTestFree(memory);
+ }
+
private static class TestClass {
public int intVar = 0;
public long longVar = 0;
@@ -251,4 +298,6 @@ public class Main {
private static native int vmArrayBaseOffset(Class<?> clazz);
private static native int vmArrayIndexScale(Class<?> clazz);
+ private static native long unsafeTestMalloc(long size);
+ private static native void unsafeTestFree(long memory);
}
diff --git a/test/004-UnsafeTest/unsafe_test.cc b/test/004-UnsafeTest/unsafe_test.cc
index 18d9ea8913..e970aaa840 100644
--- a/test/004-UnsafeTest/unsafe_test.cc
+++ b/test/004-UnsafeTest/unsafe_test.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/casts.h"
#include "jni.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
@@ -37,4 +38,16 @@ extern "C" JNIEXPORT jint JNICALL Java_Main_vmArrayIndexScale(JNIEnv* env, jclas
return Primitive::ComponentSize(klass->GetComponentType()->GetPrimitiveType());
}
+extern "C" JNIEXPORT jlong JNICALL Java_Main_unsafeTestMalloc(JNIEnv*, jclass, jlong size) {
+ void* memory = malloc(dchecked_integral_cast<size_t>(size));
+ CHECK(memory != nullptr);
+ return reinterpret_cast64<jlong>(memory);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_unsafeTestFree(JNIEnv*, jclass, jlong memory) {
+ void* mem = reinterpret_cast64<void*>(memory);
+ CHECK(mem != nullptr);
+ free(mem);
+}
+
} // namespace art
diff --git a/test/1931-monitor-events/check b/test/1931-monitor-events/check
new file mode 100644
index 0000000000..8a7f844283
--- /dev/null
+++ b/test/1931-monitor-events/check
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Art sends events for park/unpark, and the RI doesn't. Remove them from the expected output.
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+ patch -p0 expected.txt < jvm-expected.patch >/dev/null
+fi
+
+./default-check "$@"
diff --git a/test/1931-monitor-events/expected.txt b/test/1931-monitor-events/expected.txt
index 33a9bd3684..f368ae20be 100644
--- a/test/1931-monitor-events/expected.txt
+++ b/test/1931-monitor-events/expected.txt
@@ -1,6 +1,9 @@
Testing contended locking.
Locker thread 1 for NamedLock[Lock testLock] contended-LOCKING NamedLock[Lock testLock]
Locker thread 1 for NamedLock[Lock testLock] LOCKED NamedLock[Lock testLock]
+Testing park.
+ParkThread start-monitor-wait NamedLock[Parking blocker object] timeout: 1
+ParkThread monitor-waited NamedLock[Parking blocker object] timed_out: true
Testing monitor wait.
Locker thread 2 for NamedLock[Lock testWait] start-monitor-wait NamedLock[Lock testWait] timeout: 0
Locker thread 2 for NamedLock[Lock testWait] monitor-waited NamedLock[Lock testWait] timed_out: false
diff --git a/test/1931-monitor-events/jvm-expected.patch b/test/1931-monitor-events/jvm-expected.patch
new file mode 100644
index 0000000000..7595b145a2
--- /dev/null
+++ b/test/1931-monitor-events/jvm-expected.patch
@@ -0,0 +1,3 @@
+5,6d4
+< ParkThread start-monitor-wait NamedLock[Parking blocker object] timeout: 1
+< ParkThread monitor-waited NamedLock[Parking blocker object] timed_out: true
diff --git a/test/1931-monitor-events/src/art/Test1931.java b/test/1931-monitor-events/src/art/Test1931.java
index ccefede9f8..f5497893cb 100644
--- a/test/1931-monitor-events/src/art/Test1931.java
+++ b/test/1931-monitor-events/src/art/Test1931.java
@@ -23,6 +23,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.*;
+import java.util.concurrent.locks.LockSupport;
import java.util.ListIterator;
import java.util.function.Consumer;
import java.util.function.Function;
@@ -67,6 +68,9 @@ public class Test1931 {
System.out.println("Testing contended locking.");
testLock(new Monitors.NamedLock("Lock testLock"));
+ System.out.println("Testing park.");
+ testPark(new Monitors.NamedLock("Parking blocker object"));
+
System.out.println("Testing monitor wait.");
testWait(new Monitors.NamedLock("Lock testWait"));
@@ -88,6 +92,14 @@ public class Test1931 {
testInteruptWait(new Monitors.NamedLock("Lock testInteruptWait"));
}
+ public static void testPark(Object blocker) throws Exception {
+ Thread holder = new Thread(() -> {
+ LockSupport.parkNanos(blocker, 10); // Should round up to one millisecond
+ }, "ParkThread");
+ holder.start();
+ holder.join();
+ }
+
public static void testInteruptWait(final Monitors.NamedLock lk) throws Exception {
final Monitors.LockController controller1 = new Monitors.LockController(lk);
controller1.DoLock();
diff --git a/test/411-checker-hdiv-hrem-pow2/src/RemTest.java b/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
index 72725c1cd4..54d7847fdf 100644
--- a/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
+++ b/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
@@ -92,6 +92,17 @@ public class RemTest {
/// CHECK: cmp w{{\d+}}, #0x0
/// CHECK: and w{{\d+}}, w{{\d+}}, #0x1
/// CHECK: cneg w{{\d+}}, w{{\d+}}, lt
+ /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntMod2(int) disassembly (after)
+ /// CHECK: Rem [{{i\d+}},{{i\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shr
+ /// CHECK-NOT: imul
+ /// CHECK: mov
+ /// CHECK: and
+ /// CHECK: jz/eq
+ /// CHECK: lea
+ /// CHECK: test
+ /// CHECK: cmovl/nge
private static Integer $noinline$IntMod2(int v) {
int r = v % 2;
return r;
@@ -101,6 +112,17 @@ public class RemTest {
/// CHECK: cmp w{{\d+}}, #0x0
/// CHECK: and w{{\d+}}, w{{\d+}}, #0x1
/// CHECK: cneg w{{\d+}}, w{{\d+}}, lt
+ /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntModMinus2(int) disassembly (after)
+ /// CHECK: Rem [{{i\d+}},{{i\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shr
+ /// CHECK-NOT: imul
+ /// CHECK: mov
+ /// CHECK: and
+ /// CHECK: jz/eq
+ /// CHECK: lea
+ /// CHECK: test
+ /// CHECK: cmovl/nge
private static Integer $noinline$IntModMinus2(int v) {
int r = v % -2;
return r;
@@ -111,6 +133,17 @@ public class RemTest {
/// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
/// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
/// CHECK: csneg w{{\d+}}, w{{\d+}}, mi
+ /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntMod16(int) disassembly (after)
+ /// CHECK: Rem [{{i\d+}},{{i\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shr
+ /// CHECK-NOT: imul
+ /// CHECK: mov
+ /// CHECK: and
+ /// CHECK: jz/eq
+ /// CHECK: lea
+ /// CHECK: test
+ /// CHECK: cmovl/nge
private static Integer $noinline$IntMod16(int v) {
int r = v % 16;
return r;
@@ -121,6 +154,17 @@ public class RemTest {
/// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
/// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
/// CHECK: csneg w{{\d+}}, w{{\d+}}, mi
+ /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntModMinus16(int) disassembly (after)
+ /// CHECK: Rem [{{i\d+}},{{i\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shr
+ /// CHECK-NOT: imul
+ /// CHECK: mov
+ /// CHECK: and
+ /// CHECK: jz/eq
+ /// CHECK: lea
+ /// CHECK: test
+ /// CHECK: cmovl/nge
private static Integer $noinline$IntModMinus16(int v) {
int r = v % -16;
return r;
@@ -131,6 +175,17 @@ public class RemTest {
/// CHECK: and w{{\d+}}, w{{\d+}}, #0x7fffffff
/// CHECK: and w{{\d+}}, w{{\d+}}, #0x7fffffff
/// CHECK: csneg w{{\d+}}, w{{\d+}}, mi
+ /// CHECK-START-X86_64: java.lang.Integer RemTest.$noinline$IntModIntMin(int) disassembly (after)
+ /// CHECK: Rem [{{i\d+}},{{i\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shr
+ /// CHECK-NOT: imul
+ /// CHECK: mov
+ /// CHECK: and
+ /// CHECK: jz/eq
+ /// CHECK: lea
+ /// CHECK: test
+ /// CHECK: cmovl/nge
private static Integer $noinline$IntModIntMin(int v) {
int r = v % Integer.MIN_VALUE;
return r;
@@ -211,6 +266,18 @@ public class RemTest {
/// CHECK: cmp x{{\d+}}, #0x0
/// CHECK: and x{{\d+}}, x{{\d+}}, #0x1
/// CHECK: cneg x{{\d+}}, x{{\d+}}, lt
+ /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongMod2(long) disassembly (after)
+ /// CHECK: Rem [{{j\d+}},{{j\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shrq
+ /// CHECK-NOT: imulq
+ /// CHECK: movq
+ /// CHECK: andq
+ /// CHECK: jz/eq
+ /// CHECK: movq
+ /// CHECK: sarq
+ /// CHECK: shlq
+ /// CHECK: orq
private static Long $noinline$LongMod2(long v) {
long r = v % 2;
return r;
@@ -220,6 +287,18 @@ public class RemTest {
/// CHECK: cmp x{{\d+}}, #0x0
/// CHECK: and x{{\d+}}, x{{\d+}}, #0x1
/// CHECK: cneg x{{\d+}}, x{{\d+}}, lt
+ /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongModMinus2(long) disassembly (after)
+ /// CHECK: Rem [{{j\d+}},{{j\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shrq
+ /// CHECK-NOT: imulq
+ /// CHECK: movq
+ /// CHECK: andq
+ /// CHECK: jz/eq
+ /// CHECK: movq
+ /// CHECK: sarq
+ /// CHECK: shlq
+ /// CHECK: orq
private static Long $noinline$LongModMinus2(long v) {
long r = v % -2;
return r;
@@ -230,6 +309,19 @@ public class RemTest {
/// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
/// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
/// CHECK: csneg x{{\d+}}, x{{\d+}}, mi
+
+ /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongMod16(long) disassembly (after)
+ /// CHECK: Rem [{{j\d+}},{{j\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shrq
+ /// CHECK-NOT: imulq
+ /// CHECK: movq
+ /// CHECK: andq
+ /// CHECK: jz/eq
+ /// CHECK: movq
+ /// CHECK: sarq
+ /// CHECK: shlq
+ /// CHECK: orq
private static Long $noinline$LongMod16(long v) {
long r = v % 16;
return r;
@@ -240,6 +332,18 @@ public class RemTest {
/// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
/// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
/// CHECK: csneg x{{\d+}}, x{{\d+}}, mi
+ /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongModMinus16(long) disassembly (after)
+ /// CHECK: Rem [{{j\d+}},{{j\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shrq
+ /// CHECK-NOT: imulq
+ /// CHECK: movq
+ /// CHECK: andq
+ /// CHECK: jz/eq
+ /// CHECK: movq
+ /// CHECK: sarq
+ /// CHECK: shlq
+ /// CHECK: orq
private static Long $noinline$LongModMinus16(long v) {
long r = v % -16;
return r;
@@ -250,6 +354,18 @@ public class RemTest {
/// CHECK: and x{{\d+}}, x{{\d+}}, #0x7fffffffffffffff
/// CHECK: and x{{\d+}}, x{{\d+}}, #0x7fffffffffffffff
/// CHECK: csneg x{{\d+}}, x{{\d+}}, mi
+ /// CHECK-START-X86_64: java.lang.Long RemTest.$noinline$LongModLongMin(long) disassembly (after)
+ /// CHECK: Rem [{{j\d+}},{{j\d+}}]
+ /// CHECK-NOT: imul
+ /// CHECK-NOT: shrq
+ /// CHECK-NOT: imulq
+ /// CHECK: movq
+ /// CHECK: andq
+ /// CHECK: jz/eq
+ /// CHECK: movq
+ /// CHECK: sarq
+ /// CHECK: shlq
+ /// CHECK: orq
private static Long $noinline$LongModLongMin(long v) {
long r = v % Long.MIN_VALUE;
return r;
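
The long CHECK sequences (movq, andq, jz/eq, movq, sarq, shlq, orq) avoid the conditional move and instead OR in the sign-extended high bits when the dividend is negative. A minimal Java sketch of that variant, again with a hypothetical helper name and k assumed to be the exponent of abs(d):

    // Hypothetical illustration of the expected x86_64 code for v % d,
    // where abs(d) == 1L << k. Not part of RemTest itself.
    private static long $noinline$longRemPow2Sketch(long v, int k) {
        long mask = (1L << k) - 1;
        long r = v & mask;               // movq + andq
        if (r == 0) {                    // jz/eq
            return 0;
        }
        long sign = v >> 63;             // movq + sarq: 0 for v >= 0, -1 for v < 0
        return r | (sign << k);          // shlq + orq: subtracts 1L << k only when v < 0
    }

Because r lies strictly between 0 and 1L << k at the OR, r | -(1L << k) equals r - (1L << k), so negative dividends get the correctly signed remainder, including the Long.MIN_VALUE divisor case (k == 63).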
diff --git a/test/911-get-stack-trace/src/art/PrintThread.java b/test/911-get-stack-trace/src/art/PrintThread.java
index d8b3cbc57e..798db061b8 100644
--- a/test/911-get-stack-trace/src/art/PrintThread.java
+++ b/test/911-get-stack-trace/src/art/PrintThread.java
@@ -42,7 +42,7 @@ public class PrintThread {
// may not exist depending on the environment.
public final static String IGNORE_THREAD_NAME_REGEX =
"Binder:|RenderThread|hwuiTask|Jit thread pool worker|Instr:|JDWP|Profile Saver|main|" +
- "queued-work-looper|InstrumentationConnectionThread";
+ "queued-work-looper|InstrumentationConnectionThread|intel_svc_streamer_thread";
public final static Matcher IGNORE_THREADS =
Pattern.compile(IGNORE_THREAD_NAME_REGEX).matcher("");
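
For context, the ignore regex above is matched against thread names so that environment-specific threads (here, intel_svc_streamer_thread) do not perturb the expected stack-trace output. The exact call site is outside this hunk; a minimal sketch of that kind of filtering, assuming a substring match via Matcher.find and a hypothetical helper:

    // Hypothetical filter: skip threads whose names hit the ignore regex,
    // e.g. intel_svc_streamer_thread, which may not exist on every device.
    private static boolean shouldIgnore(String threadName) {
        return IGNORE_THREADS.reset(threadName).find();
    }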
diff --git a/test/Android.bp b/test/Android.bp
index 561f95eb47..d85e2a6fca 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -59,8 +59,7 @@ art_cc_defaults {
shared_libs: [
"libartd",
"libartd-disassembler",
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
"libart-gtest",
"libdexfiled",
"libprofiled",
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 3d70087b81..c1cc2e2318 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -81,6 +81,8 @@ elif [[ $mode == "target" ]]; then
# These targets are needed for the chroot environment.
make_command+=" crash_dump event-log-tags"
fi
+ # Build the Runtime APEX.
+ make_command+=" com.android.runtime"
mode_suffix="-target"
fi
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index f61b3e8038..65a4945355 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -813,6 +813,7 @@ class DexFileEditor final {
// Create a new MapItem entry with new MapList details.
DexFile::MapItem new_item;
new_item.type_ = old_item.type_;
+ new_item.unused_ = 0u; // initialize to ensure dex output is deterministic (b/119308882)
new_item.size_ = old_item.size_;
new_item.offset_ = new_map_offset;
diff --git a/tools/libcore_gcstress_failures.txt b/tools/libcore_gcstress_failures.txt
index fff1c70ad8..eec45fa354 100644
--- a/tools/libcore_gcstress_failures.txt
+++ b/tools/libcore_gcstress_failures.txt
@@ -27,12 +27,14 @@
description: "Timeouts.",
result: EXEC_FAILED,
modes: [device],
- names: ["libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence",
+ names: ["jsr166.TimeUnitTest#testConvert",
+ "libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence",
+ "libcore.java.text.DecimalFormatTest#testCurrencySymbolSpacing",
+ "libcore.java.text.SimpleDateFormatTest#testLocales",
"org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_remove",
"org.apache.harmony.tests.java.text.DateFormatTest#test_getAvailableLocales",
+ "org.apache.harmony.tests.java.lang.String2Test#test_getBytes",
"org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
- "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext",
- "libcore.java.text.DecimalFormatTest#testCurrencySymbolSpacing",
- "libcore.java.text.SimpleDateFormatTest#testLocales"]
+ "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext"]
}
]