-rw-r--r--  compiler/jit/jit_compiler.cc                             2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc          17
-rw-r--r--  compiler/optimizing/code_generator_mips.cc              66
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc            66
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc               2
-rw-r--r--  openjdkjvmti/OpenjdkJvmTi.cc                             1
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.cc                        7
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.h                         3
-rw-r--r--  openjdkjvmti/ti_class_definition.cc                     93
-rw-r--r--  openjdkjvmti/ti_class_definition.h                      35
-rw-r--r--  openjdkjvmti/transform.cc                              173
-rw-r--r--  openjdkjvmti/transform.h                                 2
-rw-r--r--  profman/profile_assistant.cc                            21
-rw-r--r--  profman/profile_assistant.h                             11
-rw-r--r--  profman/profile_assistant_test.cc                      108
-rw-r--r--  profman/profman.cc                                     136
-rw-r--r--  runtime/arch/stub_test.cc                               10
-rw-r--r--  runtime/base/mutex.cc                                    6
-rw-r--r--  runtime/base/mutex.h                                     7
-rw-r--r--  runtime/class_linker.cc                                  2
-rw-r--r--  runtime/dex/compact_dex_file.h                           6
-rw-r--r--  runtime/dex/dex_file.h                                   7
-rw-r--r--  runtime/dex/standard_dex_file.h                          4
-rw-r--r--  runtime/fault_handler.cc                                26
-rw-r--r--  runtime/jit/debugger_interface.cc                       62
-rw-r--r--  runtime/jit/debugger_interface.h                        20
-rw-r--r--  runtime/jit/jit_code_cache.cc                            4
-rw-r--r--  runtime/mem_map.cc                                     115
-rw-r--r--  runtime/mem_map.h                                       33
-rw-r--r--  runtime/mem_map_test.cc                                200
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc                  2
-rw-r--r--  test/603-checker-instanceof/src/Main.java                2
-rw-r--r--  test/983-source-transform-verify/source_transform.cc     8
-rwxr-xr-x  test/etc/run-test-jar                                    2
34 files changed, 1096 insertions(+), 163 deletions(-)
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 2c62095458..17b94d3bdf 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,7 +76,7 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
const ArrayRef<mirror::Class*> types_array(types, count);
std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
CreateJITCodeEntry(std::move(elf_file));
}
}
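
For context on what the new lock protects: CreateJITCodeEntry() (in runtime/jit/debugger_interface.cc, also touched by this change) links ELF symfiles into the process-global list that native debuggers poll, and that list is the "magic global variables used by native tools" mentioned in the mutex.h hunk below. As a reference, a minimal sketch of that interface follows; the symbol names and struct layout are fixed by the documented GDB JIT API rather than by ART, and this is not ART's actual debugger_interface.cc:

    #include <cstdint>

    extern "C" {
      typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t;

      struct jit_code_entry {
        jit_code_entry* next_entry;
        jit_code_entry* prev_entry;
        const char* symfile_addr;  // In-memory ELF file describing the JITed code.
        uint64_t symfile_size;
      };

      struct jit_descriptor {
        uint32_t version;          // Always 1.
        uint32_t action_flag;      // A jit_actions_t value.
        jit_code_entry* relevant_entry;
        jit_code_entry* first_entry;
      };

      // Debuggers set a breakpoint here; the runtime calls it after each list update.
      void __attribute__((noinline)) __jit_debug_register_code() {}
      jit_descriptor __jit_debug_descriptor = {1, JIT_NOACTION, nullptr, nullptr};
    }

Every mutation of this list (from jit_compiler.cc above and optimizing_compiler.cc below) now happens under the shared Locks::native_debug_interface_lock_ instead of the file-local g_jit_debug_mutex.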
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 704a0d3b87..6d49b32dbc 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2498,8 +2498,23 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
}
if (!skip_overflow_check) {
+ // Using r4 instead of IP saves 2 bytes.
UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
+ vixl32::Register temp;
+ // TODO: Remove this check when R4 is made a callee-save register
+ // in ART compiled code (b/72801708). Currently we need to make
+ // sure r4 is not blocked, e.g. in special purpose
+ // TestCodeGeneratorARMVIXL; also asserting that r4 is available
+ // here.
+ if (!blocked_core_registers_[R4]) {
+ for (vixl32::Register reg : kParameterCoreRegistersVIXL) {
+ DCHECK(!reg.Is(r4));
+ }
+ DCHECK(!kCoreCalleeSaves.Includes(r4));
+ temp = r4;
+ } else {
+ temp = temps.Acquire();
+ }
__ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 36c921986b..855da2b18f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -395,7 +395,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -3283,26 +3283,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -3331,18 +3313,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
MipsLabel done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
instruction, is_type_check_slow_path_fatal);
@@ -7213,11 +7184,12 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -7265,13 +7237,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -7279,13 +7253,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
MipsLabel loop;
@@ -7295,7 +7271,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ Bne(out, cls, &loop);
@@ -7304,13 +7280,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
MipsLabel loop, success;
__ Bind(&loop);
@@ -7320,7 +7298,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnez(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -7330,13 +7308,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
MipsLabel success;
__ Beq(out, cls, &success);
@@ -7346,7 +7326,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6657582e2a..8a06061c6a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -352,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -2836,26 +2836,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -2884,18 +2866,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
Mips64Label done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS64* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
instruction, is_type_check_slow_path_fatal);
@@ -5528,11 +5499,12 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -5580,13 +5552,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -5594,13 +5568,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Mips64Label loop;
@@ -5610,7 +5586,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ Bnec(out, cls, &loop);
@@ -5619,13 +5595,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
Mips64Label loop, success;
__ Bind(&loop);
@@ -5635,7 +5613,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnezc(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ Bc(&done);
@@ -5645,13 +5623,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
Mips64Label success;
__ Beqc(out, cls, &success);
@@ -5661,7 +5641,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 47ef194574..b3f23a0dcd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1410,7 +1410,7 @@ void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDe
GetCompilerDriver()->GetInstructionSetFeatures(),
mini_debug_info,
ArrayRef<const debug::MethodDebugInfo>(&info, 1));
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = CreateJITCodeEntry(elf_file);
IncrementJITCodeEntryRefcount(entry, info.code_address);
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 027635bbb5..a0c7f40b6f 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -1535,6 +1535,7 @@ extern "C" bool ArtPlugin_Initialize() {
MethodUtil::Register(&gEventHandler);
SearchUtil::Register();
HeapUtil::Register();
+ Transformer::Setup();
{
// Make sure we can deopt anything we need to.
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index a8d2a37fa6..e9522b3984 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -45,14 +45,13 @@
namespace openjdkjvmti {
-static void RecomputeDexChecksum(art::DexFile* dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void RecomputeDexChecksum(art::DexFile* dex_file) {
reinterpret_cast<art::DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
dex_file->CalculateChecksum();
}
-static void DoDexUnquicken(const art::DexFile& new_dex_file, const art::DexFile& original_dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void DoDexUnquicken(const art::DexFile& new_dex_file,
+ const art::DexFile& original_dex_file) {
const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
if (oat_dex == nullptr) {
return;
diff --git a/openjdkjvmti/fixed_up_dex_file.h b/openjdkjvmti/fixed_up_dex_file.h
index 7f05a2930a..594e8a7358 100644
--- a/openjdkjvmti/fixed_up_dex_file.h
+++ b/openjdkjvmti/fixed_up_dex_file.h
@@ -50,8 +50,7 @@ namespace openjdkjvmti {
class FixedUpDexFile {
public:
static std::unique_ptr<FixedUpDexFile> Create(const art::DexFile& original,
- const char* descriptor)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
+ const char* descriptor);
const art::DexFile& GetDexFile() {
return *dex_file_;
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 7f2f80009a..1b641cd905 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -45,6 +45,31 @@
namespace openjdkjvmti {
+void ArtClassDefinition::InitializeMemory() const {
+ DCHECK(art::MemMap::kCanReplaceMapping);
+ VLOG(signals) << "Initializing de-quickened memory for dex file of " << name_;
+ CHECK(dex_data_mmap_ != nullptr);
+ CHECK(temp_mmap_ != nullptr);
+ CHECK_EQ(dex_data_mmap_->GetProtect(), PROT_NONE);
+ CHECK_EQ(temp_mmap_->GetProtect(), PROT_READ | PROT_WRITE);
+
+ std::string desc = std::string("L") + name_ + ";";
+ std::unique_ptr<FixedUpDexFile>
+ fixed_dex_file(FixedUpDexFile::Create(*initial_dex_file_unquickened_, desc.c_str()));
+ CHECK(fixed_dex_file.get() != nullptr);
+ CHECK_LE(fixed_dex_file->Size(), temp_mmap_->Size());
+ CHECK_EQ(temp_mmap_->Size(), dex_data_mmap_->Size());
+ // Copy the data to the temp mmap.
+ memcpy(temp_mmap_->Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+
+ // Move the mmap atomically.
+ art::MemMap* source = temp_mmap_.release();
+ std::string error;
+ CHECK(dex_data_mmap_->ReplaceWith(&source, &error)) << "Failed to replace mmap for "
+ << name_ << " because " << error;
+ CHECK(dex_data_mmap_->Protect(PROT_READ));
+}
+
bool ArtClassDefinition::IsModified() const {
// RedefineClasses calls always are 'modified' since they need to change the current_dex_file of
// the class.
@@ -58,6 +83,27 @@ bool ArtClassDefinition::IsModified() const {
return false;
}
+ // The dex_data_ was never touched by the agents.
+ if (dex_data_mmap_ != nullptr && dex_data_mmap_->GetProtect() == PROT_NONE) {
+ if (current_dex_file_.data() == dex_data_mmap_->Begin()) {
+ // The dex_data_ looks like it changed (it is not equal to current_dex_file_) but we never
+ // initialized the dex_data_mmap_. This means the new_dex_data was filled in without looking
+ // at the initial dex_data_.
+ return true;
+ } else if (dex_data_.data() == dex_data_mmap_->Begin()) {
+ // The dex file used to have modifications but they were not added again.
+ return true;
+ } else {
+ // It's not clear what happened. It's possible that the agent got the current dex file data
+ // from some other source so we need to initialize everything to see if it is the same.
+ VLOG(signals) << "Lazy dex file for " << name_ << " was never touched but the dex_data_ is "
+ << "changed! Need to initialize the memory to see if anything changed";
+ InitializeMemory();
+ }
+ }
+
+ // We can definitely read current_dex_file_ and dex_data_ without causing page faults.
+
// Check if the dex file we want to set is the same as the current one.
// Unfortunately we need to do this check even if no modifications have been done since it could
// be that agents were removed in the mean-time so we still have a different dex file. The dex
@@ -194,6 +240,53 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
const art::DexFile* quick_dex) {
art::Thread* self = art::Thread::Current();
DCHECK(quick_dex != nullptr);
+ if (art::MemMap::kCanReplaceMapping && kEnableOnDemandDexDequicken) {
+ size_t dequick_size = quick_dex->GetDequickenedSize();
+ std::string mmap_name("anon-mmap-for-redefine: ");
+ mmap_name += name_;
+ std::string error;
+ dex_data_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_NONE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+ mmap_name += "-TEMP";
+ temp_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+ if (UNLIKELY(dex_data_mmap_ != nullptr && temp_mmap_ != nullptr)) {
+ // Need to save the initial dex file so we don't need to search for it in the fault handler.
+ initial_dex_file_unquickened_ = quick_dex;
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dex_data_mmap_->Size());
+ if (from_class_ext_) {
+ // We got initial from class_ext so the current one must have undergone redefinition so no
+ // cdex or quickening stuff.
+ // We can only do this if it's not a first load.
+ DCHECK(klass_ != nullptr);
+ const art::DexFile& cur_dex = self->DecodeJObject(klass_)->AsClass()->GetDexFile();
+ current_dex_file_ = art::ArrayRef<const unsigned char>(cur_dex.Begin(), cur_dex.Size());
+ } else {
+ // This class hasn't been redefined before. The dequickened current data is the same as the
+ // dex_data_mmap_ once it's filled in. We don't need to copy anything because the mmap will
+ // not be cleared until after everything is done.
+ current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dequick_size);
+ }
+ return;
+ }
+ }
+ dex_data_mmap_.reset(nullptr);
+ temp_mmap_.reset(nullptr);
+ // Failed to mmap a large enough area (or on-demand dequickening was disabled). This is
+ // unfortunate, but since the size is currently just a guess anyway, we might as well fall
+ // back to getting the data manually.
get_original(/*out*/&dex_data_memory_);
dex_data_ = art::ArrayRef<const unsigned char>(dex_data_memory_);
if (from_class_ext_) {
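
The new InitWithDex()/InitializeMemory() pair implements a lazily published dex mapping: reserve the final range as PROT_NONE, fill a same-sized read-write scratch mapping, atomically move the scratch over the reservation, then drop to PROT_READ. A compressed sketch of that sequence using raw Linux primitives; ART routes this through MemMap::ReplaceWith (whose implementation is not part of this excerpt), so the mremap call below is an assumption about one way such a replace can be done:

    #include <cstddef>
    #include <cstring>
    #include <sys/mman.h>

    // Sketch only: 'dest' is the PROT_NONE reservation readers fault on,
    // 'scratch' is a same-sized MAP_PRIVATE|MAP_ANONYMOUS read-write mapping.
    bool PublishDexData(void* dest, void* scratch, const void* data, size_t size) {
      // Fill the scratch mapping while nobody can observe it.
      memcpy(scratch, data, size);
      // Atomically move scratch over dest. MREMAP_FIXED unmaps whatever was at
      // 'dest', so a faulting reader either still sees PROT_NONE (and retries
      // via the SEGV handler) or sees fully initialized bytes -- never a
      // half-written dex file.
      if (mremap(scratch, size, size, MREMAP_FIXED | MREMAP_MAYMOVE, dest) == MAP_FAILED) {
        return false;
      }
      // Mirrors the CHECK(dex_data_mmap_->Protect(PROT_READ)) above.
      return mprotect(dest, size, PROT_READ) == 0;
    }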
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index 00847394e7..31c3611e72 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -32,9 +32,14 @@
#ifndef ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
#define ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
#include "art_jvmti.h"
#include "base/array_ref.h"
+#include "mem_map.h"
namespace openjdkjvmti {
@@ -43,13 +48,20 @@ namespace openjdkjvmti {
// redefinition/retransformation function that created it.
class ArtClassDefinition {
public:
+ // Whether we support doing on-demand dex-dequickening using signal handlers.
+ static constexpr bool kEnableOnDemandDexDequicken = true;
+
ArtClassDefinition()
: klass_(nullptr),
loader_(nullptr),
name_(),
protection_domain_(nullptr),
+ dex_data_mmap_(nullptr),
+ temp_mmap_(nullptr),
dex_data_memory_(),
+ initial_dex_file_unquickened_(nullptr),
dex_data_(),
+ current_dex_memory_(),
current_dex_file_(),
redefined_(false),
from_class_ext_(false),
@@ -87,6 +99,12 @@ class ArtClassDefinition {
}
}
+ bool ContainsAddress(uintptr_t ptr) const {
+ return dex_data_mmap_ != nullptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->Begin()) <= ptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->End()) > ptr;
+ }
+
bool IsModified() const REQUIRES_SHARED(art::Locks::mutator_lock_);
bool IsInitialized() const {
@@ -108,6 +126,13 @@ class ArtClassDefinition {
return name_;
}
+ bool IsLazyDefinition() const {
+ DCHECK(IsInitialized());
+ return dex_data_mmap_ != nullptr &&
+ dex_data_.data() == dex_data_mmap_->Begin() &&
+ dex_data_mmap_->GetProtect() == PROT_NONE;
+ }
+
jobject GetProtectionDomain() const {
DCHECK(IsInitialized());
return protection_domain_;
@@ -118,6 +143,8 @@ class ArtClassDefinition {
return dex_data_;
}
+ void InitializeMemory() const;
+
private:
jvmtiError InitCommon(art::Thread* self, jclass klass);
@@ -130,9 +157,17 @@ class ArtClassDefinition {
std::string name_;
jobject protection_domain_;
+ // Mmap that will be lazily filled with the original dex file if it needs to be de-quickened
+ // or de-compact-dex'd.
+ mutable std::unique_ptr<art::MemMap> dex_data_mmap_;
+ // This is a temporary mmap we will use to be able to fill the dex file data atomically.
+ mutable std::unique_ptr<art::MemMap> temp_mmap_;
+
// A unique_ptr to the current dex_data if it needs to be cleaned up.
std::vector<unsigned char> dex_data_memory_;
+ const art::DexFile* initial_dex_file_unquickened_;
+
// A ref to the current dex data. This is either dex_data_memory_, or current_dex_file_. This is
// what the dex file will be turned into.
art::ArrayRef<const unsigned char> dex_data_;
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index af838d62b9..7b19a24ba4 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -29,6 +29,9 @@
* questions.
*/
+#include <stddef.h>
+#include <sys/types.h>
+
#include <unordered_map>
#include <unordered_set>
@@ -41,6 +44,7 @@
#include "dex/dex_file_types.h"
#include "dex/utf.h"
#include "events-inl.h"
+#include "fault_handler.h"
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
@@ -63,6 +67,174 @@
namespace openjdkjvmti {
+// A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
+class TransformationFaultHandler FINAL : public art::FaultHandler {
+ public:
+ explicit TransformationFaultHandler(art::FaultManager* manager)
+ : art::FaultHandler(manager),
+ uninitialized_class_definitions_lock_("JVMTI Initialized class definitions lock",
+ art::LockLevel::kSignalHandlingLock),
+ class_definition_initialized_cond_("JVMTI Initialized class definitions condition",
+ uninitialized_class_definitions_lock_) {
+ manager->AddHandler(this, /* generated_code */ false);
+ }
+
+ ~TransformationFaultHandler() {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.clear();
+ }
+
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ DCHECK_EQ(sig, SIGSEGV);
+ art::Thread* self = art::Thread::Current();
+ if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
+ if (self != nullptr) {
+ LOG(FATAL) << "Recursive call into Transformation fault handler!";
+ UNREACHABLE();
+ } else {
+ LOG(ERROR) << "Possible deadlock due to recursive signal delivery of segv.";
+ }
+ }
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(siginfo->si_addr);
+ ArtClassDefinition* res = nullptr;
+
+ {
+ // NB: Technically, using a mutex and condition variables here is not POSIX-compliant, but
+ // everything should be fine in practice since both the glibc and bionic implementations of
+ // mutexes and condition variables work so long as the thread was not interrupted during a
+ // lock/unlock (which it wasn't) on all architectures we care about.
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ auto it = std::find_if(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); });
+ if (it != uninitialized_class_definitions_.end()) {
+ res = *it;
+ // Remove the class definition.
+ uninitialized_class_definitions_.erase(it);
+ // Put it in the initializing list.
+ initializing_class_definitions_.push_back(res);
+ } else {
+ // Wait for the ptr to be initialized (if it is currently initializing).
+ while (DefinitionIsInitializing(ptr)) {
+ WaitForClassInitializationToFinish();
+ }
+ // Return true (continue with user code) if we find that the definition has been
+ // initialized. Return false (continue on to the next signal handler) if the faulting
+ // address does not belong to any known definition.
+ return std::find_if(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+ initialized_class_definitions_.end();
+ }
+ }
+
+ VLOG(signals) << "Lazy initialization of dex file for transformation of " << res->GetName()
+ << " during SEGV";
+ res->InitializeMemory();
+
+ {
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ // Move to initialized state and notify waiters.
+ initializing_class_definitions_.erase(std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ res));
+ initialized_class_definitions_.push_back(res);
+ class_definition_initialized_cond_.Broadcast(self);
+ }
+
+ return true;
+ }
+
+ void RemoveDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ auto it = std::find(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ def);
+ if (it != uninitialized_class_definitions_.end()) {
+ uninitialized_class_definitions_.erase(it);
+ return;
+ }
+ while (std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ def) != initializing_class_definitions_.end()) {
+ WaitForClassInitializationToFinish();
+ }
+ it = std::find(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ def);
+ CHECK(it != initialized_class_definitions_.end()) << "Could not find class definition for "
+ << def->GetName();
+ initialized_class_definitions_.erase(it);
+ }
+
+ void AddArtDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ DCHECK(def->IsLazyDefinition());
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.push_back(def);
+ }
+
+ private:
+ bool DefinitionIsInitializing(uintptr_t ptr) REQUIRES(uninitialized_class_definitions_lock_) {
+ return std::find_if(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+ initializing_class_definitions_.end();
+ }
+
+ void WaitForClassInitializationToFinish() REQUIRES(uninitialized_class_definitions_lock_) {
+ class_definition_initialized_cond_.Wait(art::Thread::Current());
+ }
+
+ art::Mutex uninitialized_class_definitions_lock_ ACQUIRED_BEFORE(art::Locks::abort_lock_);
+ art::ConditionVariable class_definition_initialized_cond_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of the class definitions that have a non-readable map.
+ std::vector<ArtClassDefinition*> uninitialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of class definitions that are currently undergoing unquickening. Threads should wait
+ // until the definition is no longer in this list before returning.
+ std::vector<ArtClassDefinition*> initializing_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of class definitions that are already unquickened. Threads should immediately return
+ // if the faulting definition is in this list.
+ std::vector<ArtClassDefinition*> initialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+};
+
+static TransformationFaultHandler* gTransformFaultHandler = nullptr;
+
+void Transformer::Setup() {
+ // Although we create it here, the fault handler is actually owned by 'art::fault_manager',
+ // which will take care of destroying it.
+ if (art::MemMap::kCanReplaceMapping && ArtClassDefinition::kEnableOnDemandDexDequicken) {
+ gTransformFaultHandler = new TransformationFaultHandler(&art::fault_manager);
+ }
+}
+
+// Simple helper to add and remove the class definition from the fault handler.
+class ScopedDefinitionHandler {
+ public:
+ explicit ScopedDefinitionHandler(ArtClassDefinition* def)
+ : def_(def), is_lazy_(def_->IsLazyDefinition()) {
+ if (is_lazy_) {
+ gTransformFaultHandler->AddArtDefinition(def_);
+ }
+ }
+
+ ~ScopedDefinitionHandler() {
+ if (is_lazy_) {
+ gTransformFaultHandler->RemoveDefinition(def_);
+ }
+ }
+
+ private:
+ ArtClassDefinition* def_;
+ bool is_lazy_;
+};
+
// Initialize templates.
template
void Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
@@ -78,6 +250,7 @@ void Transformer::TransformSingleClassDirect(EventHandler* event_handler,
static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable ||
kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable,
"bad event type");
+ ScopedDefinitionHandler handler(def);
jint new_len = -1;
unsigned char* new_data = nullptr;
art::ArrayRef<const unsigned char> dex_data = def->GetDexData();
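
For readers unfamiliar with art::FaultManager: it owns the real SIGSEGV trampoline and walks a chain of handlers, calling Action() on each until one returns true (resume the faulting thread) or all return false (fall through, e.g. to the crash reporter). A standalone sketch of that dispatch pattern follows; this is a simplified illustration, not ART's fault_handler.cc:

    #include <signal.h>

    static struct sigaction g_old_segv_action;

    static void SegvTrampoline(int sig, siginfo_t* info, void* context) {
      // Ask our handler first; returning true means the mapping behind
      // info->si_addr is now readable and the faulting load can be retried.
      if (gTransformFaultHandler != nullptr &&
          gTransformFaultHandler->Action(sig, info, context)) {
        return;
      }
      // Not ours: chain to the previously installed handler (or crash).
      if (g_old_segv_action.sa_flags & SA_SIGINFO) {
        g_old_segv_action.sa_sigaction(sig, info, context);
      }
    }

    void InstallSegvTrampoline() {
      struct sigaction sa = {};
      sa.sa_sigaction = SegvTrampoline;
      sa.sa_flags = SA_SIGINFO;  // We need si_addr to match faults against mappings.
      sigaction(SIGSEGV, &sa, &g_old_segv_action);
    }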
diff --git a/openjdkjvmti/transform.h b/openjdkjvmti/transform.h
index f43af174f0..8bbeda4b09 100644
--- a/openjdkjvmti/transform.h
+++ b/openjdkjvmti/transform.h
@@ -48,6 +48,8 @@ jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string*
class Transformer {
public:
+ static void Setup();
+
template<ArtJvmtiEvent kEvent>
static void TransformSingleClassDirect(
EventHandler* event_handler,
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index ff02b5d59f..a00b1fa5bd 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -31,12 +31,13 @@ static constexpr const uint32_t kMinNewClassesPercentChangeForCompilation = 2;
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file) {
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK(!profile_files.empty());
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file->Fd())) {
+ if (!info.Load(reference_profile_file->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -48,7 +49,7 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i]->Fd())) {
+ if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -122,7 +123,8 @@ class ScopedFlockList {
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<int>& profile_files_fd,
- int reference_profile_file_fd) {
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK_GE(reference_profile_file_fd, 0);
std::string error;
@@ -143,12 +145,15 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files.Get(), reference_profile_file);
+ return ProcessProfilesInternal(profile_files.Get(),
+ reference_profile_file,
+ filter_fn);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file) {
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
std::string error;
ScopedFlockList profile_files_list(profile_files.size());
@@ -164,7 +169,9 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_list.Get(), locked_reference_profile_file);
+ return ProcessProfilesInternal(profile_files_list.Get(),
+ locked_reference_profile_file,
+ filter_fn);
}
} // namespace art
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index be703abda8..ee555840d7 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -53,16 +53,21 @@ class ProfileAssistant {
//
static ProcessingResult ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file);
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
static ProcessingResult ProcessProfiles(
const std::vector<int>& profile_files_fd_,
- int reference_profile_file_fd);
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
private:
static ProcessingResult ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file);
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn);
DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
};
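
A caller-side sketch of the new filter parameter; the paths, dex location, and checksum below are made-up illustrative values, and ProfileFilterFnAcceptAll remains the default when no filter is passed:

    // Merge two profiles into a reference profile, keeping only records that
    // belong to one known dex file.
    std::vector<std::string> profile_paths = {"/data/misc/p1.prof", "/data/misc/p2.prof"};
    std::string reference_path = "/data/misc/ref.prof";
    ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
        [](const std::string& dex_location, uint32_t checksum) {
          return dex_location == "base.apk" && checksum == 0x12345678u;
        };
    ProfileAssistant::ProcessingResult res =
        ProfileAssistant::ProcessProfiles(profile_paths, reference_path, filter_fn);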
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index c75f3e9688..79310ac166 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include "android-base/strings.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
@@ -51,6 +52,28 @@ class ProfileAssistantTest : public CommonRuntimeTest {
uint32_t dex_location_checksum1 = checksum;
std::string dex_location2 = "location2" + id;
uint32_t dex_location_checksum2 = 10 * checksum;
+ SetupProfile(dex_location1,
+ dex_location_checksum1,
+ dex_location2,
+ dex_location_checksum2,
+ number_of_methods,
+ number_of_classes,
+ profile,
+ info,
+ start_method_index,
+ reverse_dex_write_order);
+ }
+
+ void SetupProfile(const std::string& dex_location1,
+ uint32_t dex_location_checksum1,
+ const std::string& dex_location2,
+ uint32_t dex_location_checksum2,
+ uint16_t number_of_methods,
+ uint16_t number_of_classes,
+ const ScratchFile& profile,
+ ProfileCompilationInfo* info,
+ uint16_t start_method_index = 0,
+ bool reverse_dex_write_order = false) {
for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
// reverse_dex_write_order controls the order in which the dex files will be added to
// the profile and thus written to disk.
@@ -1128,4 +1151,89 @@ TEST_F(ProfileAssistantTest, DumpOnly) {
}
}
+TEST_F(ProfileAssistantTest, MergeProfilesWithFilter) {
+ ScratchFile profile1;
+ ScratchFile profile2;
+ ScratchFile reference_profile;
+
+ std::vector<int> profile_fds({
+ GetFd(profile1),
+ GetFd(profile2)});
+ int reference_profile_fd = GetFd(reference_profile);
+
+ // Use a real dex file to generate profile test data.
+ // The file will be used during merging to filter unwanted data.
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
+ const DexFile& d1 = *dex_files[0];
+ const DexFile& d2 = *dex_files[1];
+ // The new profile info will contain the methods with indices 0-100.
+ const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+ ProfileCompilationInfo info1;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+ ProfileCompilationInfo info2;
+ SetupProfile(d2.GetLocation(), d2.GetLocationChecksum(), "p2", 2,
+ kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+
+
+ // The reference profile info will contain the methods with indices 50-150.
+ const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
+ ProfileCompilationInfo reference_info;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
+ &reference_info, kNumberOfMethodsToEnableCompilation / 2);
+
+ // Run profman and pass the dex file with --apk-fd.
+ android::base::unique_fd apk_fd(
+ open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+ ASSERT_GE(apk_fd.get(), 0);
+
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+ argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
+ argv_str.push_back("--apk-fd=" + std::to_string(apk_fd.get()));
+ std::string error;
+
+ EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+
+ // Verify that we can load the result.
+
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(result.Load(reference_profile_fd));
+
+
+ ASSERT_TRUE(profile1.GetFile()->ResetOffset());
+ ASSERT_TRUE(profile2.GetFile()->ResetOffset());
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+
+ // Verify that the result filtered out data not belonging to the dex file.
+ // This is equivalent to checking that the result is equal to the merging of
+ // all profiles while filtering out data not belonging to the dex file.
+
+ ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
+ [&d1, &d2](const std::string& dex_location, uint32_t checksum) -> bool {
+ return (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d1.GetLocation())
+ && checksum == d1.GetLocationChecksum())
+ || (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d2.GetLocation())
+ && checksum == d2.GetLocationChecksum());
+ };
+
+ ProfileCompilationInfo info1_filter;
+ ProfileCompilationInfo info2_filter;
+ ProfileCompilationInfo expected;
+
+ info1_filter.Load(profile1.GetFd(), /*merge_classes*/ true, filter_fn);
+ info2_filter.Load(profile2.GetFd(), /*merge_classes*/ true, filter_fn);
+ expected.Load(reference_profile.GetFd(), /*merge_classes*/ true, filter_fn);
+
+ ASSERT_TRUE(expected.MergeWith(info1_filter));
+ ASSERT_TRUE(expected.MergeWith(info2_filter));
+
+ ASSERT_TRUE(expected.Equals(result));
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index ea6c382a81..387ce8dfae 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>
+#include <sys/param.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -297,6 +298,22 @@ class ProfMan FINAL {
}
}
+ struct ProfileFilterKey {
+ ProfileFilterKey(const std::string& dex_location, uint32_t checksum)
+ : dex_location_(dex_location), checksum_(checksum) {}
+ const std::string dex_location_;
+ uint32_t checksum_;
+
+ bool operator==(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_ && dex_location_ == other.dex_location_;
+ }
+ bool operator<(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_
+ ? dex_location_ < other.dex_location_
+ : checksum_ < other.checksum_;
+ }
+ };
+
ProfileAssistant::ProcessingResult ProcessProfiles() {
// Validate that at least one profile file was passed, as well as a reference profile.
if (profile_files_.empty() && profile_files_fd_.empty()) {
@@ -310,6 +327,27 @@ class ProfMan FINAL {
Usage("Options --profile-file-fd and --reference-profile-file-fd "
"should only be used together");
}
+
+ // Check if we have any apks which we should use to filter the profile data.
+ std::set<ProfileFilterKey> profile_filter_keys;
+ if (!GetProfileFilterKeyFromApks(&profile_filter_keys)) {
+ return ProfileAssistant::kErrorIO;
+ }
+
+ // Build the profile filter function. If the set of keys is empty it means we
+ // don't have any apks; as such we do not filter anything.
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn =
+ [profile_filter_keys](const std::string& dex_location, uint32_t checksum) {
+ if (profile_filter_keys.empty()) {
+ // No --apk was specified. Accept all dex files.
+ return true;
+ } else {
+ bool res = profile_filter_keys.find(
+ ProfileFilterKey(dex_location, checksum)) != profile_filter_keys.end();
+ return res;
+ }
+ };
+
ProfileAssistant::ProcessingResult result;
if (profile_files_.empty()) {
@@ -317,10 +355,13 @@ class ProfMan FINAL {
// so don't check the usage.
File file(reference_profile_file_fd_, false);
result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
- reference_profile_file_fd_);
+ reference_profile_file_fd_,
+ filter_fn);
CloseAllFds(profile_files_fd_, "profile_files_fd_");
} else {
- result = ProfileAssistant::ProcessProfiles(profile_files_, reference_profile_file_);
+ result = ProfileAssistant::ProcessProfiles(profile_files_,
+ reference_profile_file_,
+ filter_fn);
}
return result;
}
@@ -329,18 +370,48 @@ class ProfMan FINAL {
return skip_apk_verification_;
}
- void OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ bool GetProfileFilterKeyFromApks(std::set<ProfileFilterKey>* profile_filter_keys) {
+ auto process_fn = [profile_filter_keys](std::unique_ptr<const DexFile>&& dex_file) {
+ // Store the profile key of the location instead of the location itself.
+ // This will make the matching in the profile filter method much easier.
+ profile_filter_keys->emplace(ProfileCompilationInfo::GetProfileDexFileKey(
+ dex_file->GetLocation()), dex_file->GetLocationChecksum());
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ auto process_fn = [dex_files](std::unique_ptr<const DexFile>&& dex_file) {
+ dex_files->emplace_back(std::move(dex_file));
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(
+ std::function<void(std::unique_ptr<const DexFile>&&)> process_fn) {
bool use_apk_fd_list = !apks_fd_.empty();
if (use_apk_fd_list) {
// Get the APKs from the collection of FDs.
- CHECK_EQ(dex_locations_.size(), apks_fd_.size());
+ if (dex_locations_.empty()) {
+ // Try to compute the dex locations from the file paths of the file descriptors.
+ // This makes it easier to invoke profman with --apk-fd without being forced
+ // to pass --dex-location when the location would be the apk path anyway.
+ if (!ComputeDexLocationsFromApkFds()) {
+ return false;
+ }
+ } else {
+ if (dex_locations_.size() != apks_fd_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
+ }
} else if (!apk_files_.empty()) {
- // Get the APKs from the collection of filenames.
- CHECK_EQ(dex_locations_.size(), apk_files_.size());
+ if (dex_locations_.size() != apk_files_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
} else {
// No APKs were specified.
CHECK(dex_locations_.empty());
- return;
+ return true;
}
static constexpr bool kVerifyChecksum = true;
for (size_t i = 0; i < dex_locations_.size(); ++i) {
@@ -355,8 +426,8 @@ class ProfMan FINAL {
&error_msg,
&dex_files_for_location)) {
} else {
- LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
} else {
if (dex_file_loader.Open(apk_files_[i].c_str(),
@@ -366,14 +437,36 @@ class ProfMan FINAL {
&error_msg,
&dex_files_for_location)) {
} else {
- LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
}
for (std::unique_ptr<const DexFile>& dex_file : dex_files_for_location) {
- dex_files->emplace_back(std::move(dex_file));
+ process_fn(std::move(dex_file));
}
}
+ return true;
+ }
+
+ // Get the dex locations from the apk fds.
+ // The method reads the symlinks in /proc/self/fd/ to find the original apk paths
+ // and puts them in the dex_locations_ vector.
+ bool ComputeDexLocationsFromApkFds() {
+ // We can't use a char array of PATH_MAX size without exceeding the frame size.
+ // So we use a vector as the buffer for the path.
+ std::vector<char> buffer(PATH_MAX, 0);
+ for (size_t i = 0; i < apks_fd_.size(); ++i) {
+ std::string fd_path = "/proc/self/fd/" + std::to_string(apks_fd_[i]);
+ ssize_t len = readlink(fd_path.c_str(), buffer.data(), buffer.size() - 1);
+ if (len == -1) {
+ PLOG(ERROR) << "Could not open path from fd";
+ return false;
+ }
+
+ buffer[len] = '\0';
+ dex_locations_.push_back(buffer.data());
+ }
+ return true;
}
std::unique_ptr<const ProfileCompilationInfo> LoadProfile(const std::string& filename, int fd) {
@@ -416,8 +509,6 @@ class ProfMan FINAL {
static const char* kOrdinaryProfile = "=== profile ===";
static const char* kReferenceProfile = "=== reference profile ===";
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
std::string dump;
@@ -553,8 +644,7 @@ class ProfMan FINAL {
reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
Usage("No profile files or reference profile specified.");
}
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
+
// Open the dex files to get the names for classes.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -948,8 +1038,6 @@ class ProfMan FINAL {
Usage("Profile must be specified with --reference-profile-file or "
"--reference-profile-file-fd");
}
- // for ZipArchive::OpenFromFd
- MemMap::Init();
// Open the profile output file if needed.
int fd = OpenReferenceProfile();
if (!FdIsValid(fd)) {
@@ -984,8 +1072,6 @@ class ProfMan FINAL {
}
int CreateBootProfile() {
- // Initialize memmap since it's required to open dex files.
- MemMap::Init();
// Open the profile output file.
const int reference_fd = OpenReferenceProfile();
if (!FdIsValid(reference_fd)) {
@@ -1065,8 +1151,6 @@ class ProfMan FINAL {
test_profile_class_percentage_,
test_profile_seed_);
} else {
- // Initialize MemMap for ZipArchive::OpenFromFd.
- MemMap::Init();
// Open the dex files to look up classes and methods.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -1089,7 +1173,7 @@ class ProfMan FINAL {
return copy_and_update_profile_key_;
}
- bool CopyAndUpdateProfileKey() const {
+ bool CopyAndUpdateProfileKey() {
// Validate that at least one profile file was passed, as well as a reference profile.
if (!(profile_files_.size() == 1 ^ profile_files_fd_.size() == 1)) {
Usage("Only one profile file should be specified.");
@@ -1133,7 +1217,8 @@ class ProfMan FINAL {
static void CloseAllFds(const std::vector<int>& fds, const char* descriptor) {
for (size_t i = 0; i < fds.size(); i++) {
if (close(fds[i]) < 0) {
- PLOG(WARNING) << "Failed to close descriptor for " << descriptor << " at index " << i;
+ PLOG(WARNING) << "Failed to close descriptor for "
+ << descriptor << " at index " << i << ": " << fds[i];
}
}
}
@@ -1176,6 +1261,9 @@ static int profman(int argc, char** argv) {
// Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
profman.ParseArgs(argc, argv);
+ // Initialize MemMap for ZipArchive::OpenFromFd.
+ MemMap::Init();
+
if (profman.ShouldGenerateTestProfile()) {
return profman.GenerateTestProfile();
}
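
The /proc/self/fd trick from ComputeDexLocationsFromApkFds() above, distilled into a self-contained helper for reference (a sketch; Linux-only, and note that readlink() does not NUL-terminate and the kernel appends " (deleted)" to paths of unlinked files):

    #include <climits>
    #include <string>
    #include <unistd.h>
    #include <vector>

    std::string PathOfFd(int fd) {
      // Heap buffer instead of a PATH_MAX char array, as in the hunk above.
      std::vector<char> buffer(PATH_MAX, 0);
      std::string link = "/proc/self/fd/" + std::to_string(fd);
      ssize_t len = readlink(link.c_str(), buffer.data(), buffer.size() - 1);
      if (len < 0) {
        return "";  // Bad fd, or no Linux-style procfs.
      }
      buffer[len] = '\0';  // readlink() does not NUL-terminate.
      return std::string(buffer.data());
    }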
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index bd51809c22..4be4b12611 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -186,10 +186,9 @@ class StubTest : public CommonRuntimeTest {
"stp x2, x3, [sp, #16]\n\t"
"stp x4, x5, [sp, #32]\n\t"
"stp x6, x7, [sp, #48]\n\t"
- // To be extra defensive, store x20. We do this because some of the stubs might make a
+ // To be extra defensive, store x20 and x21. We do this because some of the stubs might make a
// transition into the runtime via the blr instruction below and *not* save x20.
- "str x20, [sp, #64]\n\t"
- // 8 byte buffer
+ "stp x20, x21, [sp, #64]\n\t"
"sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
".cfi_adjust_cfa_offset 16\n\t"
@@ -288,7 +287,7 @@ class StubTest : public CommonRuntimeTest {
"ldp x2, x3, [sp, #16]\n\t"
"ldp x4, x5, [sp, #32]\n\t"
"ldp x6, x7, [sp, #48]\n\t"
- "ldr x20, [sp, #64]\n\t"
+ "ldp x20, x21, [sp, #64]\n\t"
"add sp, sp, #80\n\t" // Free stack space, now sp as on entry
".cfi_adjust_cfa_offset -80\n\t"
@@ -312,8 +311,9 @@ class StubTest : public CommonRuntimeTest {
// -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
// which means we should unclobber one of the callee-saved registers that are unused.
// Here we use x20.
+ // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
- "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
+ "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 9f17ad051c..a4c32dd814 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -74,6 +74,7 @@ Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
@@ -1073,6 +1074,7 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ != nullptr);
DCHECK(user_code_suspension_lock_ != nullptr);
DCHECK(dex_lock_ != nullptr);
+ DCHECK(native_debug_interface_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1228,6 +1230,10 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+ UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+ DCHECK(native_debug_interface_lock_ == nullptr);
+ native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d541b79a98..bf27b7f17c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -58,10 +58,12 @@ class Thread;
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
+ kNativeDebugInterfaceLock,
kSwapMutexesLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
+ kSignalHandlingLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
@@ -745,8 +747,11 @@ class Locks {
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+ // Guards the magic global variables used by native tools (e.g. libunwind).
+ static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
// Have an exclusive logging thread.
- static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+ static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
// List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
// avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
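The new level sits directly above kLoggingLock, making it the second-lowest lock in the hierarchy, so it can still be acquired while holding almost any other lock; a minimal usage sketch mirroring the call sites in this change (code_address is illustrative):

    // Code touching the native debug interface globals must hold the new lock.
    MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
    JITCodeEntry* entry = GetJITCodeEntry(code_address);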
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ae707f0eca..800427d6ab 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -73,6 +73,7 @@
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "java_vm_ext.h"
+#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profile_compilation_info.h"
@@ -3432,6 +3433,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
data.weak_root = dex_cache_jweak;
data.dex_file = dex_cache->GetDexFile();
data.class_table = ClassTableForClassLoader(class_loader);
+ RegisterDexFileForNative(self, data.dex_file->Begin());
DCHECK(data.class_table != nullptr);
// Make sure to hold the dex cache live in the class table. This case happens for the boot class
// path dex caches without an image.
diff --git a/runtime/dex/compact_dex_file.h b/runtime/dex/compact_dex_file.h
index 1ecff04cba..31aeb27872 100644
--- a/runtime/dex/compact_dex_file.h
+++ b/runtime/dex/compact_dex_file.h
@@ -245,6 +245,12 @@ class CompactDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+ // TODO: This is completely a guess. We really need to do better. b/72402467
+ // We ask for 64 megabytes, which should be big enough for any realistic dex file.
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return 64 * MB;
+ }
+
const Header& GetHeader() const {
return down_cast<const Header&>(DexFile::GetHeader());
}
diff --git a/runtime/dex/dex_file.h b/runtime/dex/dex_file.h
index 7e2fe98923..cf8c840b59 100644
--- a/runtime/dex/dex_file.h
+++ b/runtime/dex/dex_file.h
@@ -456,6 +456,13 @@ class DexFile {
// Returns true if the dex file supports default methods.
virtual bool SupportsDefaultMethods() const = 0;
+ // Returns the maximum size in bytes needed to store an equivalent dex file strictly conforming to
+ // the dex file specification. That is, the size we would need if we got rid of all the
+ // quickening/compact-dexing/etc.
+ //
+ // TODO This should really be an exact size! b/72402467
+ virtual size_t GetDequickenedSize() const = 0;
+
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
DCHECK(header_ != nullptr) << GetLocation();
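A sketch of a hypothetical consumer of the new interface; the upper bound lets a caller size an output buffer before producing a specification-conforming dex file (dex_file and the buffer use are illustrative):

    // GetDequickenedSize() is only an upper bound today (see b/72402467):
    // StandardDexFile returns its exact Size(), CompactDexFile returns 64 MB.
    std::vector<uint8_t> out;
    out.reserve(dex_file->GetDequickenedSize());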
diff --git a/runtime/dex/standard_dex_file.h b/runtime/dex/standard_dex_file.h
index 94ef1f2a8e..e0e9f2f11c 100644
--- a/runtime/dex/standard_dex_file.h
+++ b/runtime/dex/standard_dex_file.h
@@ -83,6 +83,10 @@ class StandardDexFile : public DexFile {
uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return Size();
+ }
+
private:
StandardDexFile(const uint8_t* base,
size_t size,
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 9d6e5de803..3015b10103 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -17,6 +17,7 @@
#include "fault_handler.h"
#include <setjmp.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
@@ -183,8 +184,31 @@ bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* co
return false;
}
+static const char* SignalCodeName(int sig, int code) {
+ if (sig != SIGSEGV) {
+ return "UNKNOWN";
+ } else {
+ switch (code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+ default: return "UNKNOWN";
+ }
+ }
+}
+static std::ostream& PrintSignalInfo(std::ostream& os, siginfo_t* info) {
+ os << " si_signo: " << info->si_signo << " (" << strsignal(info->si_signo) << ")\n"
+ << " si_code: " << info->si_code
+ << " (" << SignalCodeName(info->si_signo, info->si_code) << ")";
+ if (info->si_signo == SIGSEGV) {
+ os << "\n" << " si_addr: " << info->si_addr;
+ }
+ return os;
+}
+
bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
- VLOG(signals) << "Handling fault";
+ if (VLOG_IS_ON(signals)) {
+ PrintSignalInfo(VLOG_STREAM(signals) << "Handling fault:" << "\n", info);
+ }
#ifdef TEST_NESTED_SIGNAL
// Simulate a crash in a handler.
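Both helpers are file-static, so the snippet below is only a sketch of their behavior on a hand-filled siginfo_t (all values are arbitrary):

    siginfo_t info = {};
    info.si_signo = SIGSEGV;
    info.si_code = SEGV_MAPERR;
    info.si_addr = reinterpret_cast<void*>(0x1000);
    // Prints the si_signo line ("11 (Segmentation fault)" on Linux), the
    // si_code line with "SEGV_MAPERR", and, since this is SIGSEGV, si_addr.
    PrintSignalInfo(std::cerr, &info);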
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 0e295e2442..d60f70a54f 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -68,11 +68,65 @@ extern "C" {
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
+
+ // Incremented whenever __jit_debug_descriptor is modified.
+ uint32_t __jit_debug_descriptor_timestamp = 0;
+
+ struct DEXFileEntry {
+ DEXFileEntry* next_;
+ DEXFileEntry* prev_;
+ const void* dexfile_;
+ };
+
+ DEXFileEntry* __art_debug_dexfiles = nullptr;
+
+ // Incremented whenever __art_debug_dexfiles is modified.
+ uint32_t __art_debug_dexfiles_timestamp = 0;
}
-Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+static size_t g_jit_debug_mem_usage
+ GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
+
+static std::unordered_map<const void*, DEXFileEntry*> g_dexfile_entries
+ GUARDED_BY(Locks::native_debug_interface_lock_);
+
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ if (g_dexfile_entries.count(dexfile_header) == 0) {
+ DEXFileEntry* entry = new DEXFileEntry();
+ CHECK(entry != nullptr);
+ entry->dexfile_ = dexfile_header;
+ entry->prev_ = nullptr;
+ entry->next_ = __art_debug_dexfiles;
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry;
+ }
+ __art_debug_dexfiles = entry;
+ __art_debug_dexfiles_timestamp++;
+ g_dexfile_entries.emplace(dexfile_header, entry);
+ }
+}
-static size_t g_jit_debug_mem_usage = 0;
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ auto it = g_dexfile_entries.find(dexfile_header);
+ // We register dex files in the class linker and free them in DexFile_closeDexFile, but there
+ // might be cases where we load a dex file without using it in the class linker.
+ if (it != g_dexfile_entries.end()) {
+ DEXFileEntry* entry = it->second;
+ if (entry->prev_ != nullptr) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __art_debug_dexfiles = entry->next_;
+ }
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry->prev_;
+ }
+ __art_debug_dexfiles_timestamp++;
+ delete entry;
+ g_dexfile_entries.erase(it);
+ }
+}
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
DCHECK_NE(symfile.size(), 0u);
@@ -96,6 +150,7 @@ JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
__jit_debug_descriptor.first_entry_ = entry;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
return entry;
}
@@ -114,6 +169,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
g_jit_debug_mem_usage -= sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
delete[] entry->symfile_addr_;
delete entry;
@@ -121,7 +177,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
// Mapping from code address to entry. Used to manage life-time of the entries.
static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries
- GUARDED_BY(g_jit_debug_mutex);
+ GUARDED_BY(Locks::native_debug_interface_lock_);
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
DCHECK(entry != nullptr);
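A hedged sketch of how a native tool might walk the exported list; InspectDexFile is a hypothetical callback, and re-reading the timestamp approximates a consistency check since an external tool cannot take the runtime's lock:

    uint32_t before = __art_debug_dexfiles_timestamp;
    for (DEXFileEntry* entry = __art_debug_dexfiles; entry != nullptr; entry = entry->next_) {
      InspectDexFile(entry->dexfile_);  // hypothetical consumer callback
    }
    if (before != __art_debug_dexfiles_timestamp) {
      // The list changed mid-walk; retry or discard the results.
    }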
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 9aec988f67..8c4bb3fdf4 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -30,36 +30,42 @@ extern "C" {
struct JITCodeEntry;
}
-extern Mutex g_jit_debug_mutex;
+// Notify native tools (e.g. libunwind) that a DEX file has been opened.
+// The pointer needs to point to the start of the dex data (not the DexFile* object).
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
+
+// Notify native tools (e.g. libunwind) that a DEX file has been closed.
+// The pointer needs to point to the start of the dex data (not the DexFile* object).
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
// Notify native debugger about new JITed code by passing in-memory ELF.
// It takes ownership of the in-memory ELF file.
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Notify native debugger that JITed code has been removed.
// It also releases the associated in-memory ELF file.
void DeleteJITCodeEntry(JITCodeEntry* entry)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It registers given code address as being described by the given entry.
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It de-registers given code address as being described by the given entry.
void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Find the registered JITCodeEntry for given code address.
// There can be only one entry per address at any given time.
JITCodeEntry* GetJITCodeEntry(uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Returns approximate memory used by all JITCodeEntries.
size_t GetJITCodeEntryMemUsage()
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
} // namespace art
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7f0447732e..c8c13cb20f 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -549,7 +549,7 @@ void JitCodeCache::FreeCode(const void* code_ptr) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = GetJITCodeEntry(reinterpret_cast<uintptr_t>(code_ptr));
if (entry != nullptr) {
DecrementJITCodeEntryRefcount(entry, reinterpret_cast<uintptr_t>(code_ptr));
@@ -1825,7 +1825,7 @@ void JitCodeCache::FreeData(uint8_t* data) {
void JitCodeCache::Dump(std::ostream& os) {
MutexLock mu(Thread::Current(), lock_);
- MutexLock mu2(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
<< "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
<< "Current JIT mini-debug-info size: " << PrettySize(GetJITCodeEntryMemUsage()) << "\n"
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 55e9c390cd..26acef06d6 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -396,6 +396,91 @@ MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
+template<typename A, typename B>
+static ptrdiff_t PointerDiff(A* a, B* b) {
+ return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
+}
+
+bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+#if !HAVE_MREMAP_SYSCALL
+ UNUSED(source_ptr);
+ *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
+ return false;
+#else // !HAVE_MREMAP_SYSCALL
+ CHECK(source_ptr != nullptr);
+ CHECK(*source_ptr != nullptr);
+ if (!MemMap::kCanReplaceMapping) {
+ *error = "Unable to perform atomic replace due to runtime environment!";
+ return false;
+ }
+ MemMap* source = *source_ptr;
+ // Neither mapping may be a reuse mapping.
+ if (source->reuse_ || reuse_) {
+ *error = "One or both mappings is not a real mmap!";
+ return false;
+ }
+ // TODO Support redzones.
+ if (source->redzone_size_ != 0 || redzone_size_ != 0) {
+ *error = "source and dest have different redzone sizes";
+ return false;
+ }
+ // Make sure they have the same offset from the actual mmap'd address.
+ if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
+ *error =
+ "source starts at a different offset from the mmap. Cannot atomically replace mappings";
+ return false;
+ }
+ // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (like
+ // memcpy's no-overlap rule, but here the check is explicit and actually enforced).
+ if (source->BaseBegin() > BaseBegin() &&
+ reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
+ reinterpret_cast<uint8_t*>(source->BaseBegin())) {
+ *error = "destination memory pages overlap with source memory pages";
+ return false;
+ }
+ // Change the protection to match the new location.
+ int old_prot = source->GetProtect();
+ if (!source->Protect(GetProtect())) {
+ *error = "Could not change protections for source to those required for dest.";
+ return false;
+ }
+
+ // Do the mremap.
+ void* res = mremap(/*old_address*/source->BaseBegin(),
+ /*old_size*/source->BaseSize(),
+ /*new_size*/source->BaseSize(),
+ /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
+ /*new_address*/BaseBegin());
+ if (res == MAP_FAILED) {
+ int saved_errno = errno;
+ // Wasn't able to move mapping. Change the protection of source back to the original one and
+ // return.
+ source->Protect(old_prot);
+ *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
+ return false;
+ }
+ CHECK(res == BaseBegin());
+
+ // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will unmap
+ // them later.
+ size_t new_base_size = std::max(source->base_size_, base_size_);
+
+ // Delete the old source, don't unmap it though (set reuse) since it is already gone.
+ *source_ptr = nullptr;
+ size_t source_size = source->size_;
+ source->already_unmapped_ = true;
+ delete source;
+ source = nullptr;
+
+ size_ = source_size;
+ base_size_ = new_base_size;
+ // Reduce base_size if needed (this will unmap the extra pages).
+ SetSize(source_size);
+
+ return true;
+#endif // !HAVE_MREMAP_SYSCALL
+}
+
MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
size_t byte_count,
int prot,
@@ -499,9 +584,11 @@ MemMap::~MemMap() {
if (!reuse_) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
- int result = munmap(base_begin_, base_size_);
- if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ if (!already_unmapped_) {
+ int result = munmap(base_begin_, base_size_);
+ if (result == -1) {
+ PLOG(FATAL) << "munmap failed";
+ }
}
}
@@ -523,7 +610,7 @@ MemMap::~MemMap() {
MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
size_t base_size, int prot, bool reuse, size_t redzone_size)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
- prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
+ prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
if (size_ == 0) {
CHECK(begin_ == nullptr);
CHECK(base_begin_ == nullptr);
@@ -794,19 +881,21 @@ void MemMap::Shutdown() {
}
void MemMap::SetSize(size_t new_size) {
- if (new_size == base_size_) {
+ CHECK_LE(new_size, size_);
+ size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
+ kPageSize);
+ if (new_base_size == base_size_) {
+ size_ = new_size;
return;
}
- CHECK_ALIGNED(new_size, kPageSize);
- CHECK_EQ(base_size_, size_) << "Unsupported";
- CHECK_LE(new_size, base_size_);
+ CHECK_LT(new_base_size, base_size_);
MEMORY_TOOL_MAKE_UNDEFINED(
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
- new_size),
- base_size_ - new_size);
- CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
- base_size_ - new_size), 0) << new_size << " " << base_size_;
- base_size_ = new_size;
+ new_base_size),
+ base_size_ - new_base_size);
+ CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+ base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+ base_size_ = new_base_size;
size_ = new_size;
}
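A worked example of the new SetSize() arithmetic, assuming 4 KiB pages and Begin() == BaseBegin(): shrinking a 3-page mapping to 4097 bytes rounds the base size up to the next page boundary, so only the last page is unmapped:

    // new_base_size = RoundUp(4097 + 0, 4096) = 8192, so munmap trims one page;
    // afterwards size_ == 4097 and base_size_ == 8192.
    map->SetSize(4097);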
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 5603963eac..0ecb414614 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -39,8 +39,12 @@ namespace art {
#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
+#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
+// We can never perform MemMap::ReplaceWith on non-Linux hosts since the mremap syscall is not
+// present.
+#define HAVE_MREMAP_SYSCALL false
#endif
// Used to keep track of mmap segments.
@@ -52,6 +56,32 @@ static constexpr bool kMadviseZeroes = false;
// Otherwise, calls might see uninitialized values.
class MemMap {
public:
+ static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
+
+ // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
+ // relinquishes ownership of the source mmap.
+ //
+ // For the call to be successful:
+ // * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
+ // [source->Begin(), source->End()].
+ // * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
+ // with them).
+ // * kCanReplaceMapping must be true.
+ // * Neither source nor dest may use manual redzones.
+ // * Both source and dest must have the same offset from the nearest page boundary.
+ // * mremap must succeed when called on the mappings.
+ //
+ // If this call succeeds it will return true and:
+ // * Deallocates *source.
+ // * Sets *source to nullptr.
+ // * Leaves the protection of this unchanged.
+ // * Sets the size of this to the size of the source.
+ // * Replaces the data in this with the data from source.
+ //
+ // If this call fails it will return false and make no changes to *source or this; ownership
+ // of the source mmap remains with the caller.
+ bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
@@ -246,6 +276,9 @@ class MemMap {
// unmapping.
const bool reuse_;
+ // When already_unmapped_ is true the destructor will not call munmap.
+ bool already_unmapped_;
+
const size_t redzone_size_;
#if USE_ART_LOW_4G_ALLOCATOR
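A minimal usage sketch under the contract above (error handling trimmed; the new tests in mem_map_test.cc below exercise the full behavior):

    std::string error;
    std::unique_ptr<MemMap> dest(MemMap::MapAnonymous(
        "replace-dest", nullptr, kPageSize, PROT_READ, /*low_4gb*/ false, /*reuse*/ false, &error));
    MemMap* source = MemMap::MapAnonymous(
        "replace-source", nullptr, kPageSize, PROT_READ | PROT_WRITE, false, false, &error);
    if (dest->ReplaceWith(&source, &error)) {
      // source is now nullptr; dest keeps its own protection and holds source's data.
    }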
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a4ebb16d09..3adbf18a7a 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -19,6 +19,7 @@
#include <sys/mman.h>
#include <memory>
+#include <random>
#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"
@@ -36,6 +37,25 @@ class MemMapTest : public CommonRuntimeTest {
return mem_map->base_size_;
}
+ static bool IsAddressMapped(void* addr) {
+ bool res = msync(addr, 1, MS_SYNC) == 0;
+ if (!res && errno != ENOMEM) {
+ PLOG(FATAL) << "Unexpected error occurred on msync";
+ }
+ return res;
+ }
+
+ static std::vector<uint8_t> RandomData(size_t size) {
+ std::random_device rd;
+ std::uniform_int_distribution<uint8_t> dist;
+ std::vector<uint8_t> res;
+ res.resize(size);
+ for (size_t i = 0; i < size; i++) {
+ res[i] = dist(rd);
+ }
+ return res;
+ }
+
static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
// Find a valid map address and unmap it before returning.
std::string error_msg;
@@ -143,6 +163,186 @@ TEST_F(MemMapTest, Start) {
}
#endif
+// We need mremap to be able to test ReplaceMapping at all.
+#if HAVE_MREMAP_SYSCALL
+TEST_F(MemMapTest, ReplaceMapping_SameSize) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ void* source_addr = source->Begin();
+ void* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 5 * kPageSize, // Need to make it larger
+ // initially so we know
+ // there won't be mappings
+ // in the way when we move
+ // source.
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ 3 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source with random data.
+ std::vector<uint8_t> data = RandomData(3 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ // Make the dest smaller so that we know we'll have space.
+ dest->SetSize(kPageSize);
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), kPageSize);
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(
+ MemMap::MapAnonymous(
+ "MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
+ // the way when we move source.
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ // Resize down to 1 page so we can remap the rest.
+ dest->SetSize(kPageSize);
+ // Create source from the last 2 pages.
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ dest->Begin() + kPageSize,
+ 2 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ MemMap* orig_source = source;
+ ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source and dest with random data.
+ std::vector<uint8_t> data = RandomData(2 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+ std::vector<uint8_t> dest_data = RandomData(kPageSize);
+ memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_TRUE(source == orig_source);
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(source->Size(), data.size());
+ ASSERT_EQ(dest->Size(), dest_data.size());
+
+ ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
+ ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
+
+ delete source;
+}
+#endif // HAVE_MREMAP_SYSCALL
+
TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 0f430874cf..6ea9a7ad62 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -30,6 +30,7 @@
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "jit/debugger_interface.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -331,6 +332,7 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
int32_t i = kDexFileIndexStart; // Oat file is at index 0.
for (const DexFile* dex_file : dex_files) {
if (dex_file != nullptr) {
+ DeregisterDexFileForNative(soa.Self(), dex_file->Begin());
// Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
// are calls to DexFile.close while the ART DexFile is still in use.
if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
diff --git a/test/603-checker-instanceof/src/Main.java b/test/603-checker-instanceof/src/Main.java
index 1487969c03..2c97bedbaa 100644
--- a/test/603-checker-instanceof/src/Main.java
+++ b/test/603-checker-instanceof/src/Main.java
@@ -59,7 +59,7 @@ public class Main {
/// CHECK: InstanceOf check_kind:exact_check
/// CHECK-NOT: {{.*gs:.*}}
- /// CHECK-START-{ARM,ARM64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
+ /// CHECK-START-{ARM,ARM64,MIPS,MIPS64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
/// CHECK: InstanceOf check_kind:exact_check
// For ARM and ARM64, the marking register (r8 and x20, respectively) can be used in
// non-CC configs for any other purpose, so we'd need a config-specific checker test.
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
index c076d1521f..dfefce207b 100644
--- a/test/983-source-transform-verify/source_transform.cc
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -67,6 +67,14 @@ void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
if (IsJVM()) {
return;
}
+
+ // Due to b/72402467, class_data_len might just be an estimate.
+ CHECK_GE(static_cast<size_t>(class_data_len), sizeof(DexFile::Header));
+ const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(class_data);
+ uint32_t header_file_size = header->file_size_;
+ CHECK_LE(static_cast<jint>(header_file_size), class_data_len);
+ class_data_len = static_cast<jint>(header_file_size);
+
const ArtDexFileLoader dex_file_loader;
std::string error;
std::unique_ptr<const DexFile> dex(dex_file_loader.Open(class_data,
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ea2d464d24..02438701b8 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -849,7 +849,7 @@ if [ "$HOST" = "n" ]; then
fi
# System libraries needed by libarttestd.so
- PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libbase.so:libnativehelper.so
+ PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libdexfile.so:libbase.so:libnativehelper.so
# Create a script with the command. The command can get longer than the longest
# allowed adb command and there is no way to get the exit status from a adb shell