-rw-r--r--  compiler/common_compiler_test.cc | 20
-rw-r--r--  compiler/common_compiler_test.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 493
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 51
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc | 179
-rw-r--r--  compiler/utils/swap_space.cc | 1
-rw-r--r--  dex2oat/dex2oat.cc | 37
-rw-r--r--  dex2oat/linker/image_test.h | 6
-rw-r--r--  dex2oat/linker/image_writer.cc | 52
-rw-r--r--  dex2oat/linker/image_writer.h | 4
-rw-r--r--  dex2oat/linker/oat_writer.cc | 30
-rw-r--r--  dex2oat/linker/oat_writer.h | 4
-rw-r--r--  dex2oat/linker/oat_writer_test.cc | 6
-rw-r--r--  libartbase/base/mem_map.cc | 259
-rw-r--r--  libartbase/base/mem_map.h | 139
-rw-r--r--  libartbase/base/mem_map_test.cc | 754
-rw-r--r--  libartbase/base/zip_archive.cc | 58
-rw-r--r--  libartbase/base/zip_archive.h | 15
-rw-r--r--  libdexfile/dex/art_dex_file_loader.cc | 77
-rw-r--r--  libdexfile/dex/art_dex_file_loader.h | 2
-rw-r--r--  libprofile/profile/profile_compilation_info.cc | 16
-rw-r--r--  libprofile/profile/profile_compilation_info.h | 8
-rw-r--r--  oatdump/oatdump.cc | 8
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.cc | 5
-rw-r--r--  openjdkjvmti/ti_class.cc | 12
-rw-r--r--  openjdkjvmti/ti_class_definition.cc | 65
-rw-r--r--  openjdkjvmti/ti_class_definition.h | 20
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 42
-rw-r--r--  openjdkjvmti/ti_redefine.h | 6
-rw-r--r--  patchoat/patchoat.cc | 42
-rw-r--r--  patchoat/patchoat.h | 6
-rw-r--r--  runtime/arch/arm/asm_support_arm.h | 5
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 5
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 23
-rw-r--r--  runtime/base/mem_map_arena_pool.cc | 34
-rw-r--r--  runtime/base/mutex.cc | 8
-rw-r--r--  runtime/base/mutex.h | 12
-rw-r--r--  runtime/cha.cc | 74
-rw-r--r--  runtime/dexopt_test.cc | 17
-rw-r--r--  runtime/dexopt_test.h | 2
-rw-r--r--  runtime/elf_file.cc | 110
-rw-r--r--  runtime/elf_file_impl.h | 12
-rw-r--r--  runtime/gc/accounting/atomic_stack.h | 19
-rw-r--r--  runtime/gc/accounting/bitmap.cc | 47
-rw-r--r--  runtime/gc/accounting/bitmap.h | 26
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 12
-rw-r--r--  runtime/gc/accounting/card_table.cc | 26
-rw-r--r--  runtime/gc/accounting/card_table.h | 7
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h | 28
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 29
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 12
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 18
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 5
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h | 4
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 19
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc | 72
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 18
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 2
-rw-r--r--  runtime/gc/heap.cc | 188
-rw-r--r--  runtime/gc/heap.h | 10
-rw-r--r--  runtime/gc/heap_test.cc | 18
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 34
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 4
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 113
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 33
-rw-r--r--  runtime/gc/space/image_space.cc | 626
-rw-r--r--  runtime/gc/space/image_space.h | 28
-rw-r--r--  runtime/gc/space/image_space_test.cc | 31
-rw-r--r--  runtime/gc/space/large_object_space.cc | 66
-rw-r--r--  runtime/gc/space/large_object_space.h | 8
-rw-r--r--  runtime/gc/space/malloc_space.cc | 64
-rw-r--r--  runtime/gc/space/malloc_space.h | 34
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space-inl.h | 4
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space.h | 2
-rw-r--r--  runtime/gc/space/region_space.cc | 86
-rw-r--r--  runtime/gc/space/region_space.h | 17
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 135
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 34
-rw-r--r--  runtime/gc/space/space.h | 24
-rw-r--r--  runtime/gc/space/zygote_space.cc | 17
-rw-r--r--  runtime/gc/space/zygote_space.h | 5
-rw-r--r--  runtime/indirect_reference_table.cc | 39
-rw-r--r--  runtime/indirect_reference_table.h | 5
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 91
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm.S | 14
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm64.S | 14
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips.S | 14
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips64.S | 28
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86.S | 14
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86_64.S | 14
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 29
-rw-r--r--  runtime/jit/jit_code_cache.cc | 175
-rw-r--r--  runtime/jit/jit_code_cache.h | 16
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 41
-rw-r--r--  runtime/native/java_lang_Thread.cc | 12
-rw-r--r--  runtime/oat_file.cc | 8
-rw-r--r--  runtime/runtime.cc | 22
-rw-r--r--  runtime/runtime.h | 4
-rw-r--r--  runtime/runtime_callbacks_test.cc | 18
-rw-r--r--  runtime/thread_pool.cc | 17
-rw-r--r--  runtime/thread_pool.h | 6
-rw-r--r--  runtime/vdex_file.cc | 14
-rw-r--r--  runtime/vdex_file.h | 12
-rw-r--r--  test/305-other-fault-handler/fault_handler.cc | 16
-rw-r--r--  test/458-checker-instruct-simplification/src/Main.java | 4
-rw-r--r--  test/618-checker-induction/src/Main.java | 18
-rw-r--r--  test/669-checker-break/src/Main.java | 8
-rw-r--r--  test/988-method-trace/expected.txt | 82
-rw-r--r--  test/988-method-trace/src/art/Test988.java | 10
-rw-r--r--  test/988-method-trace/trace_fib.cc | 41
-rw-r--r--  test/Android.bp | 1
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java | 89
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java | 119
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java | 48
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java | 13
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/Status.java | 8
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java | 12
-rw-r--r--  tools/class2greylist/test/src/com/android/class2greylist/AnnotationVisitorTest.java (renamed from tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java) | 191
-rw-r--r--  tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java | 73
-rw-r--r--  tools/dexanalyze/dexanalyze_bytecode.cc | 63
-rw-r--r--  tools/dexanalyze/dexanalyze_bytecode.h | 1
163 files changed, 3459 insertions(+), 2677 deletions(-)
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 87197becf9..2f017662e2 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -221,7 +221,7 @@ void CommonCompilerTest::TearDown() {
callbacks_.reset();
verification_results_.reset();
compiler_options_.reset();
- image_reservation_.reset();
+ image_reservation_.Reset();
CommonRuntimeTest::TearDown();
}
@@ -323,18 +323,18 @@ void CommonCompilerTest::ReserveImageSpace() {
// accidentally end up colliding with the fixed memory address when we need to load the image.
std::string error_msg;
MemMap::Init();
- image_reservation_.reset(MemMap::MapAnonymous("image reservation",
- reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
- (size_t)120 * 1024 * 1024, // 120MB
- PROT_NONE,
- false /* no need for 4gb flag with fixed mmap*/,
- false /* not reusing existing reservation */,
- &error_msg));
- CHECK(image_reservation_.get() != nullptr) << error_msg;
+ image_reservation_ = MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
+ (size_t)120 * 1024 * 1024, // 120MB
+ PROT_NONE,
+ false /* no need for 4gb flag with fixed mmap */,
+ false /* not reusing existing reservation */,
+ &error_msg);
+ CHECK(image_reservation_.IsValid()) << error_msg;
}
void CommonCompilerTest::UnreserveImageSpace() {
- image_reservation_.reset();
+ image_reservation_.Reset();
}
void CommonCompilerTest::SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index db38110400..366489c58f 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -115,7 +115,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
std::unique_ptr<CompilerDriver> compiler_driver_;
private:
- std::unique_ptr<MemMap> image_reservation_;
+ MemMap image_reservation_;
// Chunks must not move their storage after being created - use the node-based std::list.
std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
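These two hunks show the refactoring that runs through the whole change: MemMap becomes a move-only value type, so image_reservation_ is tested with IsValid() and released with Reset() instead of being held behind a std::unique_ptr. A minimal sketch of such a handle, assuming only what the diff shows (IsValid(), Reset(), move-only semantics); everything else below is illustrative and not ART's actual implementation:

    #include <cstddef>
    #include <utility>

    // Move-only mapping handle: the empty state replaces a null unique_ptr.
    class MemMapSketch {
     public:
      MemMapSketch() = default;  // Invalid (empty) mapping.
      MemMapSketch(void* begin, size_t size) : begin_(begin), size_(size) {}
      MemMapSketch(MemMapSketch&& other) noexcept
          : begin_(std::exchange(other.begin_, nullptr)),
            size_(std::exchange(other.size_, 0u)) {}
      MemMapSketch& operator=(MemMapSketch&& other) noexcept {
        Reset();
        begin_ = std::exchange(other.begin_, nullptr);
        size_ = std::exchange(other.size_, 0u);
        return *this;
      }
      MemMapSketch(const MemMapSketch&) = delete;
      MemMapSketch& operator=(const MemMapSketch&) = delete;
      ~MemMapSketch() { Reset(); }

      bool IsValid() const { return begin_ != nullptr; }
      void Reset() {
        // The real MemMap would munmap() here; the sketch just empties itself.
        begin_ = nullptr;
        size_ = 0u;
      }

     private:
      void* begin_ = nullptr;
      size_t size_ = 0u;
    };

    int main() {
      MemMapSketch map(reinterpret_cast<void*>(0x1000), 4096u);
      MemMapSketch moved = std::move(map);  // Ownership transfers; `map` is invalid.
      return (moved.IsValid() && !map.IsValid()) ? 0 : 1;
    }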
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d811e0711b..8c5eafd0bb 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -111,7 +111,7 @@ class EmitAdrCode {
public:
EmitAdrCode(ArmVIXLMacroAssembler* assembler, vixl32::Register rd, vixl32::Label* label)
: assembler_(assembler), rd_(rd), label_(label) {
- ExactAssemblyScope aas(assembler, kMaxInstructionSizeInBytes);
+ DCHECK(!assembler->AllowMacroInstructions()); // In ExactAssemblyScope.
adr_location_ = assembler->GetCursorOffset();
assembler->adr(EncodingSize(Wide), rd, label);
}
@@ -715,294 +715,6 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
};
-// Abstract base class for read barrier slow paths marking a reference
-// `ref`.
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class ReadBarrierMarkSlowPathBaseARMVIXL : public SlowPathCodeARMVIXL {
- protected:
- ReadBarrierMarkSlowPathBaseARMVIXL(HInstruction* instruction, Location ref, Location entrypoint)
- : SlowPathCodeARMVIXL(instruction), ref_(ref), entrypoint_(entrypoint) {
- DCHECK(kEmitCompilerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathBaseARMVIXL"; }
-
- // Generate assembly code calling the read barrier marking runtime
- // entry point (ReadBarrierMarkRegX).
- void GenerateReadBarrierMarkRuntimeCall(CodeGenerator* codegen) {
- vixl32::Register ref_reg = RegisterFrom(ref_);
-
- // No need to save live registers; it's taken care of by the
- // entrypoint. Also, there is no need to update the stack mask,
- // as this runtime call will not trigger a garbage collection.
- CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
- DCHECK(!ref_reg.Is(sp));
- DCHECK(!ref_reg.Is(lr));
- DCHECK(!ref_reg.Is(pc));
- // IP is used internally by the ReadBarrierMarkRegX entry point
- // as a temporary, it cannot be the entry point's input/output.
- DCHECK(!ref_reg.Is(ip));
- DCHECK(ref_reg.IsRegister()) << ref_reg;
- // "Compact" slow path, saving two moves.
- //
- // Instead of using the standard runtime calling convention (input
- // and output in R0):
- //
- // R0 <- ref
- // R0 <- ReadBarrierMark(R0)
- // ref <- R0
- //
- // we just use rX (the register containing `ref`) as input and output
- // of a dedicated entrypoint:
- //
- // rX <- ReadBarrierMarkRegX(rX)
- //
- if (entrypoint_.IsValid()) {
- arm_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
- __ Blx(RegisterFrom(entrypoint_));
- } else {
- // Entrypoint is not already loaded, load from the thread.
- int32_t entry_point_offset =
- Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ref_reg.GetCode());
- // This runtime call does not require a stack map.
- arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
- }
- }
-
- // The location (register) of the marked object reference.
- const Location ref_;
-
- // The location of the entrypoint if already loaded.
- const Location entrypoint_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathBaseARMVIXL);
-};
-
-// Slow path loading `obj`'s lock word, loading a reference from
-// object `*(obj + offset + (index << scale_factor))` into `ref`, and
-// marking `ref` if `obj` is gray according to the lock word (Baker
-// read barrier). If needed, this slow path also atomically updates
-// the field `obj.field` in the object `obj` holding this reference
-// after marking.
-//
-// This means that after the execution of this slow path, both `ref`
-// and `obj.field` will be up-to-date; i.e., after the flip, both will
-// hold the same to-space reference (unless another thread installed
-// another object reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL
- : public ReadBarrierMarkSlowPathBaseARMVIXL {
- public:
- LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
- HInstruction* instruction,
- Location ref,
- vixl32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- bool needs_null_check,
- vixl32::Register temp1,
- vixl32::Register temp2,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARMVIXL(instruction, ref, entrypoint),
- obj_(obj),
- offset_(offset),
- index_(index),
- scale_factor_(scale_factor),
- needs_null_check_(needs_null_check),
- temp1_(temp1),
- temp2_(temp2) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE {
- return "LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL";
- }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- vixl32::Register ref_reg = RegisterFrom(ref_);
- DCHECK(locations->CanCall());
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg.GetCode())) << ref_reg;
- DCHECK_NE(ref_.reg(), LocationFrom(temp1_).reg());
-
- // This slow path is only used by the UnsafeCASObject intrinsic at the moment.
- DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
- << "Unexpected instruction in read barrier marking and field updating slow path: "
- << instruction_->DebugName();
- DCHECK(instruction_->GetLocations()->Intrinsified());
- DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
- DCHECK_EQ(offset_, 0u);
- DCHECK_EQ(scale_factor_, ScaleFactor::TIMES_1);
- Location field_offset = index_;
- DCHECK(field_offset.IsRegisterPair()) << field_offset;
-
- // Temporary register `temp1_`, used to store the lock word, must
- // not be IP, as we may use it to emit the reference load (in the
- // call to GenerateRawReferenceLoad below), and we need the lock
- // word to still be in `temp1_` after the reference load.
- DCHECK(!temp1_.Is(ip));
-
- __ Bind(GetEntryLabel());
-
- // The implementation is:
- //
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // old_ref = ref;
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // compareAndSwapObject(obj, field_offset, old_ref, ref);
- // }
-
- CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
-
- // /* int32_t */ monitor = obj->monitor_
- uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- arm_codegen->GetAssembler()->LoadFromOffset(kLoadWord, temp1_, obj_, monitor_offset);
- if (needs_null_check_) {
- codegen->MaybeRecordImplicitNullCheck(instruction_);
- }
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including the rb_state,
- // which shall prevent load-load reordering without using
- // a memory barrier (which would be more expensive).
- // `obj` is unchanged by this operation, but its value now depends
- // on `temp`.
- __ Add(obj_, obj_, Operand(temp1_, ShiftType::LSR, 32));
-
- // The actual reference load.
- // A possible implicit null check has already been handled above.
- arm_codegen->GenerateRawReferenceLoad(
- instruction_, ref_, obj_, offset_, index_, scale_factor_, /* needs_null_check */ false);
-
- // Mark the object `ref` when `obj` is gray.
- //
- // if (rb_state == ReadBarrier::GrayState())
- // ref = ReadBarrier::Mark(ref);
- //
- // Given the numeric representation, it's enough to check the low bit of the
- // rb_state. We do that by shifting the bit out of the lock word with LSRS
- // which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Lsrs(temp1_, temp1_, LockWord::kReadBarrierStateShift + 1);
- __ B(cc, GetExitLabel()); // Carry flag is the last bit shifted out by LSRS.
-
- // Save the old value of the reference before marking it.
- // Note that we cannot use IP to save the old reference, as IP is
- // used internally by the ReadBarrierMarkRegX entry point, and we
- // need the old reference after the call to that entry point.
- DCHECK(!temp1_.Is(ip));
- __ Mov(temp1_, ref_reg);
-
- GenerateReadBarrierMarkRuntimeCall(codegen);
-
- // If the new reference is different from the old reference,
- // update the field in the holder (`*(obj_ + field_offset)`).
- //
- // Note that this field could also hold a different object, if
- // another thread had concurrently changed it. In that case, the
- // LDREX/CMP/BNE sequence of instructions in the compare-and-set
- // (CAS) operation below would abort the CAS, leaving the field
- // as-is.
- __ Cmp(temp1_, ref_reg);
- __ B(eq, GetExitLabel());
-
- // Update the holder's field atomically. This may fail if the
- // mutator updates before us, but it's OK. This is achieved
- // using a strong compare-and-set (CAS) operation with relaxed
- // memory synchronization ordering, where the expected value is
- // the old reference and the desired value is the new reference.
-
- UseScratchRegisterScope temps(arm_codegen->GetVIXLAssembler());
- // Convenience aliases.
- vixl32::Register base = obj_;
- // The UnsafeCASObject intrinsic uses a register pair as field
- // offset ("long offset"), of which only the low part contains
- // data.
- vixl32::Register offset = LowRegisterFrom(field_offset);
- vixl32::Register expected = temp1_;
- vixl32::Register value = ref_reg;
- vixl32::Register tmp_ptr = temps.Acquire(); // Pointer to actual memory.
- vixl32::Register tmp = temp2_; // Value in memory.
-
- __ Add(tmp_ptr, base, offset);
-
- if (kPoisonHeapReferences) {
- arm_codegen->GetAssembler()->PoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not poison `value`, as it is the same register as
- // `expected`, which has just been poisoned.
- } else {
- arm_codegen->GetAssembler()->PoisonHeapReference(value);
- }
- }
-
- // do {
- // tmp = [r_ptr] - expected;
- // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
-
- vixl32::Label loop_head, comparison_failed, exit_loop;
- __ Bind(&loop_head);
- __ Ldrex(tmp, MemOperand(tmp_ptr));
- __ Cmp(tmp, expected);
- __ B(ne, &comparison_failed, /* far_target */ false);
- __ Strex(tmp, value, MemOperand(tmp_ptr));
- __ CompareAndBranchIfZero(tmp, &exit_loop, /* far_target */ false);
- __ B(&loop_head);
- __ Bind(&comparison_failed);
- __ Clrex();
- __ Bind(&exit_loop);
-
- if (kPoisonHeapReferences) {
- arm_codegen->GetAssembler()->UnpoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not unpoison `value`, as it is the same register as
- // `expected`, which has just been unpoisoned.
- } else {
- arm_codegen->GetAssembler()->UnpoisonHeapReference(value);
- }
- }
-
- __ B(GetExitLabel());
- }
-
- private:
- // The register containing the object holding the marked object reference field.
- const vixl32::Register obj_;
- // The offset, index and scale factor to access the reference in `obj_`.
- uint32_t offset_;
- Location index_;
- ScaleFactor scale_factor_;
- // Is a null check required?
- bool needs_null_check_;
- // A temporary register used to hold the lock word of `obj_`; and
- // also to hold the original reference value, when the reference is
- // marked.
- const vixl32::Register temp1_;
- // A temporary register used in the implementation of the CAS, to
- // update the object's reference field.
- const vixl32::Register temp2_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL);
-};
-
// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
@@ -2295,6 +2007,14 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
}
break;
}
+ case BakerReadBarrierKind::kUnsafeCas: {
+ DCHECK_GE(literal_offset, 4u);
+ uint32_t prev_insn = GetInsn32(literal_offset - 4u);
+ // ADD (register), encoding T3, with correct root_reg.
+ const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+ CHECK_EQ(prev_insn & 0xfff0fff0u, 0xeb000000u | (root_reg << 8));
+ break;
+ }
default:
LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
UNREACHABLE();
@@ -8626,7 +8346,11 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
bool narrow = CanEmitNarrowLdr(root_reg, obj, offset);
uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow);
- vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes);
+ size_t narrow_instructions = /* CMP */ (mr.IsLow() ? 1u : 0u) + /* LDR */ (narrow ? 1u : 0u);
+ size_t wide_instructions = /* ADR+CMP+LDR+BNE */ 4u - narrow_instructions;
+ size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes +
+ narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes;
+ ExactAssemblyScope guard(GetVIXLAssembler(), exact_size);
vixl32::Label return_address;
EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
__ cmp(mr, Operand(0));
@@ -8636,7 +8360,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
__ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset));
EmitBakerReadBarrierBne(custom_data);
- __ Bind(&return_address);
+ __ bind(&return_address);
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET
: BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET);
@@ -8658,6 +8382,32 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
MaybeGenerateMarkingRegisterCheck(/* code */ 19);
}
+void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier(
+ vixl::aarch32::Register old_value,
+ vixl::aarch32::Register adjusted_old_value,
+ vixl::aarch32::Register expected) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with an ADD instead of LDR.
+ uint32_t custom_data = EncodeBakerReadBarrierUnsafeCasData(old_value.GetCode());
+
+ size_t narrow_instructions = /* CMP */ (mr.IsLow() ? 1u : 0u);
+ size_t wide_instructions = /* ADR+CMP+ADD+BNE */ 4u - narrow_instructions;
+ size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes +
+ narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes;
+ ExactAssemblyScope guard(GetVIXLAssembler(), exact_size);
+ vixl32::Label return_address;
+ EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
+ __ cmp(mr, Operand(0));
+ ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
+ __ add(EncodingSize(Wide), old_value, adjusted_old_value, Operand(expected)); // Preserves flags.
+ EmitBakerReadBarrierBne(custom_data);
+ __ bind(&return_address);
+ DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
+ BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ADD_OFFSET);
+}
+
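The main CAS path below (in intrinsics_arm_vixl.cc) computes tmp = old_value - expected with SUBS, so on entry to the slow path only that difference survives; the helper above recovers the original reference with a single flag-preserving ADD before handing it to the marking thunk. A standalone check of that arithmetic, with made-up values (a sketch of the identity, not ART code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t expected = 0x12345678u;   // To-space reference (illustrative).
      uint32_t old_value = 0x9abcdef0u;  // Reference actually read from the field.
      uint32_t adjusted = old_value - expected;      // What SUBS leaves in `tmp`.
      uint32_t reconstructed = adjusted + expected;  // The ADD emitted above.
      assert(reconstructed == old_value);  // Unsigned arithmetic: wraparound-safe.
      return 0;
    }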
void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
vixl32::Register obj,
@@ -8698,9 +8448,14 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
EncodeBakerReadBarrierFieldData(src.GetBaseRegister().GetCode(), obj.GetCode(), narrow);
{
- vixl::EmissionCheckScope guard(
- GetVIXLAssembler(),
- (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
+ size_t narrow_instructions =
+ /* CMP */ (mr.IsLow() ? 1u : 0u) +
+ /* LDR+unpoison? */ (narrow ? (kPoisonHeapReferences ? 2u : 1u) : 0u);
+ size_t wide_instructions =
+ /* ADR+CMP+LDR+BNE+unpoison? */ (kPoisonHeapReferences ? 5u : 4u) - narrow_instructions;
+ size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes +
+ narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes;
+ ExactAssemblyScope guard(GetVIXLAssembler(), exact_size);
vixl32::Label return_address;
EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
__ cmp(mr, Operand(0));
@@ -8719,7 +8474,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
__ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
}
}
- __ Bind(&return_address);
+ __ bind(&return_address);
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
: BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
@@ -8791,9 +8546,12 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
__ Add(data_reg, obj, Operand(data_offset));
{
- vixl::EmissionCheckScope guard(
- GetVIXLAssembler(),
- (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
+ size_t narrow_instructions = /* CMP */ (mr.IsLow() ? 1u : 0u);
+ size_t wide_instructions =
+ /* ADR+CMP+BNE+LDR+unpoison? */ (kPoisonHeapReferences ? 5u : 4u) - narrow_instructions;
+ size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes +
+ narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes;
+ ExactAssemblyScope guard(GetVIXLAssembler(), exact_size);
vixl32::Label return_address;
EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
__ cmp(mr, Operand(0));
@@ -8805,127 +8563,13 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
if (kPoisonHeapReferences) {
__ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
}
- __ Bind(&return_address);
+ __ bind(&return_address);
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
}
MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
}
-void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl32::Register obj,
- Location field_offset,
- Location temp,
- bool needs_null_check,
- vixl32::Register temp2) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
-
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to update the reference field within `obj`. Then, in the
- // slow path, check the gray bit in the lock word of the reference's
- // holder (`obj`) to decide whether to mark `ref` and update the
- // field or not.
- //
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *(obj + field_offset); // Reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // old_ref = ref;
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // compareAndSwapObject(obj, field_offset, old_ref, ref);
- // }
- // }
-
- vixl32::Register temp_reg = RegisterFrom(temp);
-
- // Slow path updating the object reference at address `obj + field_offset`
- // when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path =
- new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
- instruction,
- ref,
- obj,
- /* offset */ 0u,
- /* index */ field_offset,
- /* scale_factor */ ScaleFactor::TIMES_1,
- needs_null_check,
- temp_reg,
- temp2);
- AddSlowPath(slow_path);
-
- __ CompareAndBranchIfNonZero(mr, slow_path->GetEntryLabel());
- // Fast path: the GC is not marking: nothing to do (the field is
- // up-to-date, and we don't need to load the reference).
- __ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ 23);
-}
-
-void CodeGeneratorARMVIXL::GenerateRawReferenceLoad(HInstruction* instruction,
- Location ref,
- vixl32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- bool needs_null_check) {
- DataType::Type type = DataType::Type::kReference;
- vixl32::Register ref_reg = RegisterFrom(ref, type);
-
- // If needed, vixl::EmissionCheckScope guards are used to ensure
- // that no pools are emitted between the load (macro) instruction
- // and MaybeRecordImplicitNullCheck.
-
- if (index.IsValid()) {
- // Load types involving an "index": ArrayGet,
- // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
- // intrinsics.
- // /* HeapReference<mirror::Object> */ ref = *(obj + offset + (index << scale_factor))
- if (index.IsConstant()) {
- size_t computed_offset =
- (Int32ConstantFrom(index) << scale_factor) + offset;
- vixl::EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- GetAssembler()->LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- } else {
- // Handle the special case of the
- // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
- // intrinsics, which use a register pair as index ("long
- // offset"), of which only the low part contains data.
- vixl32::Register index_reg = index.IsRegisterPair()
- ? LowRegisterFrom(index)
- : RegisterFrom(index);
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- __ Add(temp, obj, Operand(index_reg, ShiftType::LSL, scale_factor));
- {
- vixl::EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- GetAssembler()->LoadFromOffset(kLoadWord, ref_reg, temp, offset);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- }
- }
- } else {
- // /* HeapReference<mirror::Object> */ ref = *(obj + offset)
- vixl::EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- GetAssembler()->LoadFromOffset(kLoadWord, ref_reg, obj, offset);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- }
-
- // Object* ref = ref_addr->AsMirrorPtr()
- GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
-}
-
void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
// The following condition is a compile-time one, so it does not have a run-time cost.
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) {
@@ -9215,7 +8859,7 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePa
}
void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
- ExactAssemblyScope eas(GetVIXLAssembler(), 1 * k32BitT32InstructionSizeInBytes);
+ DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
if (Runtime::Current()->UseJitCompilation()) {
auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
vixl::aarch32::Label* slow_path_entry = &it->second.label;
@@ -9774,7 +9418,8 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
__ Bx(ep_reg); // Jump to the entrypoint's array switch case.
break;
}
- case BakerReadBarrierKind::kGcRoot: {
+ case BakerReadBarrierKind::kGcRoot:
+ case BakerReadBarrierKind::kUnsafeCas: {
// Check if the reference needs to be marked and if so (i.e. not null, not marked yet
// and it does not have a forwarding address), call the correct introspection entrypoint;
// otherwise return the reference (or the extracted forwarding address).
@@ -9802,10 +9447,14 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
__ B(hs, &forwarding_address);
vixl32::Register ep_reg = LoadReadBarrierMarkIntrospectionEntrypoint(assembler);
// Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister
- // to art_quick_read_barrier_mark_introspection_gc_roots.
- int32_t entrypoint_offset = (width == BakerReadBarrierWidth::kWide)
- ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET
- : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET;
+ // to one of art_quick_read_barrier_mark_introspection_{gc_roots_{wide,narrow},unsafe_cas}.
+ DCHECK(kind != BakerReadBarrierKind::kUnsafeCas || width == BakerReadBarrierWidth::kWide);
+ int32_t entrypoint_offset =
+ (kind == BakerReadBarrierKind::kGcRoot)
+ ? (width == BakerReadBarrierWidth::kWide)
+ ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET
+ : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET
+ : BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ENTRYPOINT_OFFSET;
__ Add(ep_reg, ep_reg, Operand(entrypoint_offset));
__ Mov(ip, root_reg);
__ Bx(ep_reg);
@@ -9851,6 +9500,12 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
BakerReadBarrierSecondRegField::Decode(encoded_data));
break;
+ case BakerReadBarrierKind::kUnsafeCas:
+ oss << "UnsafeCas_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
+ DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+ BakerReadBarrierSecondRegField::Decode(encoded_data));
+ DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide);
+ break;
}
*debug_name = oss.str();
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 2fd18cab47..cb131a7ac1 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -622,6 +622,11 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl::aarch32::Register obj,
uint32_t offset,
ReadBarrierOption read_barrier_option);
+ // Generate ADD for UnsafeCASObject to reconstruct the old value from
+ // `old_value - expected` and mark it with Baker read barrier.
+ void GenerateUnsafeCasOldValueAddWithBakerReadBarrier(vixl::aarch32::Register old_value,
+ vixl::aarch32::Register adjusted_old_value,
+ vixl::aarch32::Register expected);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
// Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
@@ -647,35 +652,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
Location temp,
bool needs_null_check);
- // Generate code checking whether the reference field at the
- // address `obj + field_offset`, held by object `obj`, needs to be
- // marked, and if so, marking it and updating the field within `obj`
- // with the marked value.
- //
- // This routine is used for the implementation of the
- // UnsafeCASObject intrinsic with Baker read barriers.
- //
- // This method has a structure similar to
- // GenerateReferenceLoadWithBakerReadBarrier, but note that argument
- // `ref` is only as a temporary here, and thus its value should not
- // be used afterwards.
- void UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl::aarch32::Register obj,
- Location field_offset,
- Location temp,
- bool needs_null_check,
- vixl::aarch32::Register temp2);
-
- // Generate a heap reference load (with no read barrier).
- void GenerateRawReferenceLoad(HInstruction* instruction,
- Location ref,
- vixl::aarch32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- bool needs_null_check);
-
// Emit code checking the status of the Marking Register, and
// aborting the program if MR does not match the value stored in the
// art::Thread object. Code is only emitted in debug mode and if
@@ -772,10 +748,11 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
enum class BakerReadBarrierKind : uint8_t {
- kField, // Field get or array get with constant offset (i.e. constant index).
- kArray, // Array get with index in register.
- kGcRoot, // GC root load.
- kLast = kGcRoot
+ kField, // Field get or array get with constant offset (i.e. constant index).
+ kArray, // Array get with index in register.
+ kGcRoot, // GC root load.
+ kUnsafeCas, // UnsafeCASObject intrinsic.
+ kLast = kUnsafeCas
};
enum class BakerReadBarrierWidth : uint8_t {
@@ -842,6 +819,14 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
BakerReadBarrierWidthField::Encode(width);
}
+ static uint32_t EncodeBakerReadBarrierUnsafeCasData(uint32_t root_reg) {
+ CheckValidReg(root_reg);
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kUnsafeCas) |
+ BakerReadBarrierFirstRegField::Encode(root_reg) |
+ BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
+ BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
+ }
+
void CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
uint32_t encoded_data,
/*out*/ std::string* debug_name);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 53b0aa2560..74a779d9e2 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -936,9 +936,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke)
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator,
- HInvoke* invoke,
- DataType::Type type) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
@@ -948,20 +946,16 @@ static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator,
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall,
kIntrinsified);
+ if (can_call) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- // If heap poisoning is enabled, we don't want the unpoisoning
- // operations to potentially clobber the output. Likewise when
- // emitting a (Baker) read barrier, which may call.
- Location::OutputOverlap overlaps =
- ((kPoisonHeapReferences && type == DataType::Type::kReference) || can_call)
- ? Location::kOutputOverlap
- : Location::kNoOutputOverlap;
- locations->SetOut(Location::RequiresRegister(), overlaps);
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
// Temporary registers used in CAS. In the object case
// (UnsafeCASObject intrinsic), these are also used for
@@ -970,24 +964,92 @@ static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator,
locations->AddTemp(Location::RequiresRegister()); // Temp 1.
}
+class BakerReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+ explicit BakerReadBarrierCasSlowPathARMVIXL(HInvoke* invoke)
+ : SlowPathCodeARMVIXL(invoke) {}
+
+ const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARMVIXL"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+ ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
+ __ Bind(GetEntryLabel());
+
+ LocationSummary* locations = instruction_->GetLocations();
+ vixl32::Register base = InputRegisterAt(instruction_, 1); // Object pointer.
+ vixl32::Register offset = LowRegisterFrom(locations->InAt(2)); // Offset (discard high 4B).
+ vixl32::Register expected = InputRegisterAt(instruction_, 3); // Expected.
+ vixl32::Register value = InputRegisterAt(instruction_, 4); // Value.
+
+ vixl32::Register tmp_ptr = RegisterFrom(locations->GetTemp(0)); // Pointer to actual memory.
+ vixl32::Register tmp = RegisterFrom(locations->GetTemp(1)); // Temporary.
+
+ // The `tmp` is initialized to `[tmp_ptr] - expected` in the main path. Reconstruct
+ // and mark the old value and compare with `expected`. We clobber `tmp_ptr` in the
+ // process due to lack of other temps suitable for the read barrier.
+ arm_codegen->GenerateUnsafeCasOldValueAddWithBakerReadBarrier(tmp_ptr, tmp, expected);
+ __ Cmp(tmp_ptr, expected);
+ __ B(ne, GetExitLabel());
+
+ // The old value we have read did not match `expected` (which is always a to-space reference)
+ // but after the read barrier in GenerateUnsafeCasOldValueAddWithBakerReadBarrier() the marked
+ // to-space value matched, so the old value must be a from-space reference to the same object.
+ // Do the same CAS loop as the main path but check for both `expected` and the unmarked
+ // old value representing the to-space and from-space references for the same object.
+
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ vixl32::Register adjusted_old_value = temps.Acquire(); // For saved `tmp` from main path.
+
+ // Recalculate the `tmp_ptr` clobbered above and store the `adjusted_old_value`, i.e. IP.
+ __ Add(tmp_ptr, base, offset);
+ __ Mov(adjusted_old_value, tmp);
+
+ // do {
+ // tmp = [r_ptr] - expected;
+ // } while ((tmp == 0 || tmp == adjusted_old_value) && failure([r_ptr] <- r_new_value));
+ // result = (tmp == 0 || tmp == adjusted_old_value);
+
+ vixl32::Label loop_head;
+ __ Bind(&loop_head);
+ __ Ldrex(tmp, MemOperand(tmp_ptr)); // This can now load null stored by another thread.
+ assembler->MaybeUnpoisonHeapReference(tmp);
+ __ Subs(tmp, tmp, expected); // Use SUBS to get non-zero value if both compares fail.
+ {
+ // If the newly loaded value did not match `expected`, compare with `adjusted_old_value`.
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(), 2 * k16BitT32InstructionSizeInBytes);
+ __ it(ne);
+ __ cmp(ne, tmp, adjusted_old_value);
+ }
+ __ B(ne, GetExitLabel());
+ assembler->MaybePoisonHeapReference(value);
+ __ Strex(tmp, value, MemOperand(tmp_ptr));
+ assembler->MaybeUnpoisonHeapReference(value);
+ __ Cmp(tmp, 0);
+ __ B(ne, &loop_head, /* far_target */ false);
+ __ B(GetExitLabel());
+ }
+};
+
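In effect, this slow path retries the compare-and-set while accepting either of two aliases of the same object: the to-space reference `expected` or the unmarked from-space value reconstructed by the read barrier. A portable sketch of those semantics using std::atomic, with compare_exchange_weak standing in for the LDREX/STREX pair (an assumed mapping; this is not ART code):

    #include <atomic>
    #include <cstdint>

    bool CasAcceptingBothSpaces(std::atomic<uint32_t>* field,
                                uint32_t expected,        // To-space reference.
                                uint32_t from_space_old,  // From-space alias of it.
                                uint32_t new_value) {
      while (true) {
        uint32_t tmp = field->load(std::memory_order_relaxed);  // LDREX.
        if (tmp != expected && tmp != from_space_old) {
          return false;  // Neither alias matches: the CAS fails.
        }
        // STREX may fail if another thread wrote in between; retry the loop.
        if (field->compare_exchange_weak(tmp, new_value, std::memory_order_relaxed)) {
          return true;
        }
      }
    }

    int main() {
      std::atomic<uint32_t> field{0x1000u};  // Field still holds the from-space alias.
      bool ok = CasAcceptingBothSpaces(&field, 0x2000u, 0x1000u, 0x3000u);
      return (ok && field.load() == 0x3000u) ? 0 : 1;
    }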
static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* codegen) {
DCHECK_NE(type, DataType::Type::kInt64);
ArmVIXLAssembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
- Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(invoke); // Boolean result.
vixl32::Register base = InputRegisterAt(invoke, 1); // Object pointer.
- Location offset_loc = locations->InAt(2);
- vixl32::Register offset = LowRegisterFrom(offset_loc); // Offset (discard high 4B).
+ vixl32::Register offset = LowRegisterFrom(locations->InAt(2)); // Offset (discard high 4B).
vixl32::Register expected = InputRegisterAt(invoke, 3); // Expected.
vixl32::Register value = InputRegisterAt(invoke, 4); // Value.
- Location tmp_ptr_loc = locations->GetTemp(0);
- vixl32::Register tmp_ptr = RegisterFrom(tmp_ptr_loc); // Pointer to actual memory.
- vixl32::Register tmp = RegisterFrom(locations->GetTemp(1)); // Value in memory.
+ vixl32::Register tmp_ptr = RegisterFrom(locations->GetTemp(0)); // Pointer to actual memory.
+ vixl32::Register tmp = RegisterFrom(locations->GetTemp(1)); // Temporary.
+
+ vixl32::Label loop_exit_label;
+ vixl32::Label* loop_exit = &loop_exit_label;
+ vixl32::Label* failure = &loop_exit_label;
if (type == DataType::Type::kReference) {
// The only read barrier implementation supporting the
@@ -1000,87 +1062,62 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
codegen->MarkGCCard(tmp_ptr, tmp, base, value, value_can_be_null);
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // Need to make sure the reference stored in the field is a to-space
- // one before attempting the CAS or the CAS could fail incorrectly.
- codegen->UpdateReferenceFieldWithBakerReadBarrier(
- invoke,
- out_loc, // Unused, used only as a "temporary" within the read barrier.
- base,
- /* field_offset */ offset_loc,
- tmp_ptr_loc,
- /* needs_null_check */ false,
- tmp);
+ // If marking, check if the stored reference is a from-space reference to the same
+ // object as the to-space reference `expected`. If so, perform a custom CAS loop.
+ BakerReadBarrierCasSlowPathARMVIXL* slow_path =
+ new (codegen->GetScopedAllocator()) BakerReadBarrierCasSlowPathARMVIXL(invoke);
+ codegen->AddSlowPath(slow_path);
+ failure = slow_path->GetEntryLabel();
+ loop_exit = slow_path->GetExitLabel();
}
}
// Prevent reordering with prior memory operations.
// Emit a DMB ISH instruction instead of a DMB ISHST one, as the
- // latter allows a preceding load to be delayed past the STXR
+ // latter allows a preceding load to be delayed past the STREX
// instruction below.
__ Dmb(vixl32::ISH);
__ Add(tmp_ptr, base, offset);
- if (kPoisonHeapReferences && type == DataType::Type::kReference) {
- codegen->GetAssembler()->PoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not poison `value`, as it is the same register as
- // `expected`, which has just been poisoned.
- } else {
- codegen->GetAssembler()->PoisonHeapReference(value);
- }
- }
-
// do {
// tmp = [r_ptr] - expected;
// } while (tmp == 0 && failure([r_ptr] <- r_new_value));
- // result = tmp != 0;
+ // result = tmp == 0;
vixl32::Label loop_head;
__ Bind(&loop_head);
-
__ Ldrex(tmp, MemOperand(tmp_ptr));
-
+ if (type == DataType::Type::kReference) {
+ assembler->MaybeUnpoisonHeapReference(tmp);
+ }
__ Subs(tmp, tmp, expected);
-
- {
- ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
- 3 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
-
- __ itt(eq);
- __ strex(eq, tmp, value, MemOperand(tmp_ptr));
- __ cmp(eq, tmp, 1);
+ __ B(ne, failure, (failure == loop_exit) ? kNear : kBranchWithoutHint);
+ if (type == DataType::Type::kReference) {
+ assembler->MaybePoisonHeapReference(value);
}
+ __ Strex(tmp, value, MemOperand(tmp_ptr));
+ if (type == DataType::Type::kReference) {
+ assembler->MaybeUnpoisonHeapReference(value);
+ }
+ __ Cmp(tmp, 0);
+ __ B(ne, &loop_head, /* far_target */ false);
- __ B(eq, &loop_head, /* far_target */ false);
+ __ Bind(loop_exit);
__ Dmb(vixl32::ISH);
- __ Rsbs(out, tmp, 1);
-
- {
- ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
-
- __ it(cc);
- __ mov(cc, out, 0);
- }
+ // out = tmp == 0.
+ __ Clz(out, tmp);
+ __ Lsr(out, out, WhichPowerOf2(out.GetSizeInBits()));
- if (kPoisonHeapReferences && type == DataType::Type::kReference) {
- codegen->GetAssembler()->UnpoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not unpoison `value`, as it is the same register as
- // `expected`, which has just been unpoisoned.
- } else {
- codegen->GetAssembler()->UnpoisonHeapReference(value);
- }
+ if (type == DataType::Type::kReference) {
+ codegen->MaybeGenerateMarkingRegisterCheck(/* code */ 128);
}
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
@@ -1089,7 +1126,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kReference);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
GenCas(invoke, DataType::Type::kInt32, codegen_);
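The Clz/Lsr pair added in GenCas materializes out = (tmp == 0) without a branch: CLZ yields 32 only when its input is zero, and shifting right by WhichPowerOf2(32) == 5 maps 32 to 1 and every smaller count to 0. A standalone check of the trick, with a helper that models the ARM CLZ instruction (sketch only):

    #include <cassert>
    #include <cstdint>

    uint32_t Clz(uint32_t x) {  // Count leading zeros; Clz(0) == 32, like ARM CLZ.
      uint32_t n = 0;
      for (uint32_t bit = 1u << 31; bit != 0 && (x & bit) == 0; bit >>= 1) {
        ++n;
      }
      return n;
    }

    int main() {
      assert((Clz(0u) >> 5) == 1u);  // tmp == 0  ->  out = 1.
      assert((Clz(1u) >> 5) == 0u);  // tmp != 0  ->  out = 0.
      assert((Clz(0xffffffffu) >> 5) == 0u);
      return 0;
    }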
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 1f9ad4242d..dee83d1c71 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -141,6 +141,7 @@ void* SwapSpace::Alloc(size_t size) {
it->size -= size;
} else {
// Changing in place would break the std::set<> ordering; we need to remove and insert.
+ // TODO: When C++17 becomes available, use std::set<>::extract(), modify, insert.
free_by_size_.erase(it);
free_by_size_.insert(new_value);
}
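The TODO refers to the C++17 node-handle API: std::set<>::extract() unlinks a node so its key can be modified and re-inserted without the deallocation and reallocation that the erase/insert pair implies. A minimal sketch of that pattern (illustrative int keys, not SwapSpace's actual chunk type):

    #include <cassert>
    #include <set>

    int main() {
      std::set<int> free_by_size{8, 16, 32};
      auto node = free_by_size.extract(16);  // Unlink the node; no deallocation.
      node.value() = 12;                     // Shrink the chunk key in place.
      free_by_size.insert(std::move(node));  // Relink under the new ordering.
      assert(free_by_size.count(12) == 1);
      return 0;
    }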
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0b68620e6e..29df0670d6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -669,9 +669,7 @@ class Dex2Oat FINAL {
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
dex_file.release();
}
- for (std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
- map.release();
- }
+ new std::vector<MemMap>(std::move(opened_dex_files_maps_)); // Leak MemMaps.
for (std::unique_ptr<File>& vdex_file : vdex_files_) {
vdex_file.release();
}
@@ -1449,14 +1447,14 @@ class Dex2Oat FINAL {
LOG(INFO) << "No " << VdexFile::kVdexNameInDmFile << " file in DexMetadata archive. "
<< "Not doing fast verification.";
} else {
- std::unique_ptr<MemMap> input_file(zip_entry->MapDirectlyOrExtract(
+ MemMap input_file = zip_entry->MapDirectlyOrExtract(
VdexFile::kVdexNameInDmFile,
kDexMetadata,
- &error_msg));
- if (input_file == nullptr) {
+ &error_msg);
+ if (!input_file.IsValid()) {
LOG(WARNING) << "Could not open vdex file in DexMetadata archive: " << error_msg;
} else {
- input_vdex_file_ = std::make_unique<VdexFile>(input_file.release());
+ input_vdex_file_ = std::make_unique<VdexFile>(std::move(input_file));
}
}
}
@@ -1631,7 +1629,7 @@ class Dex2Oat FINAL {
for (size_t i = 0, size = oat_writers_.size(); i != size; ++i) {
rodata_.push_back(elf_writers_[i]->StartRoData());
// Unzip or copy dex files straight to the oat file.
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
+ std::vector<MemMap> opened_dex_files_map;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// No need to verify the dex file when we have a vdex file, which means it was already
// verified.
@@ -1651,7 +1649,7 @@ class Dex2Oat FINAL {
if (opened_dex_files_map.empty()) {
DCHECK(opened_dex_files.empty());
} else {
- for (std::unique_ptr<MemMap>& map : opened_dex_files_map) {
+ for (MemMap& map : opened_dex_files_map) {
opened_dex_files_maps_.push_back(std::move(map));
}
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
@@ -1732,8 +1730,8 @@ class Dex2Oat FINAL {
}
// Ensure opened dex files are writable for dex-to-dex transformations.
- for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
- if (!map->Protect(PROT_READ | PROT_WRITE)) {
+ for (MemMap& map : opened_dex_files_maps_) {
+ if (!map.Protect(PROT_READ | PROT_WRITE)) {
PLOG(ERROR) << "Failed to make .dex files writeable.";
return dex2oat::ReturnCode::kOther;
}
@@ -2002,9 +2000,9 @@ class Dex2Oat FINAL {
TimingLogger::ScopedTiming t("dex2oat Oat", timings_);
// Sync the data to the file, in case we did dex2dex transformations.
- for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
- if (!map->Sync()) {
- PLOG(ERROR) << "Failed to Sync() dex2dex output. Map: " << map->GetName();
+ for (MemMap& map : opened_dex_files_maps_) {
+ if (!map.Sync()) {
+ PLOG(ERROR) << "Failed to Sync() dex2dex output. Map: " << map.GetName();
return false;
}
}
@@ -2737,16 +2735,13 @@ class Dex2Oat FINAL {
zip_filename, error_msg->c_str());
return nullptr;
}
- std::unique_ptr<MemMap> input_file(zip_entry->ExtractToMemMap(zip_filename,
- input_filename,
- error_msg));
- if (input_file.get() == nullptr) {
+ MemMap input_file = zip_entry->ExtractToMemMap(zip_filename, input_filename, error_msg);
+ if (!input_file.IsValid()) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", input_filename,
zip_filename, error_msg->c_str());
return nullptr;
}
- const std::string input_string(reinterpret_cast<char*>(input_file->Begin()),
- input_file->Size());
+ const std::string input_string(reinterpret_cast<char*>(input_file.Begin()), input_file.Size());
std::istringstream input_stream(input_string);
return ReadCommentedInputStream<T>(input_stream, process);
}
@@ -2873,7 +2868,7 @@ class Dex2Oat FINAL {
std::unique_ptr<linker::ImageWriter> image_writer_;
std::unique_ptr<CompilerDriver> driver_;
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
+ std::vector<MemMap> opened_dex_files_maps_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
bool avoid_storing_invocation_;
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index fa8c7784f5..440b3a47cb 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -252,7 +252,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
}
std::vector<OutputStream*> rodata;
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
+ std::vector<MemMap> opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// Now that we have finalized key_value_store_, start writing the oat file.
for (size_t i = 0, size = oat_writers.size(); i != size; ++i) {
@@ -265,7 +265,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
dex_file->GetLocation().c_str(),
dex_file->GetLocationChecksum());
- std::vector<std::unique_ptr<MemMap>> cur_opened_dex_files_maps;
+ std::vector<MemMap> cur_opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
out_helper.vdex_files[i].GetFile(),
@@ -279,7 +279,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
ASSERT_TRUE(dex_files_ok);
if (!cur_opened_dex_files_maps.empty()) {
- for (std::unique_ptr<MemMap>& cur_map : cur_opened_dex_files_maps) {
+ for (MemMap& cur_map : cur_opened_dex_files_maps) {
opened_dex_files_maps.push_back(std::move(cur_map));
}
for (std::unique_ptr<const DexFile>& cur_dex_file : cur_opened_dex_files) {
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index e10f9b3feb..27e797446e 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -303,8 +303,8 @@ bool ImageWriter::Write(int image_fd,
}
// Image data size excludes the bitmap and the header.
- ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
- ArrayRef<const uint8_t> raw_image_data(image_info.image_->Begin() + sizeof(ImageHeader),
+ ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_.Begin());
+ ArrayRef<const uint8_t> raw_image_data(image_info.image_.Begin() + sizeof(ImageHeader),
image_header->GetImageSize() - sizeof(ImageHeader));
CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
@@ -362,7 +362,7 @@ bool ImageWriter::Write(int image_fd,
// We do not want to have a corrupted image with a valid header.
// The header is uncompressed since it contains whether the image is compressed or not.
image_header->data_size_ = image_data.size();
- if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()),
+ if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_.Begin()),
sizeof(ImageHeader),
0)) {
PLOG(ERROR) << "Failed to write image file header " << image_filename;
@@ -730,14 +730,14 @@ bool ImageWriter::AllocMemory() {
image_info.CreateImageSections(unused_sections), kPageSize);
std::string error_msg;
- image_info.image_.reset(MemMap::MapAnonymous("image writer image",
- nullptr,
- length,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- if (UNLIKELY(image_info.image_.get() == nullptr)) {
+ image_info.image_ = MemMap::MapAnonymous("image writer image",
+ /* addr */ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ if (UNLIKELY(!image_info.image_.IsValid())) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
}
@@ -745,7 +745,7 @@ bool ImageWriter::AllocMemory() {
// Create the image bitmap, only needs to cover mirror object section which is up to image_end_.
CHECK_LE(image_info.image_end_, length);
image_info.image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
- "image bitmap", image_info.image_->Begin(), RoundUp(image_info.image_end_, kPageSize)));
+ "image bitmap", image_info.image_.Begin(), RoundUp(image_info.image_end_, kPageSize)));
if (image_info.image_bitmap_.get() == nullptr) {
LOG(ERROR) << "Failed to allocate memory for image bitmap";
return false;
@@ -2025,7 +2025,7 @@ void ImageWriter::CreateHeader(size_t oat_index) {
// Create the header, leave 0 for data size since we will fill this in as we are writing the
// image.
- ImageHeader* header = new (image_info.image_->Begin()) ImageHeader(
+ ImageHeader* header = new (image_info.image_.Begin()) ImageHeader(
PointerToLowMemUInt32(image_info.image_begin_),
image_end,
sections,
@@ -2163,8 +2163,8 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
if (relocation.oat_index != oat_index) {
continue;
}
- auto* dest = image_info.image_->Begin() + relocation.offset;
- DCHECK_GE(dest, image_info.image_->Begin() + image_info.image_end_);
+ auto* dest = image_info.image_.Begin() + relocation.offset;
+ DCHECK_GE(dest, image_info.image_.Begin() + image_info.image_end_);
DCHECK(!IsInBootImage(pair.first));
switch (relocation.type) {
case NativeObjectRelocationType::kArtField: {
@@ -2219,7 +2219,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
}
}
// Fixup the image method roots.
- auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
+ auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_.Begin());
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
@@ -2235,7 +2235,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
const ImageSection& intern_table_section = image_header->GetInternedStringsSection();
InternTable* const intern_table = image_info.intern_table_.get();
uint8_t* const intern_table_memory_ptr =
- image_info.image_->Begin() + intern_table_section.Offset();
+ image_info.image_.Begin() + intern_table_section.Offset();
const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
// Fixup the pointers in the newly written intern table to contain image addresses.
@@ -2260,7 +2260,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
if (image_info.class_table_bytes_ > 0u) {
const ImageSection& class_table_section = image_header->GetClassTableSection();
uint8_t* const class_table_memory_ptr =
- image_info.image_->Begin() + class_table_section.Offset();
+ image_info.image_.Begin() + class_table_section.Offset();
Thread* self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -2342,14 +2342,14 @@ void ImageWriter::CopyAndFixupObject(Object* obj) {
size_t offset = GetImageOffset(obj);
size_t oat_index = GetOatIndex(obj);
ImageInfo& image_info = GetImageInfo(oat_index);
- auto* dst = reinterpret_cast<Object*>(image_info.image_->Begin() + offset);
+ auto* dst = reinterpret_cast<Object*>(image_info.image_.Begin() + offset);
DCHECK_LT(offset, image_info.image_end_);
const auto* src = reinterpret_cast<const uint8_t*>(obj);
image_info.image_bitmap_->Set(dst); // Mark the obj as live.
const size_t n = obj->SizeOf();
- DCHECK_LE(offset + n, image_info.image_->Size());
+ DCHECK_LE(offset + n, image_info.image_.Size());
memcpy(dst, src, n);
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
@@ -2456,7 +2456,7 @@ template <typename T>
T* ImageWriter::NativeCopyLocation(T* obj) {
const NativeObjectRelocation relocation = GetNativeRelocation(obj);
const ImageInfo& image_info = GetImageInfo(relocation.oat_index);
- return reinterpret_cast<T*>(image_info.image_->Begin() + relocation.offset);
+ return reinterpret_cast<T*>(image_info.image_.Begin() + relocation.offset);
}
class ImageWriter::NativeLocationVisitor {
@@ -3011,12 +3011,12 @@ void ImageWriter::RecordImageRelocation(const void* dest,
}
// Calculate the offset within the image.
ImageInfo* image_info = &image_infos_[oat_index];
- DCHECK(image_info->image_->HasAddress(dest))
- << "MemMap range " << static_cast<const void*>(image_info->image_->Begin())
- << "-" << static_cast<const void*>(image_info->image_->End())
+ DCHECK(image_info->image_.HasAddress(dest))
+ << "MemMap range " << static_cast<const void*>(image_info->image_.Begin())
+ << "-" << static_cast<const void*>(image_info->image_.End())
<< " does not contain " << dest;
- size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_->Begin();
- ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_->Begin());
+ size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_.Begin();
+ ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_.Begin());
size_t image_end = image_header->GetClassTableSection().End();
DCHECK_LT(offset, image_end);
// Calculate the location index.
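
One detail worth noting in the `CreateHeader()` hunk above: the header is constructed with placement new directly inside the anonymous mapping, so the image bytes take their final file layout with no separate serialization step. A self-contained sketch of the idiom, with a toy `Header` standing in for `ImageHeader` (the values are illustrative):

    #include <cstdint>
    #include <new>

    struct Header { uint32_t image_begin; uint32_t image_size; };

    // Construct an object at the start of a page-aligned mapped buffer.
    // No heap allocation; the object's bytes become the first bytes of the
    // eventual image file.
    Header* EmplaceHeader(uint8_t* mapped_begin) {
      return new (mapped_begin) Header{0x70000000u, 4096u};
    }
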
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 9ab9c3eb6f..7cf555bf96 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -307,7 +307,7 @@ class ImageWriter FINAL {
// Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
size_t GetBinSizeSum(Bin up_to) const;
- std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
+ MemMap image_; // Memory mapped for generating the image.
// Target begin of this image. Notes: It is not valid to write here, this is the address
// of the target image, not necessarily where image_ is mapped. The address is only valid
@@ -408,7 +408,7 @@ class ImageWriter FINAL {
size_t offset = GetImageOffset(object);
size_t oat_index = GetOatIndex(object);
const ImageInfo& image_info = GetImageInfo(oat_index);
- uint8_t* dst = image_info.image_->Begin() + offset;
+ uint8_t* dst = image_info.image_.Begin() + offset;
return reinterpret_cast<mirror::Object*>(dst);
}
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 8bac7206c6..9045c43e03 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -654,7 +654,7 @@ bool OatWriter::WriteAndOpenDexFiles(
bool verify,
bool update_input_vdex,
CopyOption copy_dex_files,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
CHECK(write_state_ == WriteState::kAddingDexFileSources);
@@ -663,7 +663,7 @@ bool OatWriter::WriteAndOpenDexFiles(
return false;
}
- std::vector<std::unique_ptr<MemMap>> dex_files_map;
+ std::vector<MemMap> dex_files_map;
std::vector<std::unique_ptr<const DexFile>> dex_files;
// Initialize VDEX and OAT headers.
@@ -3424,12 +3424,12 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
const ArtDexFileLoader dex_file_loader;
if (oat_dex_file->source_.IsZipEntry()) {
ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
- std::unique_ptr<MemMap> mem_map;
+ MemMap mem_map;
{
TimingLogger::ScopedTiming extract("Unzip", timings_);
- mem_map.reset(zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg));
+ mem_map = zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg);
}
- if (mem_map == nullptr) {
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
return false;
}
@@ -3684,7 +3684,7 @@ bool OatWriter::WriteDexFile(OutputStream* out,
bool OatWriter::OpenDexFiles(
File* file,
bool verify,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
TimingLogger::ScopedTiming split("OpenDexFiles", timings_);
@@ -3695,16 +3695,16 @@ bool OatWriter::OpenDexFiles(
if (!extract_dex_files_into_vdex_) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
- std::vector<std::unique_ptr<MemMap>> maps;
+ std::vector<MemMap> maps;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
std::string error_msg;
- MemMap* map = oat_dex_file.source_.GetZipEntry()->MapDirectlyOrExtract(
- oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg);
- if (map == nullptr) {
+ maps.emplace_back(oat_dex_file.source_.GetZipEntry()->MapDirectlyOrExtract(
+ oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg));
+ MemMap* map = &maps.back();
+ if (!map->IsValid()) {
LOG(ERROR) << error_msg;
return false;
}
- maps.emplace_back(map);
// Now, open the dex file.
const ArtDexFileLoader dex_file_loader;
dex_files.emplace_back(dex_file_loader.Open(map->Begin(),
@@ -3735,7 +3735,7 @@ bool OatWriter::OpenDexFiles(
size_t length = vdex_size_ - map_offset;
std::string error_msg;
- std::unique_ptr<MemMap> dex_files_map(MemMap::MapFile(
+ MemMap dex_files_map = MemMap::MapFile(
length,
PROT_READ | PROT_WRITE,
MAP_SHARED,
@@ -3743,8 +3743,8 @@ bool OatWriter::OpenDexFiles(
map_offset,
/* low_4gb */ false,
file->GetPath().c_str(),
- &error_msg));
- if (dex_files_map == nullptr) {
+ &error_msg);
+ if (!dex_files_map.IsValid()) {
LOG(ERROR) << "Failed to mmap() dex files from oat file. File: " << file->GetPath()
<< " error: " << error_msg;
return false;
@@ -3753,7 +3753,7 @@ bool OatWriter::OpenDexFiles(
std::vector<std::unique_ptr<const DexFile>> dex_files;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
const uint8_t* raw_dex_file =
- dex_files_map->Begin() + oat_dex_file.dex_file_offset_ - map_offset;
+ dex_files_map.Begin() + oat_dex_file.dex_file_offset_ - map_offset;
if (kIsDebugBuild) {
// Sanity check our input files.
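
In the `OpenDexFiles()` hunk above, the map is pushed into the vector before it is validated; that is safe because destroying an invalid `MemMap` is a no-op, so the early `return false` leaks nothing. A sketch of the pattern, where `OpenOneMap` is a hypothetical producer of a `MemMap` and not part of the patch:

    std::vector<MemMap> maps;
    std::string error_msg;
    maps.emplace_back(OpenOneMap(&error_msg));  // hypothetical helper
    MemMap* map = &maps.back();  // pointer is valid until the next emplace_back
    if (!map->IsValid()) {
      LOG(ERROR) << error_msg;
      return false;  // the invalid MemMap left in `maps` destructs harmlessly
    }
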
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 9470f8c874..5202d39960 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -177,7 +177,7 @@ class OatWriter {
bool verify,
bool update_input_vdex,
CopyOption copy_dex_files,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
// Initialize the writer with the given parameters.
void Initialize(const CompilerDriver* compiler_driver,
@@ -315,7 +315,7 @@ class OatWriter {
bool update_input_vdex);
bool OpenDexFiles(File* file,
bool verify,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
size_t InitOatHeader(uint32_t num_dex_files, SafeMap<std::string, std::string>* key_value_store);
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index d73f10a6ed..0264b09eda 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -169,7 +169,7 @@ class OatTest : public CommonCompilerTest {
oat_file);
elf_writer->Start();
OutputStream* oat_rodata = elf_writer->StartRoData();
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
+ std::vector<MemMap> opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
if (!oat_writer.WriteAndOpenDexFiles(
vdex_file,
@@ -246,7 +246,7 @@ class OatTest : public CommonCompilerTest {
return false;
}
- for (std::unique_ptr<MemMap>& map : opened_dex_files_maps) {
+ for (MemMap& map : opened_dex_files_maps) {
opened_dex_files_maps_.emplace_back(std::move(map));
}
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
@@ -261,7 +261,7 @@ class OatTest : public CommonCompilerTest {
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
+ std::vector<MemMap> opened_dex_files_maps_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
};
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 5cea869519..c417d019e6 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -61,6 +61,21 @@ using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
+// Retrieve iterator to a `gMaps` entry that is known to exist.
+Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
+ DCHECK(map.IsValid());
+ DCHECK(gMaps != nullptr);
+ for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
+ it != end && it->first == map.BaseBegin();
+ ++it) {
+ if (it->second == &map) {
+ return it;
+ }
+ }
+ LOG(FATAL) << "MemMap not found";
+ UNREACHABLE();
+}
+
std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
os << "MemMap:" << std::endl;
for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
@@ -231,20 +246,21 @@ void* MemMap::TryMemMapLow4GB(void* ptr,
}
#endif
-MemMap* MemMap::MapAnonymous(const char* name,
- uint8_t* expected_ptr,
- size_t byte_count,
- int prot,
- bool low_4gb,
- bool reuse,
- std::string* error_msg,
- bool use_ashmem) {
+MemMap MemMap::MapAnonymous(const char* name,
+ uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ bool reuse,
+ std::string* error_msg,
+ bool use_ashmem) {
#ifndef __LP64__
UNUSED(low_4gb);
#endif
use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
if (byte_count == 0) {
- return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
+ *error_msg = "Empty MemMap requested.";
+ return Invalid();
}
size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
@@ -252,9 +268,9 @@ MemMap* MemMap::MapAnonymous(const char* name,
if (reuse) {
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
- CHECK(expected_ptr != nullptr);
+ CHECK(addr != nullptr);
- DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
+ DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
flags |= MAP_FIXED;
}
@@ -296,7 +312,7 @@ MemMap* MemMap::MapAnonymous(const char* name,
// We need to store and potentially set an error number for pretty printing of errors
int saved_errno = 0;
- void* actual = MapInternal(expected_ptr,
+ void* actual = MapInternal(addr,
page_aligned_byte_count,
prot,
flags,
@@ -313,28 +329,33 @@ MemMap* MemMap::MapAnonymous(const char* name,
*error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
"See process maps in the log.",
- expected_ptr,
+ addr,
page_aligned_byte_count,
prot,
flags,
fd.get(),
strerror(saved_errno));
}
- return nullptr;
+ return Invalid();
}
- if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
- return nullptr;
+ if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
+ return Invalid();
}
- return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
- page_aligned_byte_count, prot, reuse);
+ return MemMap(name,
+ reinterpret_cast<uint8_t*>(actual),
+ byte_count,
+ actual,
+ page_aligned_byte_count,
+ prot,
+ reuse);
}
-MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
+MemMap MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
if (byte_count == 0) {
- return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
+ return Invalid();
}
const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
- return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
+ return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
template<typename A, typename B>
@@ -342,19 +363,18 @@ static ptrdiff_t PointerDiff(A* a, B* b) {
return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
}
-bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
#if !HAVE_MREMAP_SYSCALL
- UNUSED(source_ptr);
+ UNUSED(source);
*error = "Cannot perform atomic replace because we are missing the required mremap syscall";
return false;
#else // !HAVE_MREMAP_SYSCALL
- CHECK(source_ptr != nullptr);
- CHECK(*source_ptr != nullptr);
+ CHECK(source != nullptr);
+ CHECK(source->IsValid());
if (!MemMap::kCanReplaceMapping) {
*error = "Unable to perform atomic replace due to runtime environment!";
return false;
}
- MemMap* source = *source_ptr;
// neither can be reuse.
if (source->reuse_ || reuse_) {
*error = "One or both mappings is not a real mmap!";
@@ -406,12 +426,9 @@ bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
// them later.
size_t new_base_size = std::max(source->base_size_, base_size_);
- // Delete the old source, don't unmap it though (set reuse) since it is already gone.
- *source_ptr = nullptr;
+ // Invalidate *source, don't unmap it though since it is already gone.
size_t source_size = source->size_;
- source->already_unmapped_ = true;
- delete source;
- source = nullptr;
+ source->Invalidate();
size_ = source_size;
base_size_ = new_base_size;
@@ -422,16 +439,16 @@ bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
#endif // !HAVE_MREMAP_SYSCALL
}
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
- size_t byte_count,
- int prot,
- int flags,
- int fd,
- off_t start,
- bool low_4gb,
- bool reuse,
- const char* filename,
- std::string* error_msg) {
+MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
+ size_t byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t start,
+ bool low_4gb,
+ bool reuse,
+ const char* filename,
+ std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
@@ -452,7 +469,7 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
}
if (byte_count == 0) {
- return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
+ return Invalid();
}
// Adjust 'offset' to be page-aligned as required by mmap.
int page_offset = start % kPageSize;
@@ -491,10 +508,10 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
static_cast<int64_t>(page_aligned_offset), filename,
strerror(saved_errno));
}
- return nullptr;
+ return Invalid();
}
if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
- return nullptr;
+ return Invalid();
}
if (redzone_size != 0) {
const uint8_t *real_start = actual + page_offset;
@@ -506,14 +523,27 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
page_aligned_byte_count -= redzone_size;
}
- return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
- prot, reuse, redzone_size);
+ return MemMap(filename,
+ actual + page_offset,
+ byte_count,
+ actual,
+ page_aligned_byte_count,
+ prot,
+ reuse,
+ redzone_size);
+}
+
+MemMap::MemMap(MemMap&& other)
+ : MemMap() {
+ swap(other);
}
MemMap::~MemMap() {
- if (base_begin_ == nullptr && base_size_ == 0) {
- return;
- }
+ Reset();
+}
+
+void MemMap::DoReset() {
+ DCHECK(IsValid());
// Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
// before it is returned to the system.
@@ -533,19 +563,56 @@ MemMap::~MemMap() {
}
}
+ Invalidate();
+}
+
+void MemMap::Invalidate() {
+ DCHECK(IsValid());
+
// Remove it from gMaps.
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
- bool found = false;
- DCHECK(gMaps != nullptr);
- for (auto it = gMaps->lower_bound(base_begin_), end = gMaps->end();
- it != end && it->first == base_begin_; ++it) {
- if (it->second == this) {
- found = true;
- gMaps->erase(it);
- break;
+ auto it = GetGMapsEntry(*this);
+ gMaps->erase(it);
+
+ // Mark it as invalid.
+ base_size_ = 0u;
+ DCHECK(!IsValid());
+}
+
+void MemMap::swap(MemMap& other) {
+ if (IsValid() || other.IsValid()) {
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ DCHECK(gMaps != nullptr);
+ auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
+ auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
+ if (IsValid()) {
+ DCHECK(this_it != gMaps->end());
+ DCHECK_EQ(this_it->second, this);
+ this_it->second = &other;
+ }
+ if (other.IsValid()) {
+ DCHECK(other_it != gMaps->end());
+ DCHECK_EQ(other_it->second, &other);
+ other_it->second = this;
}
+ // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
+ // with the `gMaps` key when other threads try to use `gMaps`.
+ SwapMembers(other);
+ } else {
+ SwapMembers(other);
}
- CHECK(found) << "MemMap not found";
+}
+
+void MemMap::SwapMembers(MemMap& other) {
+ name_.swap(other.name_);
+ std::swap(begin_, other.begin_);
+ std::swap(size_, other.size_);
+ std::swap(base_begin_, other.base_begin_);
+ std::swap(base_size_, other.base_size_);
+ std::swap(prot_, other.prot_);
+ std::swap(reuse_, other.reuse_);
+ std::swap(already_unmapped_, other.already_unmapped_);
+ std::swap(redzone_size_, other.redzone_size_);
}
MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
@@ -568,8 +635,11 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_
}
}
-MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
- std::string* error_msg, bool use_ashmem) {
+MemMap MemMap::RemapAtEnd(uint8_t* new_end,
+ const char* tail_name,
+ int tail_prot,
+ std::string* error_msg,
+ bool use_ashmem) {
use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
@@ -583,11 +653,11 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
uint8_t* new_base_end = new_end;
DCHECK_LE(new_base_end, old_base_end);
if (new_base_end == old_base_end) {
- return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
+ return Invalid();
}
- size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
- base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
- DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
+ size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
+ size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
+ DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
size_t tail_size = old_end - new_end;
uint8_t* tail_base_begin = new_base_end;
size_t tail_base_size = old_base_end - new_base_end;
@@ -595,7 +665,7 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
DCHECK_ALIGNED(tail_base_size, kPageSize);
unique_fd fd;
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
if (use_ashmem) {
// android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
// prefixed "dalvik-".
@@ -606,23 +676,14 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
if (fd.get() == -1) {
*error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
tail_name, strerror(errno));
- return nullptr;
+ return Invalid();
}
}
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
- // Unmap/map the tail region.
- int result = TargetMUnmap(tail_base_begin, tail_base_size);
- if (result == -1) {
- PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
- *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
- tail_base_begin, tail_base_size, name_.c_str());
- return nullptr;
- }
- // Don't cause memory allocation between the munmap and the mmap
- // calls. Otherwise, libc (or something else) might take this memory
- // region. Note this isn't perfect as there's no way to prevent
- // other threads to try to take this memory region here.
+ // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
+ // removes old mappings for the overlapping region. This makes the operation atomic
+ // and prevents other threads from racing to allocate memory in the requested region.
uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
tail_base_size,
tail_prot,
@@ -634,9 +695,18 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
*error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
"maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
fd.get());
- return nullptr;
+ return Invalid();
+ }
+ // Update *this.
+ if (new_base_size == 0u) {
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ auto it = GetGMapsEntry(*this);
+ gMaps->erase(it);
}
- return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
+ size_ = new_size;
+ base_size_ = new_base_size;
+ // Return the new mapping.
+ return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
void MemMap::MadviseDontNeedAndZero() {
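
The `MAP_FIXED` change in `RemapAtEnd()` above replaces an explicit munmap()/mmap() pair: mapping over the tail with `MAP_FIXED` atomically discards the old pages, closing the window in which another thread could grab the address range. A POSIX sketch of just that step:

    #include <sys/mman.h>
    #include <cstddef>

    // Map anonymous pages over [tail_begin, tail_begin + tail_len) in one step.
    // MAP_FIXED atomically replaces any existing mapping in that range, so
    // there is no unmapped window for another thread to race into.
    void* MapOverTail(void* tail_begin, size_t tail_len, int prot) {
      return mmap(tail_begin, tail_len, prot,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                  /* fd */ -1, /* offset */ 0);
    }
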
@@ -675,15 +745,15 @@ bool MemMap::Protect(int prot) {
return false;
}
-bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
+bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
- CHECK(begin_map != nullptr);
- CHECK(end_map != nullptr);
+ CHECK(begin_map.IsValid());
+ CHECK(end_map.IsValid());
CHECK(HasMemMap(begin_map));
CHECK(HasMemMap(end_map));
- CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
- MemMap* map = begin_map;
- while (map->BaseBegin() != end_map->BaseBegin()) {
+ CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
+ MemMap* map = &begin_map;
+ while (map->BaseBegin() != end_map.BaseBegin()) {
MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
if (next_map == nullptr) {
// Found a gap.
@@ -758,11 +828,11 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
}
}
-bool MemMap::HasMemMap(MemMap* map) {
- void* base_begin = map->BaseBegin();
+bool MemMap::HasMemMap(MemMap& map) {
+ void* base_begin = map.BaseBegin();
for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
it != end && it->first == base_begin; ++it) {
- if (it->second == map) {
+ if (it->second == &map) {
return true;
}
}
@@ -1049,6 +1119,7 @@ void MemMap::AlignBy(size_t size) {
CHECK_EQ(size_, base_size_) << "Unsupported";
CHECK_GT(size, static_cast<size_t>(kPageSize));
CHECK_ALIGNED(size, kPageSize);
+ CHECK(!reuse_);
if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
IsAlignedParam(base_size_, size)) {
// Already aligned.
@@ -1079,17 +1150,17 @@ void MemMap::AlignBy(size_t size) {
<< " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
}
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ if (base_begin < aligned_base_begin) {
+ auto it = GetGMapsEntry(*this);
+ // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
+ gMaps->erase(it);
+ gMaps->insert(std::make_pair(aligned_base_begin, this));
+ }
base_begin_ = aligned_base_begin;
base_size_ = aligned_base_size;
begin_ = aligned_base_begin;
size_ = aligned_base_size;
DCHECK(gMaps != nullptr);
- if (base_begin < aligned_base_begin) {
- auto it = gMaps->find(base_begin);
- CHECK(it != gMaps->end()) << "MemMap not found";
- gMaps->erase(it);
- gMaps->insert(std::make_pair(base_begin_, this));
- }
}
} // namespace art
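
The subtle part of `MemMap::swap()` above is that `gMaps` keys entries by `BaseBegin()` and stores raw object pointers, so a member swap must retarget both registry entries under the same lock that guards the swap. A generic, self-contained sketch of the invariant, using toy `Region`/registry names rather than ART's:

    #include <cstddef>
    #include <map>
    #include <mutex>
    #include <utility>

    struct Region { void* base = nullptr; size_t size = 0; };

    std::mutex g_lock;                         // stands in for mem_maps_lock_
    std::multimap<void*, Region*> g_registry;  // stands in for gMaps

    void RegistryAwareSwap(Region& a, Region& b) {
      std::lock_guard<std::mutex> guard(g_lock);
      // Retarget the registry first (a linear scan for brevity; the real code
      // uses lower_bound on the key), then swap members while still holding
      // the lock, so no reader sees a key that disagrees with its value.
      for (auto& entry : g_registry) {
        if (entry.second == &a) { entry.second = &b; }
        else if (entry.second == &b) { entry.second = &a; }
      }
      std::swap(a.base, b.base);
      std::swap(a.size, b.size);
    }
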
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 1979357714..525fade9c1 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -60,6 +60,37 @@ class MemMap {
public:
static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
+ // Creates an invalid mapping.
+ MemMap() {}
+
+ // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
+ static MemMap Invalid() {
+ return MemMap();
+ }
+
+ MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
+ MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
+ Reset();
+ swap(other);
+ return *this;
+ }
+
+ // Releases the memory mapping.
+ ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+
+ // Swap two MemMaps.
+ void swap(MemMap& other);
+
+ void Reset() {
+ if (IsValid()) {
+ DoReset();
+ }
+ }
+
+ bool IsValid() const {
+ return base_size_ != 0u;
+ }
+
// Replace the data in this memmap with the data in the memmap pointed to by source. The caller
// relinquishes ownership of the source mmap.
//
@@ -74,15 +105,14 @@ class MemMap {
// * mremap must succeed when called on the mappings.
//
// If this call succeeds it will return true and:
- // * Deallocate *source
- // * Sets *source to nullptr
+ // * Invalidate *source
// * The protection of this will remain the same.
// * The size of this will be the size of the source
// * The data in this will be the data from source.
//
// If this call fails it will return false and make no changes to *source or this. The ownership
// of the source mmap is returned to the caller.
- bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+ bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
@@ -92,34 +122,34 @@ class MemMap {
// 'name' will be used -- on systems that support it -- to give the mapping
// a name.
//
- // On success, returns returns a MemMap instance. On failure, returns null.
- static MemMap* MapAnonymous(const char* name,
- uint8_t* addr,
- size_t byte_count,
- int prot,
- bool low_4gb,
- bool reuse,
- std::string* error_msg,
- bool use_ashmem = true);
+ // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
+ static MemMap MapAnonymous(const char* name,
+ uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ bool reuse,
+ std::string* error_msg,
+ bool use_ashmem = true);
// Create placeholder for a region allocated by direct call to mmap.
// This is useful when we do not have control over the code calling mmap,
// but when we still want to keep track of it in the list.
// The region is not considered to be owned and will not be unmapped.
- static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);
+ static MemMap MapDummy(const char* name, uint8_t* addr, size_t byte_count);
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative.
//
- // On success, returns returns a MemMap instance. On failure, returns null.
- static MemMap* MapFile(size_t byte_count,
- int prot,
- int flags,
- int fd,
- off_t start,
- bool low_4gb,
- const char* filename,
- std::string* error_msg) {
+ // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
+ static MemMap MapFile(size_t byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t start,
+ bool low_4gb,
+ const char* filename,
+ std::string* error_msg) {
return MapFileAtAddress(nullptr,
byte_count,
prot,
@@ -139,20 +169,17 @@ class MemMap {
// MapFileAtAddress fails. This helps improve performance of the fail case since reading and
// printing /proc/maps takes several milliseconds in the worst case.
//
- // On success, returns returns a MemMap instance. On failure, returns null.
- static MemMap* MapFileAtAddress(uint8_t* addr,
- size_t byte_count,
- int prot,
- int flags,
- int fd,
- off_t start,
- bool low_4gb,
- bool reuse,
- const char* filename,
- std::string* error_msg);
-
- // Releases the memory mapping.
- ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+ // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
+ static MemMap MapFileAtAddress(uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t start,
+ bool low_4gb,
+ bool reuse,
+ const char* filename,
+ std::string* error_msg);
const std::string& GetName() const {
return name_;
@@ -200,13 +227,13 @@ class MemMap {
}
// Unmap the pages at end and remap them to create another memory map.
- MemMap* RemapAtEnd(uint8_t* new_end,
- const char* tail_name,
- int tail_prot,
- std::string* error_msg,
- bool use_ashmem = true);
+ MemMap RemapAtEnd(uint8_t* new_end,
+ const char* tail_name,
+ int tail_prot,
+ std::string* error_msg,
+ bool use_ashmem = true);
- static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
+ static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
REQUIRES(!MemMap::mem_maps_lock_);
static void DumpMaps(std::ostream& os, bool terse = false)
REQUIRES(!MemMap::mem_maps_lock_);
@@ -240,9 +267,13 @@ class MemMap {
bool reuse,
size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
+ void DoReset();
+ void Invalidate();
+ void SwapMembers(MemMap& other);
+
static void DumpMapsLocked(std::ostream& os, bool terse)
REQUIRES(MemMap::mem_maps_lock_);
- static bool HasMemMap(MemMap* map)
+ static bool HasMemMap(MemMap& map)
REQUIRES(MemMap::mem_maps_lock_);
static MemMap* GetLargestMemMapAt(void* address)
REQUIRES(MemMap::mem_maps_lock_);
@@ -271,23 +302,23 @@ class MemMap {
size_t byte_count,
std::string* error_msg);
- const std::string name_;
- uint8_t* begin_; // Start of data. May be changed by AlignBy.
- size_t size_; // Length of data.
+ std::string name_;
+ uint8_t* begin_ = nullptr; // Start of data. May be changed by AlignBy.
+ size_t size_ = 0u; // Length of data.
- void* base_begin_; // Page-aligned base address. May be changed by AlignBy.
- size_t base_size_; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
- int prot_; // Protection of the map.
+ void* base_begin_ = nullptr; // Page-aligned base address. May be changed by AlignBy.
+ size_t base_size_ = 0u; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
+ int prot_ = 0; // Protection of the map.
// When reuse_ is true, this is just a view of an existing mapping
// and we do not take ownership and are not responsible for
// unmapping.
- const bool reuse_;
+ bool reuse_ = false;
// When already_unmapped_ is true the destructor will not call munmap.
- bool already_unmapped_;
+ bool already_unmapped_ = false;
- const size_t redzone_size_;
+ size_t redzone_size_ = 0u;
#if USE_ART_LOW_4G_ALLOCATOR
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
@@ -309,6 +340,10 @@ class MemMap {
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
+inline void swap(MemMap& lhs, MemMap& rhs) {
+ lhs.swap(rhs);
+}
+
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
// Zero and release pages if possible, no requirements on alignments.
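
Putting the header's new surface together, a short usage sketch of the value API declared above, with error handling trimmed to CHECKs:

    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("demo",
                                      /* addr */ nullptr,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
                                      /* reuse */ false,
                                      &error_msg);
    CHECK(map.IsValid()) << error_msg;

    MemMap moved = std::move(map);  // move ctor: `map` is left invalid
    CHECK(!map.IsValid());
    swap(map, moved);               // the free swap() declared above
    map.Reset();                    // unmap now rather than at ~MemMap()
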
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index c575c7a31f..b2f5c728e4 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -30,14 +30,6 @@ namespace art {
class MemMapTest : public CommonArtTest {
public:
- static uint8_t* BaseBegin(MemMap* mem_map) {
- return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
- }
-
- static size_t BaseSize(MemMap* mem_map) {
- return mem_map->base_size_;
- }
-
static bool IsAddressMapped(void* addr) {
bool res = msync(addr, 1, MS_SYNC) == 0;
if (!res && errno != ENOMEM) {
@@ -60,15 +52,15 @@ class MemMapTest : public CommonArtTest {
static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
// Find a valid map address and unmap it before returning.
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
- nullptr,
- size,
- PROT_READ,
- low_4gb,
- false,
- &error_msg));
- CHECK(map != nullptr);
- return map->Begin();
+ MemMap map = MemMap::MapAnonymous("temp",
+ /* addr */ nullptr,
+ size,
+ PROT_READ,
+ low_4gb,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(map.IsValid());
+ return map.Begin();
}
static void RemapAtEndTest(bool low_4gb) {
@@ -76,37 +68,38 @@ class MemMapTest : public CommonArtTest {
// Cast the page size to size_t.
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a two-page memory region.
- MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
- nullptr,
- 2 * page_size,
- PROT_READ | PROT_WRITE,
- low_4gb,
- false,
- &error_msg);
+ MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+ /* addr */ nullptr,
+ 2 * page_size,
+ PROT_READ | PROT_WRITE,
+ low_4gb,
+ /* reuse */ false,
+ &error_msg);
// Check its state and write to it.
- uint8_t* base0 = m0->Begin();
+ ASSERT_TRUE(m0.IsValid());
+ uint8_t* base0 = m0.Begin();
ASSERT_TRUE(base0 != nullptr) << error_msg;
- size_t size0 = m0->Size();
- EXPECT_EQ(m0->Size(), 2 * page_size);
- EXPECT_EQ(BaseBegin(m0), base0);
- EXPECT_EQ(BaseSize(m0), size0);
+ size_t size0 = m0.Size();
+ EXPECT_EQ(m0.Size(), 2 * page_size);
+ EXPECT_EQ(m0.BaseBegin(), base0);
+ EXPECT_EQ(m0.BaseSize(), size0);
memset(base0, 42, 2 * page_size);
// Remap the latter half into a second MemMap.
- MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
- "MemMapTest_RemapAtEndTest_map1",
- PROT_READ | PROT_WRITE,
- &error_msg);
+ MemMap m1 = m0.RemapAtEnd(base0 + page_size,
+ "MemMapTest_RemapAtEndTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
// Check the states of the two maps.
- EXPECT_EQ(m0->Begin(), base0) << error_msg;
- EXPECT_EQ(m0->Size(), page_size);
- EXPECT_EQ(BaseBegin(m0), base0);
- EXPECT_EQ(BaseSize(m0), page_size);
- uint8_t* base1 = m1->Begin();
- size_t size1 = m1->Size();
+ EXPECT_EQ(m0.Begin(), base0) << error_msg;
+ EXPECT_EQ(m0.Size(), page_size);
+ EXPECT_EQ(m0.BaseBegin(), base0);
+ EXPECT_EQ(m0.BaseSize(), page_size);
+ uint8_t* base1 = m1.Begin();
+ size_t size1 = m1.Size();
EXPECT_EQ(base1, base0 + page_size);
EXPECT_EQ(size1, page_size);
- EXPECT_EQ(BaseBegin(m1), base1);
- EXPECT_EQ(BaseSize(m1), size1);
+ EXPECT_EQ(m1.BaseBegin(), base1);
+ EXPECT_EQ(m1.BaseSize(), size1);
// Write to the second region.
memset(base1, 43, page_size);
// Check the contents of the two regions.
@@ -117,13 +110,18 @@ class MemMapTest : public CommonArtTest {
EXPECT_EQ(base1[i], 43);
}
// Unmap the first region.
- delete m0;
+ m0.Reset();
// Make sure the second region is still accessible after the first
// region is unmapped.
for (size_t i = 0; i < page_size; ++i) {
EXPECT_EQ(base1[i], 43);
}
- delete m1;
+ MemMap m2 = m1.RemapAtEnd(m1.Begin(),
+ "MemMapTest_RemapAtEndTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ ASSERT_TRUE(m2.IsValid()) << error_msg;
+ ASSERT_FALSE(m1.IsValid());
}
void CommonInit() {
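
The new tail of `RemapAtEndTest` above exercises an edge case: remapping at `Begin()` makes the whole region the tail, so the source map ends up invalid while the returned map owns everything. In sketch form:

    // After this call, `m2` owns the entire former range of `m1`,
    // `m1.IsValid()` is false, and `m1`'s destructor has nothing to unmap.
    MemMap m2 = m1.RemapAtEnd(m1.Begin(), "tail-takes-all", PROT_READ, &error_msg);
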
@@ -168,232 +166,241 @@ TEST_F(MemMapTest, Start) {
#if HAVE_MREMAP_SYSCALL
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
- kPageSize,
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- nullptr,
- kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- void* source_addr = source->Begin();
- void* dest_addr = dest->Begin();
+ MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ void* source_addr = source.Begin();
+ void* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
std::vector<uint8_t> data = RandomData(kPageSize);
- memcpy(source->Begin(), data.data(), data.size());
+ memcpy(source.Begin(), data.data(), data.size());
- ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_TRUE(source == nullptr);
+ ASSERT_FALSE(source.IsValid());
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
- ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
- 5 * kPageSize, // Need to make it larger
- // initially so we know
- // there won't be mappings
- // in the way we we move
- // source.
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- nullptr,
- 3 * kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- uint8_t* source_addr = source->Begin();
- uint8_t* dest_addr = dest->Begin();
+ MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ /* addr */ nullptr,
+ 5 * kPageSize, // Need to make it larger
+ // initially so we know
+ // there won't be mappings
+ // in the way we move
+ // source.
+ PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ /* addr */ nullptr,
+ 3 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ uint8_t* source_addr = source.Begin();
+ uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
// Fill the source with random data.
std::vector<uint8_t> data = RandomData(3 * kPageSize);
- memcpy(source->Begin(), data.data(), data.size());
+ memcpy(source.Begin(), data.data(), data.size());
// Make the dest smaller so that we know we'll have space.
- dest->SetSize(kPageSize);
+ dest.SetSize(kPageSize);
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
- ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_TRUE(source == nullptr);
+ ASSERT_FALSE(source.IsValid());
- ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
- 3 * kPageSize,
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- nullptr,
- kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- uint8_t* source_addr = source->Begin();
- uint8_t* dest_addr = dest->Begin();
+ MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ /* addr */ nullptr,
+ 3 * kPageSize,
+ PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ uint8_t* source_addr = source.Begin();
+ uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
std::vector<uint8_t> data = RandomData(kPageSize);
- memcpy(source->Begin(), data.data(), kPageSize);
+ memcpy(source.Begin(), data.data(), kPageSize);
- ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_TRUE(source == nullptr);
+ ASSERT_FALSE(source.IsValid());
- ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(
+ MemMap dest =
MemMap::MapAnonymous(
"MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
+ /* addr */ nullptr,
3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
// the way we move source.
PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
- dest->SetSize(kPageSize);
+ dest.SetSize(kPageSize);
// Create source from the last 2 pages
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- dest->Begin() + kPageSize,
- 2 * kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- MemMap* orig_source = source;
- ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
- uint8_t* source_addr = source->Begin();
- uint8_t* dest_addr = dest->Begin();
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ dest.Begin() + kPageSize,
+ 2 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
+ uint8_t* source_addr = source.Begin();
+ uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
// Fill the source and dest with random data.
std::vector<uint8_t> data = RandomData(2 * kPageSize);
- memcpy(source->Begin(), data.data(), data.size());
+ memcpy(source.Begin(), data.data(), data.size());
std::vector<uint8_t> dest_data = RandomData(kPageSize);
- memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+ memcpy(dest.Begin(), dest_data.data(), dest_data.size());
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
- ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
- ASSERT_TRUE(source == orig_source);
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_EQ(source->Size(), data.size());
- ASSERT_EQ(dest->Size(), dest_data.size());
+ ASSERT_EQ(source.Size(), data.size());
+ ASSERT_EQ(dest.Size(), dest_data.size());
- ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
- ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
-
- delete source;
+ ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
#endif // HAVE_MREMAP_SYSCALL
TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- 0,
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
- ASSERT_TRUE(error_msg.empty());
- map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+ /* addr */ nullptr,
+ 0,
+ PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid()) << error_msg;
+ ASSERT_FALSE(error_msg.empty());
+
+ error_msg.clear();
+ map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
}
TEST_F(MemMapTest, MapAnonymousFailNullError) {
CommonInit();
// Test that we don't crash with a null error_str when mapping at an invalid location.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
- reinterpret_cast<uint8_t*>(kPageSize),
- 0x20000,
- PROT_READ | PROT_WRITE,
- false,
- false,
- nullptr));
- ASSERT_EQ(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
+ reinterpret_cast<uint8_t*>(kPageSize),
+ 0x20000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ nullptr);
+ ASSERT_FALSE(map.IsValid());
}
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+ /* addr */ nullptr,
+ 0,
+ PROT_READ,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid()) << error_msg;
+ ASSERT_FALSE(error_msg.empty());
+
+ error_msg.clear();
+ map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+ ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
TEST_F(MemMapTest, MapFile32Bit) {
CommonInit();
@@ -402,18 +409,18 @@ TEST_F(MemMapTest, MapFile32Bit) {
constexpr size_t kMapSize = kPageSize;
std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
- std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
- PROT_READ,
- MAP_PRIVATE,
- scratch_file.GetFd(),
- /*start*/0,
- /*low_4gb*/true,
- scratch_file.GetFilename().c_str(),
- &error_msg));
- ASSERT_TRUE(map != nullptr) << error_msg;
+ MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+ PROT_READ,
+ MAP_PRIVATE,
+ scratch_file.GetFd(),
+ /*start*/0,
+ /*low_4gb*/true,
+ scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(map->Size(), kMapSize);
- ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+ ASSERT_EQ(map.Size(), kMapSize);
+ ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
#endif
@@ -423,36 +430,36 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
// Find a valid address.
uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
// Map at an address that should work, which should succeed.
- std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
- valid_address,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+ MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+ valid_address,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_TRUE(map0->BaseBegin() == valid_address);
+ ASSERT_TRUE(map0.BaseBegin() == valid_address);
// Map at an unspecified address, which should succeed.
- std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+ MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_TRUE(map1->BaseBegin() != nullptr);
+ ASSERT_TRUE(map1.BaseBegin() != nullptr);
// Attempt to map at the same address, which should fail.
- std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
- reinterpret_cast<uint8_t*>(map1->BaseBegin()),
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map2.get() == nullptr) << error_msg;
+ MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+ reinterpret_cast<uint8_t*>(map1.BaseBegin()),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_FALSE(map2.IsValid()) << error_msg;
ASSERT_TRUE(!error_msg.empty());
}
@@ -480,23 +487,23 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
// Try all addresses starting from 2GB to 4GB.
size_t start_addr = 2 * GB;
std::string error_msg;
- std::unique_ptr<MemMap> map;
+ MemMap map;
for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
- map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
- reinterpret_cast<uint8_t*>(start_addr),
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- false,
- &error_msg));
- if (map != nullptr) {
+ map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+ reinterpret_cast<uint8_t*>(start_addr),
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ true,
+ /* reuse */ false,
+ &error_msg);
+ if (map.IsValid()) {
break;
}
}
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
- ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
+ ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
+ ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}
TEST_F(MemMapTest, MapAnonymousOverflow) {
@@ -504,14 +511,14 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
std::string error_msg;
uintptr_t ptr = 0;
ptr -= kPageSize; // Now it's close to the top.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
- reinterpret_cast<uint8_t*>(ptr),
- 2 * kPageSize, // brings it over the top.
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_EQ(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
+ reinterpret_cast<uint8_t*>(ptr),
+ 2 * kPageSize, // brings it over the top.
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
}
@@ -519,29 +526,29 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(
+ MemMap map =
MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
kPageSize,
PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- ASSERT_EQ(nullptr, map.get());
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
}
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
- reinterpret_cast<uint8_t*>(0xF0000000),
- 0x20000000,
- PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- ASSERT_EQ(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
+ reinterpret_cast<uint8_t*>(0xF0000000),
+ 0x20000000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
}
#endif
@@ -549,23 +556,23 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
TEST_F(MemMapTest, MapAnonymousReuse) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
- nullptr,
- 0x20000,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_NE(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
+ nullptr,
+ 0x20000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid());
ASSERT_TRUE(error_msg.empty());
- std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
- reinterpret_cast<uint8_t*>(map->BaseBegin()),
- 0x10000,
- PROT_READ | PROT_WRITE,
- false,
- true,
- &error_msg));
- ASSERT_NE(nullptr, map2.get());
+ MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
+ reinterpret_cast<uint8_t*>(map.BaseBegin()),
+ 0x10000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ true,
+ &error_msg);
+ ASSERT_TRUE(map2.IsValid());
ASSERT_TRUE(error_msg.empty());
}
@@ -574,65 +581,65 @@ TEST_F(MemMapTest, CheckNoGaps) {
std::string error_msg;
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
- nullptr,
- kPageSize * kNumPages,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ MemMap map = MemMap::MapAnonymous("MapAnonymous0",
+ /* addr */ nullptr,
+ kPageSize * kNumPages,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
// Record the base address.
- uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
+ uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
// Unmap it.
- map.reset();
+ map.Reset();
// Map at the same address, but in page-sized separate mem maps,
// assuming the space at the address is still available.
- std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
- map_base,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+ MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+ map_base,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
- map_base + kPageSize,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+ MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+ map_base + kPageSize,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
- map_base + kPageSize * 2,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map2.get() != nullptr) << error_msg;
+ MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+ map_base + kPageSize * 2,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
// One-map cases.
- ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));
// Two or three-map cases.
- ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));
// Unmap the middle one.
- map1.reset();
+ map1.Reset();
// Should return false now that there's a gap in the middle.
- ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+ ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
}
TEST_F(MemMapTest, AlignBy) {
@@ -641,52 +648,53 @@ TEST_F(MemMapTest, AlignBy) {
// Cast the page size to size_t.
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a region.
- std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
- nullptr,
- 14 * page_size,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- uint8_t* base0 = m0->Begin();
+ MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
+ /* addr */ nullptr,
+ 14 * page_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ ASSERT_TRUE(m0.IsValid());
+ uint8_t* base0 = m0.Begin();
ASSERT_TRUE(base0 != nullptr) << error_msg;
- ASSERT_EQ(m0->Size(), 14 * page_size);
- ASSERT_EQ(BaseBegin(m0.get()), base0);
- ASSERT_EQ(BaseSize(m0.get()), m0->Size());
+ ASSERT_EQ(m0.Size(), 14 * page_size);
+ ASSERT_EQ(m0.BaseBegin(), base0);
+ ASSERT_EQ(m0.BaseSize(), m0.Size());
// Break it into several regions by using RemapAtEnd.
- std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
- "MemMapTest_AlignByTest_map1",
- PROT_READ | PROT_WRITE,
- &error_msg));
- uint8_t* base1 = m1->Begin();
+ MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
+ "MemMapTest_AlignByTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ uint8_t* base1 = m1.Begin();
ASSERT_TRUE(base1 != nullptr) << error_msg;
ASSERT_EQ(base1, base0 + 3 * page_size);
- ASSERT_EQ(m0->Size(), 3 * page_size);
+ ASSERT_EQ(m0.Size(), 3 * page_size);
- std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
- "MemMapTest_AlignByTest_map2",
- PROT_READ | PROT_WRITE,
- &error_msg));
- uint8_t* base2 = m2->Begin();
+ MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
+ "MemMapTest_AlignByTest_map2",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ uint8_t* base2 = m2.Begin();
ASSERT_TRUE(base2 != nullptr) << error_msg;
ASSERT_EQ(base2, base1 + 4 * page_size);
- ASSERT_EQ(m1->Size(), 4 * page_size);
+ ASSERT_EQ(m1.Size(), 4 * page_size);
- std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
- "MemMapTest_AlignByTest_map1",
- PROT_READ | PROT_WRITE,
- &error_msg));
- uint8_t* base3 = m3->Begin();
+ MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
+ "MemMapTest_AlignByTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ uint8_t* base3 = m3.Begin();
ASSERT_TRUE(base3 != nullptr) << error_msg;
ASSERT_EQ(base3, base2 + 3 * page_size);
- ASSERT_EQ(m2->Size(), 3 * page_size);
- ASSERT_EQ(m3->Size(), 4 * page_size);
+ ASSERT_EQ(m2.Size(), 3 * page_size);
+ ASSERT_EQ(m3.Size(), 4 * page_size);
- uint8_t* end0 = base0 + m0->Size();
- uint8_t* end1 = base1 + m1->Size();
- uint8_t* end2 = base2 + m2->Size();
- uint8_t* end3 = base3 + m3->Size();
+ uint8_t* end0 = base0 + m0.Size();
+ uint8_t* end1 = base1 + m1.Size();
+ uint8_t* end2 = base2 + m2.Size();
+ uint8_t* end3 = base3 + m3.Size();
ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
@@ -703,39 +711,39 @@ TEST_F(MemMapTest, AlignBy) {
}
// Align by 2 * page_size;
- m0->AlignBy(2 * page_size);
- m1->AlignBy(2 * page_size);
- m2->AlignBy(2 * page_size);
- m3->AlignBy(2 * page_size);
+ m0.AlignBy(2 * page_size);
+ m1.AlignBy(2 * page_size);
+ m2.AlignBy(2 * page_size);
+ m3.AlignBy(2 * page_size);
- EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));
if (IsAlignedParam(base0, 2 * page_size)) {
- EXPECT_EQ(m0->Begin(), base0);
- EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
- EXPECT_EQ(m1->Begin(), base1 + page_size);
- EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
- EXPECT_EQ(m2->Begin(), base2 + page_size);
- EXPECT_EQ(m2->Begin() + m2->Size(), end2);
- EXPECT_EQ(m3->Begin(), base3);
- EXPECT_EQ(m3->Begin() + m3->Size(), end3);
+ EXPECT_EQ(m0.Begin(), base0);
+ EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
+ EXPECT_EQ(m1.Begin(), base1 + page_size);
+ EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
+ EXPECT_EQ(m2.Begin(), base2 + page_size);
+ EXPECT_EQ(m2.Begin() + m2.Size(), end2);
+ EXPECT_EQ(m3.Begin(), base3);
+ EXPECT_EQ(m3.Begin() + m3.Size(), end3);
} else {
- EXPECT_EQ(m0->Begin(), base0 + page_size);
- EXPECT_EQ(m0->Begin() + m0->Size(), end0);
- EXPECT_EQ(m1->Begin(), base1);
- EXPECT_EQ(m1->Begin() + m1->Size(), end1);
- EXPECT_EQ(m2->Begin(), base2);
- EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
- EXPECT_EQ(m3->Begin(), base3 + page_size);
- EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
+ EXPECT_EQ(m0.Begin(), base0 + page_size);
+ EXPECT_EQ(m0.Begin() + m0.Size(), end0);
+ EXPECT_EQ(m1.Begin(), base1);
+ EXPECT_EQ(m1.Begin() + m1.Size(), end1);
+ EXPECT_EQ(m2.Begin(), base2);
+ EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
+ EXPECT_EQ(m3.Begin(), base3 + page_size);
+ EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
}
}
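
The AlignBy test relies on RemapAtEnd, which carves the tail off an existing mapping and
returns it as a new MemMap (invalid on failure, with error_msg set). A sketch under the same
assumptions as the test above (kPageSize and the CHECK macros come from ART):

    // Split a four-page map into a one-page head and a three-page tail.
    std::string error_msg;
    MemMap head = MemMap::MapAnonymous("split demo",
                                       /* addr */ nullptr,
                                       4 * kPageSize,
                                       PROT_READ | PROT_WRITE,
                                       /* low_4gb */ false,
                                       /* reuse */ false,
                                       &error_msg);
    CHECK(head.IsValid()) << error_msg;
    MemMap tail = head.RemapAtEnd(head.Begin() + kPageSize,
                                  "split demo tail",
                                  PROT_READ | PROT_WRITE,
                                  &error_msg);
    CHECK(tail.IsValid()) << error_msg;
    CHECK_EQ(head.Size(), static_cast<size_t>(kPageSize));      // Head keeps one page.
    CHECK_EQ(tail.Size(), 3 * static_cast<size_t>(kPageSize));  // Tail owns the rest.
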
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index b5f946e5a2..3c68ca1de8 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -68,31 +68,34 @@ bool ZipEntry::ExtractToFile(File& file, std::string* error_msg) {
return true;
}
-MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_filename,
- std::string* error_msg) {
+MemMap ZipEntry::ExtractToMemMap(const char* zip_filename,
+ const char* entry_filename,
+ std::string* error_msg) {
std::string name(entry_filename);
name += " extracted in memory from ";
name += zip_filename;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
- nullptr, GetUncompressedLength(),
- PROT_READ | PROT_WRITE, false, false,
- error_msg));
- if (map.get() == nullptr) {
+ MemMap map = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ GetUncompressedLength(),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ error_msg);
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
- return nullptr;
+ return MemMap::Invalid();
}
- const int32_t error = ExtractToMemory(handle_, zip_entry_,
- map->Begin(), map->Size());
+ const int32_t error = ExtractToMemory(handle_, zip_entry_, map.Begin(), map.Size());
if (error) {
*error_msg = std::string(ErrorCodeString(error));
- return nullptr;
+ return MemMap::Invalid();
}
- return map.release();
+ return map;
}
-MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
const int zip_fd = GetFileDescriptor(handle_);
const char* entry_filename = entry_name_.c_str();
@@ -109,7 +112,7 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
*error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because it is compressed.",
entry_filename,
zip_filename);
- return nullptr;
+ return MemMap::Invalid();
} else if (zip_entry_->uncompressed_length != zip_entry_->compressed_length) {
*error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because "
"entry has bad size (%u != %u).",
@@ -117,7 +120,7 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
zip_filename,
zip_entry_->uncompressed_length,
zip_entry_->compressed_length);
- return nullptr;
+ return MemMap::Invalid();
}
std::string name(entry_filename);
@@ -130,7 +133,7 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
LOG(INFO) << "zip_archive: " << "make mmap of " << name << " @ offset = " << offset;
}
- std::unique_ptr<MemMap> map(
+ MemMap map =
MemMap::MapFileAtAddress(nullptr, // Expected pointer address
GetUncompressedLength(), // Byte count
PROT_READ | PROT_WRITE,
@@ -140,9 +143,9 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
false, // Don't restrict allocation to lower4GB
false, // Doesn't overlap existing map (reuse=false)
name.c_str(),
- /*out*/error_msg));
+ /*out*/error_msg);
- if (map == nullptr) {
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
}
@@ -169,12 +172,12 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
LOG(INFO) << "---------------------------";
// Dump map contents.
- if (map != nullptr) {
+ if (map.IsValid()) {
tmp = "";
count = kMaxDumpChars;
- uint8_t* begin = map->Begin();
+ uint8_t* begin = map.Begin();
for (i = 0; i < count; ++i) {
tmp += StringPrintf("%3d ", (unsigned int)begin[i]);
}
@@ -185,19 +188,20 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
}
}
- return map.release();
+ return map;
}
-MemMap* ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
- const char* entry_filename,
- std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
+ const char* entry_filename,
+ std::string* error_msg) {
if (IsUncompressed() && GetFileDescriptor(handle_) >= 0) {
- MemMap* ret = MapDirectlyFromFile(zip_filename, error_msg);
- if (ret != nullptr) {
+ std::string local_error_msg;
+ MemMap ret = MapDirectlyFromFile(zip_filename, &local_error_msg);
+ if (ret.IsValid()) {
return ret;
}
+ // Fall back to extraction for the failure case.
}
- // Fall back to extraction for the failure case.
return ExtractToMemMap(zip_filename, entry_filename, error_msg);
}
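
Callers now get the map-directly-or-extract fallback through a single value-returning call. A
sketch of typical use; ZipArchive::Open and Find are assumed from the rest of libartbase and
are not part of this patch:

    #include <memory>
    #include "base/zip_archive.h"

    MemMap OpenZipEntry(const char* zip_filename,
                        const char* entry_name,
                        std::string* error_msg) {
      std::unique_ptr<ZipArchive> zip(ZipArchive::Open(zip_filename, error_msg));
      if (zip == nullptr) {
        return MemMap::Invalid();
      }
      std::unique_ptr<ZipEntry> entry(zip->Find(entry_name, error_msg));
      if (entry == nullptr) {
        return MemMap::Invalid();
      }
      // Maps the stored entry directly when possible, otherwise extracts to
      // anonymous memory; returns an invalid map and sets *error_msg on failure.
      return entry->MapDirectlyOrExtract(zip_filename, entry_name, error_msg);
    }
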
diff --git a/libartbase/base/zip_archive.h b/libartbase/base/zip_archive.h
index 73495da96a..8fc8b54d2c 100644
--- a/libartbase/base/zip_archive.h
+++ b/libartbase/base/zip_archive.h
@@ -43,21 +43,22 @@ class ZipEntry {
bool ExtractToFile(File& file, std::string* error_msg);
// Extract this entry to anonymous memory (R/W).
-  // Returns null on failure and sets error_msg.
-  MemMap* ExtractToMemMap(const char* zip_filename, const char* entry_filename,
-                          std::string* error_msg);
+  // Returns invalid MemMap on failure and sets error_msg.
+  MemMap ExtractToMemMap(const char* zip_filename,
+                         const char* entry_filename,
+                         std::string* error_msg);
// Create a file-backed private (clean, R/W) memory mapping to this entry.
// 'zip_filename' is used for diagnostics only,
// the original file that the ZipArchive was open with is used
// for the mapping.
//
// Will only succeed if the entry is stored uncompressed.
- // Returns null on failure and sets error_msg.
- MemMap* MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
+ // Returns invalid MemMap on failure and sets error_msg.
+ MemMap MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
virtual ~ZipEntry();
- MemMap* MapDirectlyOrExtract(const char* zip_filename,
- const char* entry_filename,
- std::string* error_msg);
+ MemMap MapDirectlyOrExtract(const char* zip_filename,
+ const char* entry_filename,
+ std::string* error_msg);
uint32_t GetUncompressedLength();
uint32_t GetCrc32();
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index cc7d7aae34..1846a13a20 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -23,6 +23,7 @@
#include "base/file_magic.h"
#include "base/file_utils.h"
+#include "base/mem_map.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
@@ -38,14 +39,14 @@ namespace {
class MemMapContainer : public DexFileContainer {
public:
- explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
+ explicit MemMapContainer(MemMap&& mem_map) : mem_map_(std::move(mem_map)) { }
virtual ~MemMapContainer() OVERRIDE { }
int GetPermissions() OVERRIDE {
- if (mem_map_.get() == nullptr) {
+ if (!mem_map_.IsValid()) {
return 0;
} else {
- return mem_map_->GetProtect();
+ return mem_map_.GetProtect();
}
}
@@ -55,24 +56,24 @@ class MemMapContainer : public DexFileContainer {
bool EnableWrite() OVERRIDE {
CHECK(IsReadOnly());
- if (mem_map_.get() == nullptr) {
+ if (!mem_map_.IsValid()) {
return false;
} else {
- return mem_map_->Protect(PROT_READ | PROT_WRITE);
+ return mem_map_.Protect(PROT_READ | PROT_WRITE);
}
}
bool DisableWrite() OVERRIDE {
CHECK(!IsReadOnly());
- if (mem_map_.get() == nullptr) {
+ if (!mem_map_.IsValid()) {
return false;
} else {
- return mem_map_->Protect(PROT_READ);
+ return mem_map_.Protect(PROT_READ);
}
}
private:
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
};
@@ -180,22 +181,24 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
uint32_t location_checksum,
- std::unique_ptr<MemMap> map,
+ MemMap&& map,
bool verify,
bool verify_checksum,
std::string* error_msg) const {
ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
- CHECK(map.get() != nullptr);
+ CHECK(map.IsValid());
- if (map->Size() < sizeof(DexFile::Header)) {
+ size_t size = map.Size();
+ if (size < sizeof(DexFile::Header)) {
*error_msg = StringPrintf(
"DexFile: failed to open dex file '%s' that is too short to have a header",
location.c_str());
return nullptr;
}
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
+ uint8_t* begin = map.Begin();
+ std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+ size,
/*data_base*/ nullptr,
/*data_size*/ 0u,
location,
@@ -285,7 +288,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
std::string* error_msg) const {
ScopedTrace trace(std::string("Open dex file ") + std::string(location));
CHECK(!location.empty());
- std::unique_ptr<MemMap> map;
+ MemMap map;
{
File delayed_close(fd, /* check_usage */ false);
struct stat sbuf;
@@ -300,31 +303,33 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
return nullptr;
}
size_t length = sbuf.st_size;
- map.reset(MemMap::MapFile(length,
- PROT_READ,
- mmap_shared ? MAP_SHARED : MAP_PRIVATE,
- fd,
- 0,
- /*low_4gb*/false,
- location.c_str(),
- error_msg));
- if (map == nullptr) {
+ map = MemMap::MapFile(length,
+ PROT_READ,
+ mmap_shared ? MAP_SHARED : MAP_PRIVATE,
+ fd,
+ 0,
+ /*low_4gb*/false,
+ location.c_str(),
+ error_msg);
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
return nullptr;
}
}
- if (map->Size() < sizeof(DexFile::Header)) {
+ const uint8_t* begin = map.Begin();
+ size_t size = map.Size();
+ if (size < sizeof(DexFile::Header)) {
*error_msg = StringPrintf(
"DexFile: failed to open dex file '%s' that is too short to have a header",
location.c_str());
return nullptr;
}
- const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
+ const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(begin);
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
+ std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+ size,
/*data_base*/ nullptr,
/*data_size*/ 0u,
location,
@@ -366,7 +371,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
return nullptr;
}
- std::unique_ptr<MemMap> map;
+ MemMap map;
if (zip_entry->IsUncompressed()) {
if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
// Do not mmap unaligned ZIP entries because
@@ -376,8 +381,8 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
<< "Falling back to extracting file.";
} else {
// Map uncompressed files within zip as file-backed to avoid a dirty copy.
- map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
- if (map == nullptr) {
+ map = zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg);
+ if (!map.IsValid()) {
LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
<< "is your ZIP file corrupted? Falling back to extraction.";
// Try again with Extraction which still has a chance of recovery.
@@ -385,21 +390,23 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
}
}
- if (map == nullptr) {
+ if (!map.IsValid()) {
// Default path for compressed ZIP entries,
// and fallback for stored ZIP entries.
- map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
+ map = zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg);
}
- if (map == nullptr) {
+ if (!map.IsValid()) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
*error_code = DexFileLoaderErrorCode::kExtractToMemoryError;
return nullptr;
}
VerifyResult verify_result;
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
+ uint8_t* begin = map.Begin();
+ size_t size = map.Size();
+ std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+ size,
/*data_base*/ nullptr,
/*data_size*/ 0u,
location,
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index da2620f587..420b347808 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -66,7 +66,7 @@ class ArtDexFileLoader : public DexFileLoader {
// Opens .dex file that has been memory-mapped by the caller.
std::unique_ptr<const DexFile> Open(const std::string& location,
                                        uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
+ MemMap&& mem_map,
bool verify,
bool verify_checksum,
std::string* error_msg) const;
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 6f49adf718..c7653457aa 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -1183,7 +1183,7 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::OpenSource(
// (e.g. dex metadata files)
LOG(WARNING) << "Could not find entry " << kDexMetadataProfileEntry
<< " in the zip archive. Creating an empty profile.";
- source->reset(ProfileSource::Create(nullptr));
+ source->reset(ProfileSource::Create(MemMap::Invalid()));
return kProfileLoadSuccess;
}
if (zip_entry->GetUncompressedLength() == 0) {
@@ -1192,11 +1192,9 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::OpenSource(
}
// TODO(calin) pass along file names to assist with debugging.
- std::unique_ptr<MemMap> map(zip_entry->MapDirectlyOrExtract(kDexMetadataProfileEntry,
- "profile file",
- error));
+ MemMap map = zip_entry->MapDirectlyOrExtract(kDexMetadataProfileEntry, "profile file", error);
- if (map != nullptr) {
+ if (map.IsValid()) {
source->reset(ProfileSource::Create(std::move(map)));
return kProfileLoadSuccess;
} else {
@@ -1211,11 +1209,11 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ProfileSource:
const std::string& debug_stage,
std::string* error) {
if (IsMemMap()) {
- if (mem_map_cur_ + byte_count > mem_map_->Size()) {
+ if (mem_map_cur_ + byte_count > mem_map_.Size()) {
return kProfileLoadBadData;
}
for (size_t i = 0; i < byte_count; i++) {
- buffer[i] = *(mem_map_->Begin() + mem_map_cur_);
+ buffer[i] = *(mem_map_.Begin() + mem_map_cur_);
mem_map_cur_++;
}
} else {
@@ -1237,13 +1235,13 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ProfileSource:
bool ProfileCompilationInfo::ProfileSource::HasConsumedAllData() const {
return IsMemMap()
- ? (mem_map_ == nullptr || mem_map_cur_ == mem_map_->Size())
+ ? (!mem_map_.IsValid() || mem_map_cur_ == mem_map_.Size())
: (testEOF(fd_) == 0);
}
bool ProfileCompilationInfo::ProfileSource::HasEmptyContent() const {
if (IsMemMap()) {
- return mem_map_ == nullptr || mem_map_->Size() == 0;
+ return !mem_map_.IsValid() || mem_map_.Size() == 0;
} else {
struct stat stat_buffer;
if (fstat(fd_, &stat_buffer) != 0) {
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 3596f3e5a6..0dbf490cde 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -637,14 +637,14 @@ class ProfileCompilationInfo {
*/
static ProfileSource* Create(int32_t fd) {
DCHECK_GT(fd, -1);
- return new ProfileSource(fd, /*map*/ nullptr);
+ return new ProfileSource(fd, MemMap::Invalid());
}
/**
    * Create a profile source backed by a memory map. The map can be invalid in
    * which case it will be treated as an empty source.
*/
- static ProfileSource* Create(std::unique_ptr<MemMap>&& mem_map) {
+ static ProfileSource* Create(MemMap&& mem_map) {
return new ProfileSource(/*fd*/ -1, std::move(mem_map));
}
@@ -664,13 +664,13 @@ class ProfileCompilationInfo {
bool HasConsumedAllData() const;
private:
- ProfileSource(int32_t fd, std::unique_ptr<MemMap>&& mem_map)
+ ProfileSource(int32_t fd, MemMap&& mem_map)
: fd_(fd), mem_map_(std::move(mem_map)), mem_map_cur_(0) {}
bool IsMemMap() const { return fd_ == -1; }
int32_t fd_; // The fd is not owned by this class.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
size_t mem_map_cur_; // Current position in the map to read from.
};
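
Both factory overloads now take ownership without a heap-allocated MemMap. A brief sketch of a
hypothetical caller (fd and map are assumed to be in scope; ProfileSource is internal to
ProfileCompilationInfo):

    std::unique_ptr<ProfileSource> a(ProfileSource::Create(fd));              // fd-backed.
    std::unique_ptr<ProfileSource> b(ProfileSource::Create(std::move(map)));  // map-backed.
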
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c04c50e027..a5cc38b866 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -708,7 +708,7 @@ class OatDumper {
return nullptr;
}
- std::unique_ptr<MemMap> mmap(MemMap::MapFile(
+ MemMap mmap = MemMap::MapFile(
file->GetLength(),
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
@@ -716,13 +716,13 @@ class OatDumper {
/* start offset */ 0,
/* low_4gb */ false,
vdex_filename.c_str(),
- error_msg));
- if (mmap == nullptr) {
+ error_msg);
+ if (!mmap.IsValid()) {
*error_msg = "Failed to mmap file " + vdex_filename + ": " + *error_msg;
return nullptr;
}
- std::unique_ptr<VdexFile> vdex_file(new VdexFile(mmap.release()));
+ std::unique_ptr<VdexFile> vdex_file(new VdexFile(std::move(mmap)));
if (!vdex_file->IsValid()) {
*error_msg = "Vdex file is not valid";
return nullptr;
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index a660fb56c4..2ca87fd482 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -79,11 +79,8 @@ static void DoDexUnquicken(const art::DexFile& new_dex_file,
const art::VdexFile* vdex = GetVdex(original_dex_file);
if (vdex != nullptr) {
vdex->UnquickenDexFile(new_dex_file, original_dex_file, /* decompile_return_instruction */true);
- } else {
- // The dex file isn't quickened since it is being used directly. We might still have hiddenapis
- // so we need to get rid of those.
- UnhideApis(new_dex_file);
}
+ UnhideApis(new_dex_file);
}
static void DCheckVerifyDexFile(const art::DexFile& dex) {
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 9bea18a763..209add36e1 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -91,10 +91,8 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
// Make the mmap
std::string error_msg;
art::ArrayRef<const unsigned char> final_data(final_dex_data, final_len);
- std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
- final_data,
- &error_msg));
- if (map.get() == nullptr) {
+ art::MemMap map = Redefiner::MoveDataToMemMap(orig_location, final_data, &error_msg);
+ if (!map.IsValid()) {
LOG(WARNING) << "Unable to allocate mmap for redefined dex file! Error was: " << error_msg;
self->ThrowOutOfMemoryError(StringPrintf(
"Unable to allocate dex file for transformation of %s", descriptor).c_str());
@@ -102,15 +100,15 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
}
// Make a dex-file
- if (map->Size() < sizeof(art::DexFile::Header)) {
+ if (map.Size() < sizeof(art::DexFile::Header)) {
LOG(WARNING) << "Could not read dex file header because dex_data was too short";
art::ThrowClassFormatError(nullptr,
"Unable to read transformed dex file of %s",
descriptor);
return nullptr;
}
- uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
- std::string map_name = map->GetName();
+ uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map.Begin())->checksum_;
+ std::string map_name = map.GetName();
const art::ArtDexFileLoader dex_file_loader;
std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
checksum,
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index dce2733e7e..030ad98d02 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -49,26 +49,27 @@ namespace openjdkjvmti {
void ArtClassDefinition::InitializeMemory() const {
DCHECK(art::MemMap::kCanReplaceMapping);
VLOG(signals) << "Initializing de-quickened memory for dex file of " << name_;
- CHECK(dex_data_mmap_ != nullptr);
- CHECK(temp_mmap_ != nullptr);
- CHECK_EQ(dex_data_mmap_->GetProtect(), PROT_NONE);
- CHECK_EQ(temp_mmap_->GetProtect(), PROT_READ | PROT_WRITE);
+ CHECK(dex_data_mmap_.IsValid());
+ CHECK(temp_mmap_.IsValid());
+ CHECK_EQ(dex_data_mmap_.GetProtect(), PROT_NONE);
+ CHECK_EQ(temp_mmap_.GetProtect(), PROT_READ | PROT_WRITE);
std::string desc = std::string("L") + name_ + ";";
std::unique_ptr<FixedUpDexFile>
fixed_dex_file(FixedUpDexFile::Create(*initial_dex_file_unquickened_, desc.c_str()));
CHECK(fixed_dex_file.get() != nullptr);
- CHECK_LE(fixed_dex_file->Size(), temp_mmap_->Size());
- CHECK_EQ(temp_mmap_->Size(), dex_data_mmap_->Size());
+ CHECK_LE(fixed_dex_file->Size(), temp_mmap_.Size());
+ CHECK_EQ(temp_mmap_.Size(), dex_data_mmap_.Size());
// Copy the data to the temp mmap.
- memcpy(temp_mmap_->Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+ memcpy(temp_mmap_.Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
// Move the mmap atomically.
- art::MemMap* source = temp_mmap_.release();
+ art::MemMap source;
+ source.swap(temp_mmap_);
std::string error;
- CHECK(dex_data_mmap_->ReplaceWith(&source, &error)) << "Failed to replace mmap for "
- << name_ << " because " << error;
- CHECK(dex_data_mmap_->Protect(PROT_READ));
+ CHECK(dex_data_mmap_.ReplaceWith(&source, &error)) << "Failed to replace mmap for "
+ << name_ << " because " << error;
+ CHECK(dex_data_mmap_.Protect(PROT_READ));
}
bool ArtClassDefinition::IsModified() const {
@@ -85,13 +86,13 @@ bool ArtClassDefinition::IsModified() const {
}
// The dex_data_ was never touched by the agents.
- if (dex_data_mmap_ != nullptr && dex_data_mmap_->GetProtect() == PROT_NONE) {
- if (current_dex_file_.data() == dex_data_mmap_->Begin()) {
+ if (dex_data_mmap_.IsValid() && dex_data_mmap_.GetProtect() == PROT_NONE) {
+ if (current_dex_file_.data() == dex_data_mmap_.Begin()) {
// the dex_data_ looks like it changed (not equal to current_dex_file_) but we never
// initialized the dex_data_mmap_. This means the new_dex_data was filled in without looking
// at the initial dex_data_.
return true;
- } else if (dex_data_.data() == dex_data_mmap_->Begin()) {
+ } else if (dex_data_.data() == dex_data_mmap_.Begin()) {
// The dex file used to have modifications but they were not added again.
return true;
} else {
@@ -244,26 +245,26 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
std::string mmap_name("anon-mmap-for-redefine: ");
mmap_name += name_;
std::string error;
- dex_data_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
- nullptr,
- dequick_size,
- PROT_NONE,
- /*low_4gb*/ false,
- /*reuse*/ false,
- &error));
- mmap_name += "-TEMP";
- temp_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
- nullptr,
+ dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
+ /* addr */ nullptr,
dequick_size,
- PROT_READ | PROT_WRITE,
+ PROT_NONE,
/*low_4gb*/ false,
/*reuse*/ false,
- &error));
- if (UNLIKELY(dex_data_mmap_ != nullptr && temp_mmap_ != nullptr)) {
+ &error);
+ mmap_name += "-TEMP";
+ temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
+ /* addr */ nullptr,
+ dequick_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error);
+ if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
// Need to save the initial dexfile so we don't need to search for it in the fault-handler.
initial_dex_file_unquickened_ = quick_dex;
- dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
- dex_data_mmap_->Size());
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_.Begin(),
+ dex_data_mmap_.Size());
if (from_class_ext_) {
// We got initial from class_ext so the current one must have undergone redefinition so no
// cdex or quickening stuff.
@@ -275,14 +276,14 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
// This class hasn't been redefined before. The dequickened current data is the same as the
      // dex_data_mmap_ when it's filled in. We don't need to copy anything because the mmap will
// not be cleared until after everything is done.
- current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_.Begin(),
dequick_size);
}
return;
}
}
- dex_data_mmap_.reset(nullptr);
- temp_mmap_.reset(nullptr);
+ dex_data_mmap_.Reset();
+ temp_mmap_.Reset();
// Failed to mmap a large enough area (or on-demand dequickening was disabled). This is
  // unfortunate. Since the size is currently just a guess, we might as well try to do it
// manually.
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index f888a7474f..224e664459 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -56,8 +56,8 @@ class ArtClassDefinition {
loader_(nullptr),
name_(),
protection_domain_(nullptr),
- dex_data_mmap_(nullptr),
- temp_mmap_(nullptr),
+ dex_data_mmap_(),
+ temp_mmap_(),
dex_data_memory_(),
initial_dex_file_unquickened_(nullptr),
dex_data_(),
@@ -100,9 +100,9 @@ class ArtClassDefinition {
}
bool ContainsAddress(uintptr_t ptr) const {
- return dex_data_mmap_ != nullptr &&
- reinterpret_cast<uintptr_t>(dex_data_mmap_->Begin()) <= ptr &&
- reinterpret_cast<uintptr_t>(dex_data_mmap_->End()) > ptr;
+ return dex_data_mmap_.IsValid() &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_.Begin()) <= ptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_.End()) > ptr;
}
bool IsModified() const REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -128,9 +128,9 @@ class ArtClassDefinition {
bool IsLazyDefinition() const {
DCHECK(IsInitialized());
- return dex_data_mmap_ != nullptr &&
- dex_data_.data() == dex_data_mmap_->Begin() &&
- dex_data_mmap_->GetProtect() == PROT_NONE;
+ return dex_data_mmap_.IsValid() &&
+ dex_data_.data() == dex_data_mmap_.Begin() &&
+ dex_data_mmap_.GetProtect() == PROT_NONE;
}
jobject GetProtectionDomain() const {
@@ -159,9 +159,9 @@ class ArtClassDefinition {
// Mmap that will be filled with the original-dex-file lazily if it needs to be de-quickened or
// de-compact-dex'd
- mutable std::unique_ptr<art::MemMap> dex_data_mmap_;
+ mutable art::MemMap dex_data_mmap_;
// This is a temporary mmap we will use to be able to fill the dex file data atomically.
- mutable std::unique_ptr<art::MemMap> temp_mmap_;
+ mutable art::MemMap temp_mmap_;
// A unique_ptr to the current dex_data if it needs to be cleaned up.
std::vector<unsigned char> dex_data_memory_;
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index dd0428dfcf..6cba48a0c6 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -300,24 +300,23 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
}
// Moves dex data to an anonymous, read-only mmap'd region.
-std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
- art::ArrayRef<const unsigned char> data,
- std::string* error_msg) {
- std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
+art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location,
+ art::ArrayRef<const unsigned char> data,
+ std::string* error_msg) {
+ art::MemMap map = art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
- nullptr,
+ /* addr */ nullptr,
data.size(),
PROT_READ|PROT_WRITE,
- /*low_4gb*/false,
- /*reuse*/false,
- error_msg));
- if (map == nullptr) {
- return map;
- }
- memcpy(map->Begin(), data.data(), data.size());
- // Make the dex files mmap read only. This matches how other DexFiles are mmaped and prevents
- // programs from corrupting it.
- map->Protect(PROT_READ);
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ error_msg);
+ if (LIKELY(map.IsValid())) {
+ memcpy(map.Begin(), data.data(), data.size());
+    // Make the dex file's mmap read-only. This matches how other DexFiles are mmapped and
+    // prevents programs from corrupting it.
+ map.Protect(PROT_READ);
+ }
return map;
}
@@ -429,23 +428,22 @@ jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition
}
JvmtiUniquePtr<char> generic_unique_ptr(MakeJvmtiUniquePtr(env, generic_ptr_unused));
JvmtiUniquePtr<char> signature_unique_ptr(MakeJvmtiUniquePtr(env, signature_ptr));
- std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_dex_location,
- def.GetDexData(),
- error_msg_));
+ art::MemMap map = MoveDataToMemMap(original_dex_location, def.GetDexData(), error_msg_);
std::ostringstream os;
- if (map.get() == nullptr) {
+ if (!map.IsValid()) {
os << "Failed to create anonymous mmap for modified dex file of class " << def.GetName()
<< "in dex file " << original_dex_location << " because: " << *error_msg_;
*error_msg_ = os.str();
return ERR(OUT_OF_MEMORY);
}
- if (map->Size() < sizeof(art::DexFile::Header)) {
+ if (map.Size() < sizeof(art::DexFile::Header)) {
*error_msg_ = "Could not read dex file header because dex_data was too short";
return ERR(INVALID_CLASS_FORMAT);
}
- uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
+ std::string name = map.GetName();
+ uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map.Begin())->checksum_;
const art::ArtDexFileLoader dex_file_loader;
- std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map->GetName(),
+ std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(name,
checksum,
std::move(map),
/*verify*/true,
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index 6d8f6bf0db..f4a4280aac 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -78,9 +78,9 @@ class Redefiner {
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
- static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
- art::ArrayRef<const unsigned char> data,
- std::string* error_msg);
+ static art::MemMap MoveDataToMemMap(const std::string& original_location,
+ art::ArrayRef<const unsigned char> data,
+ std::string* error_msg);
// Helper for checking if redefinition/retransformation is allowed.
static jvmtiError GetClassRedefinitionError(jclass klass, /*out*/std::string* error_msg)
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index a15f7b88d8..8169979759 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -538,7 +538,7 @@ bool PatchOat::Patch(const std::string& image_location,
ScopedObjectAccess soa(Thread::Current());
std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
- std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>> space_to_memmap_map;
+ std::map<gc::space::ImageSpace*, MemMap> space_to_memmap_map;
for (size_t i = 0; i < spaces.size(); ++i) {
t.NewTiming("Image Patching setup");
@@ -567,15 +567,15 @@ bool PatchOat::Patch(const std::string& image_location,
// Create the map where we will write the image patches to.
std::string error_msg;
- std::unique_ptr<MemMap> image(MemMap::MapFile(image_len,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg));
- if (image.get() == nullptr) {
+ MemMap image = MemMap::MapFile(image_len,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ input_image->Fd(),
+ 0,
+ /*low_4gb*/false,
+ input_image->GetPath().c_str(),
+ &error_msg);
+ if (!image.IsValid()) {
LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
return false;
}
@@ -583,7 +583,7 @@ bool PatchOat::Patch(const std::string& image_location,
space_to_memmap_map.emplace(space, std::move(image));
PatchOat p = PatchOat(isa,
- space_to_memmap_map[space].get(),
+ &space_to_memmap_map[space],
space->GetLiveBitmap(),
space->GetMemMap(),
delta,
@@ -636,22 +636,22 @@ bool PatchOat::Patch(const std::string& image_location,
LOG(ERROR) << "Error while getting input image size";
return false;
}
- std::unique_ptr<MemMap> original(MemMap::MapFile(input_image_size,
- PROT_READ,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg));
- if (original.get() == nullptr) {
+ MemMap original = MemMap::MapFile(input_image_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ input_image->Fd(),
+ 0,
+ /*low_4gb*/false,
+ input_image->GetPath().c_str(),
+ &error_msg);
+ if (!original.IsValid()) {
LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
return false;
}
const MemMap* relocated = p.image_;
- if (!WriteRelFile(*original, *relocated, image_relocation_filename, &error_msg)) {
+ if (!WriteRelFile(original, *relocated, image_relocation_filename, &error_msg)) {
LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
<< ": " << error_msg;
return false;
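
Holding MemMap by value in the space map works because the type is movable and
default-constructible (a default-constructed map is invalid), so both emplace with std::move
and operator[] are usable. A sketch mirroring the hunk above (space and image assumed in
scope):

    #include <map>
    #include "base/mem_map.h"

    std::map<gc::space::ImageSpace*, MemMap> space_to_memmap_map;
    space_to_memmap_map.emplace(space, std::move(image));  // Move the mapping in.
    MemMap* borrowed = &space_to_memmap_map[space];        // operator[] relies on the
                                                           // default (invalid) state.
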
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 2b1210b5b1..ac2fdf594d 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -74,7 +74,7 @@ class PatchOat {
// All pointers are only borrowed.
PatchOat(InstructionSet isa, MemMap* image,
gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
- std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* map, TimingLogger* timings)
+ std::map<gc::space::ImageSpace*, MemMap>* map, TimingLogger* timings)
: image_(image), bitmap_(bitmap), heap_(heap),
delta_(delta), isa_(isa), space_map_(map), timings_(timings) {}
@@ -139,7 +139,7 @@ class PatchOat {
if (image_space->Contains(obj)) {
uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) -
reinterpret_cast<uintptr_t>(image_space->GetMemMap()->Begin());
- return reinterpret_cast<T*>(space_map_->find(image_space)->second->Begin() + heap_off);
+ return reinterpret_cast<T*>(space_map_->find(image_space)->second.Begin() + heap_off);
}
}
LOG(FATAL) << "Did not find object in boot image space " << obj;
@@ -195,7 +195,7 @@ class PatchOat {
// Active instruction set, used to know the entrypoint size.
const InstructionSet isa_;
- const std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* space_map_;
+ const std::map<gc::space::ImageSpace*, MemMap>* space_map_;
TimingLogger* timings_;
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 7123ae73b4..9a01656caa 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -37,6 +37,9 @@
// The offset from art_quick_read_barrier_mark_introspection to the array switch cases,
// i.e. art_quick_read_barrier_mark_introspection_arrays.
#define BAKER_MARK_INTROSPECTION_ARRAY_SWITCH_OFFSET 0x100
+// The offset from art_quick_read_barrier_mark_introspection to the entrypoint for the
+// UnsafeCASObject intrinsic, i.e. art_quick_read_barrier_mark_introspection_unsafe_cas.
+#define BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ENTRYPOINT_OFFSET 0x180
// The offset of the reference load LDR from the return address in LR for field loads.
#ifdef USE_HEAP_POISONING
@@ -55,5 +58,7 @@
// The offset of the reference load LDR from the return address in LR for GC root loads.
#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET -8
#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET -6
+// The offset of the ADD from the return address in LR for UnsafeCASObject intrinsic.
+#define BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ADD_OFFSET -8
#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index b4e9036084..2c5465e120 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -59,6 +59,7 @@ extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_arrays(mirr
extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots_wide(mirror::Object*);
extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots_narrow(
mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_unsafe_cas(mirror::Object*);
// Used by soft float.
// Single-precision FP arithmetics.
@@ -113,6 +114,10 @@ void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
reinterpret_cast<intptr_t>(art_quick_read_barrier_mark_introspection_gc_roots_narrow) -
reinterpret_cast<intptr_t>(art_quick_read_barrier_mark_introspection);
DCHECK_EQ(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET, gc_roots_narrow_diff);
+ intptr_t unsafe_cas_diff =
+ reinterpret_cast<intptr_t>(art_quick_read_barrier_mark_introspection_unsafe_cas) -
+ reinterpret_cast<intptr_t>(art_quick_read_barrier_mark_introspection);
+ DCHECK_EQ(BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ENTRYPOINT_OFFSET, unsafe_cas_diff);
// The register 12, i.e. IP, is reserved, so there is no art_quick_read_barrier_mark_reg12.
// We're using the entry to hold a pointer to the introspection entrypoint instead.
qpoints->pReadBarrierMarkReg12 =
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index c86baa1057..8f56430a00 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -2575,6 +2575,12 @@ art_quick_read_barrier_mark_introspection_gc_roots\label_suffix:
* The thunk also performs all the fast-path checks, so we need just the
* slow path.
*
+ * The UnsafeCASObject intrinsic is similar to the GC roots wide approach
+ * but using ADD (register, T3) instead of the LDR (immediate, T3), so the
+ * destination register is in bits 8-11 rather than 12-15. Therefore it has
+ * its own entrypoint, art_quick_read_barrier_mark_introspection_unsafe_cas
+ * at the offset BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ENTRYPOINT_OFFSET.
+ *
* The code structure is
* art_quick_read_barrier_mark_introspection: // @0x00
* Up to 32 bytes code for main entrypoint fast-path code for fields
@@ -2610,6 +2616,9 @@ art_quick_read_barrier_mark_introspection_gc_roots\label_suffix:
* return switch.
* art_quick_read_barrier_mark_introspection_arrays: // @0x100
* Exactly 128 bytes for array load switch cases (16x2 instructions).
+ * art_quick_read_barrier_mark_introspection_unsafe_cas: // @0x180
+ * UnsafeCASObject intrinsic entrypoint for ADD (register) encoding T3
+ * (6 bytes). Loads the return register and jumps to the runtime call.
*/
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
.balign 512
@@ -2669,7 +2678,6 @@ art_quick_read_barrier_mark_introspection_narrow:
BRBMI_RUNTIME_CALL
b .Lmark_introspection_return_switch
-
.balign 256
.thumb_func
.type art_quick_read_barrier_mark_introspection_arrays, #function
@@ -2677,6 +2685,19 @@ art_quick_read_barrier_mark_introspection_narrow:
.global art_quick_read_barrier_mark_introspection_arrays
art_quick_read_barrier_mark_introspection_arrays:
BRBMI_FOR_REGISTERS BRBMI_ARRAY_LOAD, BRBMI_BKPT_FILL_8B
+
+ .balign 8
+ .thumb_func
+ .type art_quick_read_barrier_mark_introspection_unsafe_cas, #function
+ .hidden art_quick_read_barrier_mark_introspection_unsafe_cas
+ .global art_quick_read_barrier_mark_introspection_unsafe_cas
+art_quick_read_barrier_mark_introspection_unsafe_cas:
+ // Load the byte of the ADD instruction that contains Rd. Adjust for the thumb state in LR.
+ // The ADD (register, T3) is |11101011000|S|Rn|(0)imm3|Rd|imm2|type|Rm| and we're using
+ // no shift (type=0, imm2=0, imm3=0), so the byte we read here, i.e. |(0)imm3|Rd|,
+ // contains only the register number, the top 4 bits are 0.
+ ldrb rMR, [lr, #(-1 + BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ADD_OFFSET + 3)]
+ b .Lmark_introspection_runtime_call
END art_quick_read_barrier_mark_introspection
#else // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
ENTRY art_quick_read_barrier_mark_introspection
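
A worked decode of the encoding comment above, as an illustration only: for add.w r4, r2, r3
the two halfwords are 0xEB02 and 0x0403, stored little-endian as 02 EB 03 04, so the byte at
instruction offset +3 is 0x04, the destination register with the top four bits zero:

    #include <cassert>
    #include <cstdint>

    int main() {
      // add.w r4, r2, r3 is ADD (register, T3): S=0, Rn=r2, Rd=r4, Rm=r3, no shift.
      const uint8_t insn[4] = {0x02, 0xEB, 0x03, 0x04};  // Two little-endian halfwords.
      uint8_t rd = insn[3];  // The |(0)imm3|Rd| byte; imm3 == 0, so this is just Rd.
      assert(rd == 4u);
      return 0;
    }
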
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 702f0e453b..0f472e2604 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -38,22 +38,34 @@ class MemMapArena FINAL : public Arena {
void Release() OVERRIDE;
private:
- std::unique_ptr<MemMap> map_;
+ static MemMap Allocate(size_t size, bool low_4gb, const char* name);
+
+ MemMap map_;
};
-MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name)
+ : map_(Allocate(size, low_4gb, name)) {
+ memory_ = map_.Begin();
+ static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
+ "Arena should not need stronger alignment than kPageSize.");
+ DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
+ size_ = map_.Size();
+}
+
+MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
// Round up to a full page as that's the smallest unit of allocation for mmap()
// and we want to be able to use all memory that we actually allocate.
size = RoundUp(size, kPageSize);
std::string error_msg;
- map_.reset(MemMap::MapAnonymous(
- name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
- CHECK(map_.get() != nullptr) << error_msg;
- memory_ = map_->Begin();
- static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
- "Arena should not need stronger alignment than kPageSize.");
- DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
- size_ = map_->Size();
+ MemMap map = MemMap::MapAnonymous(name,
+ /* addr */ nullptr,
+ size,
+ PROT_READ | PROT_WRITE,
+ low_4gb,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(map.IsValid()) << error_msg;
+ return map;
}
MemMapArena::~MemMapArena() {
@@ -62,7 +74,7 @@ MemMapArena::~MemMapArena() {
void MemMapArena::Release() {
if (bytes_allocated_ > 0) {
- map_->MadviseDontNeedAndZero();
+ map_.MadviseDontNeedAndZero();
bytes_allocated_ = 0;
}
}
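
The new static Allocate helper exists so that map_, now a value member, can be initialized in
the constructor's initializer list rather than reset in the body. The idiom in isolation (a
sketch, not the patch's exact code):

    // Initialize a non-pointer MemMap member via a static factory.
    class ArenaLike {
     public:
      explicit ArenaLike(size_t size) : map_(Allocate(size)) {}  // Ready before the body runs.
     private:
      static MemMap Allocate(size_t size);  // CHECK-fails or returns a valid map.
      MemMap map_;
    };
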
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 7b888b18d9..044c4c2f78 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1142,10 +1142,6 @@ void Locks::Init() {
DCHECK(subtype_check_lock_ == nullptr);
subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
- DCHECK(cha_lock_ == nullptr);
- cha_lock_ = new Mutex("CHA lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
DCHECK(classlinker_classes_lock_ == nullptr);
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
@@ -1226,6 +1222,10 @@ void Locks::Init() {
DCHECK(custom_tls_lock_ == nullptr);
custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+ DCHECK(cha_lock_ == nullptr);
+ cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
DCHECK(native_debug_interface_lock_ == nullptr);
native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index af2e7b2763..fba209a0b6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -72,6 +72,7 @@ enum LockLevel : uint8_t {
kJdwpSocketLock,
kRegionSpaceRegionLock,
kMarkSweepMarkStackLock,
+ kCHALock,
kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
@@ -109,7 +110,6 @@ enum LockLevel : uint8_t {
kMonitorPoolLock,
kClassLinkerClassesLock, // TODO rename.
kDexToDexCompilerLock,
- kCHALock,
kSubtypeCheckLock,
kBreakpointLock,
kMonitorLock,
@@ -661,14 +661,11 @@ class Locks {
// TODO: improve name, perhaps instrumentation_update_lock_.
static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
- // Guards Class Hierarchy Analysis (CHA).
- static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
// Guard the update of the SubtypeCheck data stores in each Class::status_ field.
// This lock is used in SubtypeCheck methods which are the interface for
// any SubtypeCheck-mutating methods.
// In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
- static Mutex* subtype_check_lock_ ACQUIRED_AFTER(cha_lock_);
+ static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
@@ -745,11 +742,14 @@ class Locks {
// GetThreadLocalStorage.
static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+ // Guards Class Hierarchy Analysis (CHA).
+ static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
// When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
// actually only encodes the mutex being below jni_function_table_lock_ although having
// kGenericBottomLock level is lower than this.
- #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::custom_tls_lock_)
+ #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
// Have an exclusive aborting thread.
static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ccbe066ed6..ce84e8ce2e 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -636,38 +636,54 @@ void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
// We do this under cha_lock_. Committing code also grabs this lock to
// make sure the code is only committed when all single-implementation
// assumptions are still true.
- MutexLock cha_mu(self, *Locks::cha_lock_);
- // Invalidate compiled methods that assume some virtual calls have only
- // single implementations.
- for (ArtMethod* invalidated : invalidated_single_impl_methods) {
- if (!invalidated->HasSingleImplementation()) {
- // It might have been invalidated already when other class linking is
- // going on.
- continue;
- }
- invalidated->SetHasSingleImplementation(false);
- if (invalidated->IsAbstract()) {
- // Clear the single implementation method.
- invalidated->SetSingleImplementation(nullptr, image_pointer_size);
- }
+ std::vector<std::pair<ArtMethod*, OatQuickMethodHeader*>> headers;
+ {
+ MutexLock cha_mu(self, *Locks::cha_lock_);
+ // Invalidate compiled methods that assume some virtual calls have only
+ // single implementations.
+ for (ArtMethod* invalidated : invalidated_single_impl_methods) {
+ if (!invalidated->HasSingleImplementation()) {
+ // It might have been invalidated already when other class linking is
+ // going on.
+ continue;
+ }
+ invalidated->SetHasSingleImplementation(false);
+ if (invalidated->IsAbstract()) {
+ // Clear the single implementation method.
+ invalidated->SetSingleImplementation(nullptr, image_pointer_size);
+ }
- if (runtime->IsAotCompiler()) {
- // No need to invalidate any compiled code as the AotCompiler doesn't
- // run any code.
- continue;
- }
+ if (runtime->IsAotCompiler()) {
+ // No need to invalidate any compiled code as the AotCompiler doesn't
+ // run any code.
+ continue;
+ }
- // Invalidate all dependents.
- for (const auto& dependent : GetDependents(invalidated)) {
- ArtMethod* method = dependent.first;;
- OatQuickMethodHeader* method_header = dependent.second;
- VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
- DCHECK(runtime->UseJitCompilation());
- runtime->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
- method, method_header);
- dependent_method_headers.insert(method_header);
+ // Invalidate all dependents.
+ for (const auto& dependent : GetDependents(invalidated)) {
+        ArtMethod* method = dependent.first;
+ OatQuickMethodHeader* method_header = dependent.second;
+ VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
+ DCHECK(runtime->UseJitCompilation());
+        // We need to call JitCodeCache::InvalidateCompiledCodeFor, but we cannot do it here
+        // since that would violate the lock ordering. We don't want to re-order the locks
+        // either, since that would make committing code racy.
+ headers.push_back({method, method_header});
+ dependent_method_headers.insert(method_header);
+ }
+ RemoveAllDependenciesFor(invalidated);
+ }
+ }
+  // Since we are still loading the class that invalidated the code, it is fine to do this
+  // after removing the dependency. Any calls would have to use the old version (the new one
+  // isn't loaded yet), which still works fine. We deoptimize just after this to ensure
+  // everything gets the new state.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ for (const auto& pair : headers) {
+ code_cache->InvalidateCompiledCodeFor(pair.first, pair.second);
}
- RemoveAllDependenciesFor(invalidated);
}
}
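
The rewritten block is the standard collect-then-act shape for working around lock ordering:
gather everything that must stay consistent with the CHA data while cha_lock_ is held,
release it, and only then make the calls that need the JIT code cache lock, which now sits
above cha_lock_. A generic hedged sketch of the pattern (types and helpers hypothetical):

    #include <mutex>
    #include <vector>

    struct Work { /* what to invalidate */ };
    std::mutex low_level_lock;   // analogue of cha_lock_
    std::mutex high_level_lock;  // analogue of the JIT code cache lock

    void InvalidateAll(const std::vector<Work>& items) {
      std::vector<Work> deferred;
      {
        std::lock_guard<std::mutex> guard(low_level_lock);
        // Mutate the protected state and record any follow-up work.
        deferred = items;
      }
      // low_level_lock is released: taking the higher-level lock is legal again.
      for (const Work& work : deferred) {
        std::lock_guard<std::mutex> guard(high_level_lock);
        // ... perform the invalidation for `work` ...
      }
    }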
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index f8388f315d..b0eef00551 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -249,14 +249,17 @@ void DexoptTest::ReserveImageSpace() {
void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
if (start < end) {
std::string error_msg;
- image_reservation_.push_back(std::unique_ptr<MemMap>(
- MemMap::MapAnonymous("image reservation",
- reinterpret_cast<uint8_t*>(start), end - start,
- PROT_NONE, false, false, &error_msg)));
- ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
+ image_reservation_.push_back(MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<uint8_t*>(start),
+ end - start,
+ PROT_NONE,
+                                                      /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg));
+ ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
LOG(INFO) << "Reserved space for image " <<
- reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
- reinterpret_cast<void*>(image_reservation_.back()->End());
+ reinterpret_cast<void*>(image_reservation_.back().Begin()) << "-" <<
+ reinterpret_cast<void*>(image_reservation_.back().End());
}
}
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 6e8dc097d5..3203ee526b 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -91,7 +91,7 @@ class DexoptTest : public Dex2oatEnvironmentTest {
// before the image is loaded.
void UnreserveImageSpace();
- std::vector<std::unique_ptr<MemMap>> image_reservation_;
+ std::vector<MemMap> image_reservation_;
};
} // namespace art
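
With MemMap a movable value type, the test keeps its reservations in a plain
std::vector<MemMap>, and failure becomes an invalid map rather than a null pointer. A short
sketch of the calling convention the hunk above adopts (the MapAnonymous signature is taken
from this patch; `start`, `end`, and `reservations` are stand-ins):

    std::vector<MemMap> reservations;
    std::string error_msg;
    MemMap reservation = MemMap::MapAnonymous("image reservation",
                                              reinterpret_cast<uint8_t*>(start),
                                              end - start,
                                              PROT_NONE,
                                              /* low_4gb */ false,
                                              /* reuse */ false,
                                              &error_msg);
    CHECK(reservation.IsValid()) << error_msg;
    reservations.push_back(std::move(reservation));  // moved into the vector, never copied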
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 026b5da748..4ae736299b 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -283,7 +283,6 @@ bool ElfFileImpl<ElfTypes>::Setup(File* file,
template <typename ElfTypes>
ElfFileImpl<ElfTypes>::~ElfFileImpl() {
- STLDeleteElements(&segments_);
delete symtab_symbol_table_;
delete dynsym_symbol_table_;
}
@@ -418,17 +417,17 @@ template <typename ElfTypes>
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap* map, std::string* error_msg) {
- if (map == nullptr) {
+bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap&& map, std::string* error_msg) {
+ if (!map.IsValid()) {
// MemMap::Open should have already set an error.
DCHECK(!error_msg->empty());
return false;
}
- map_.reset(map);
- CHECK(map_.get() != nullptr) << file->GetPath();
- CHECK(map_->Begin() != nullptr) << file->GetPath();
+ map_ = std::move(map);
+ CHECK(map_.IsValid()) << file->GetPath();
+ CHECK(map_.Begin() != nullptr) << file->GetPath();
- header_ = reinterpret_cast<Elf_Ehdr*>(map_->Begin());
+ header_ = reinterpret_cast<Elf_Ehdr*>(map_.Begin());
if ((ELFMAG0 != header_->e_ident[EI_MAG0])
|| (ELFMAG1 != header_->e_ident[EI_MAG1])
|| (ELFMAG2 != header_->e_ident[EI_MAG2])
@@ -1164,14 +1163,14 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
DCHECK(!error_msg->empty());
return false;
}
- std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
- reserve_base_override,
- loaded_size,
- PROT_NONE,
- low_4gb,
- false,
- error_msg));
- if (reserve.get() == nullptr) {
+ MemMap reserve = MemMap::MapAnonymous(reservation_name.c_str(),
+ reserve_base_override,
+ loaded_size,
+ PROT_NONE,
+ low_4gb,
+ /* reuse */ false,
+ error_msg);
+ if (!reserve.IsValid()) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
reservation_name.c_str(), error_msg->c_str());
return false;
@@ -1179,14 +1178,14 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
reserved = true;
  // Base address is the difference between the actual mapped location and the p_vaddr
- base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve->Begin())
+ base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve.Begin())
- reinterpret_cast<uintptr_t>(reserve_base));
// By adding the p_vaddr of a section/symbol to base_address_ we will always get the
// dynamic memory address of where that object is actually mapped
//
// TODO: base_address_ needs to be calculated in ::Open, otherwise
// FindDynamicSymbolAddress returns the wrong values until Load is called.
- segments_.push_back(reserve.release());
+ segments_.push_back(std::move(reserve));
}
// empty segment, nothing to map
if (program_header->p_memsz == 0) {
@@ -1234,7 +1233,7 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
return false;
}
if (program_header->p_filesz != 0u) {
- std::unique_ptr<MemMap> segment(
+ MemMap segment =
MemMap::MapFileAtAddress(p_vaddr,
program_header->p_filesz,
prot,
@@ -1244,40 +1243,42 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
                                           /* low_4gb */ false,
/*reuse*/true, // implies MAP_FIXED
file->GetPath().c_str(),
- error_msg));
- if (segment.get() == nullptr) {
+ error_msg);
+ if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
- if (segment->Begin() != p_vaddr) {
+ if (segment.Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
"instead mapped to %p",
- i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment.Begin());
return false;
}
- segments_.push_back(segment.release());
+ segments_.push_back(std::move(segment));
}
if (program_header->p_filesz < program_header->p_memsz) {
std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
static_cast<uint64_t>(i), file->GetPath().c_str());
- std::unique_ptr<MemMap> segment(
- MemMap::MapAnonymous(name.c_str(),
- p_vaddr + program_header->p_filesz,
- program_header->p_memsz - program_header->p_filesz,
- prot, false, true /* reuse */, error_msg));
- if (segment == nullptr) {
+ MemMap segment = MemMap::MapAnonymous(name.c_str(),
+ p_vaddr + program_header->p_filesz,
+ program_header->p_memsz - program_header->p_filesz,
+ prot,
+ /* low_4gb */ false,
+ /* reuse */ true,
+ error_msg);
+ if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
- if (segment->Begin() != p_vaddr) {
+ if (segment.Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
"at expected address %p, instead mapped to %p",
- i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment.Begin());
return false;
}
- segments_.push_back(segment.release());
+ segments_.push_back(std::move(segment));
}
}
@@ -1343,9 +1344,8 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::ValidPointer(const uint8_t* start) const {
- for (size_t i = 0; i < segments_.size(); ++i) {
- const MemMap* segment = segments_[i];
- if (segment->Begin() <= start && start < segment->End()) {
+ for (const MemMap& segment : segments_) {
+ if (segment.Begin() <= start && start < segment.End()) {
return true;
}
}
@@ -1712,18 +1712,18 @@ ElfFile* ElfFile::Open(File* file,
file->GetPath().c_str());
return nullptr;
}
- std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
- PROT_READ,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- low_4gb,
- file->GetPath().c_str(),
- error_msg));
- if (map == nullptr || map->Size() != EI_NIDENT) {
+ MemMap map = MemMap::MapFile(EI_NIDENT,
+ PROT_READ,
+ MAP_PRIVATE,
+ file->Fd(),
+ 0,
+ low_4gb,
+ file->GetPath().c_str(),
+ error_msg);
+ if (!map.IsValid() || map.Size() != EI_NIDENT) {
return nullptr;
}
- uint8_t* header = map->Begin();
+ uint8_t* header = map.Begin();
if (header[EI_CLASS] == ELFCLASS64) {
ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
writable,
@@ -1763,18 +1763,18 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* e
file->GetPath().c_str());
return nullptr;
}
- std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
- PROT_READ,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- low_4gb,
- file->GetPath().c_str(),
- error_msg));
- if (map == nullptr || map->Size() != EI_NIDENT) {
+ MemMap map = MemMap::MapFile(EI_NIDENT,
+ PROT_READ,
+ MAP_PRIVATE,
+ file->Fd(),
+ /* start */ 0,
+ low_4gb,
+ file->GetPath().c_str(),
+ error_msg);
+ if (!map.IsValid() || map.Size() != EI_NIDENT) {
return nullptr;
}
- uint8_t* header = map->Begin();
+ uint8_t* header = map.Begin();
if (header[EI_CLASS] == ELFCLASS64) {
ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
mmap_prot,
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index a5808e27ba..58c38a4436 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -62,15 +62,15 @@ class ElfFileImpl {
}
uint8_t* Begin() const {
- return map_->Begin();
+ return map_.Begin();
}
uint8_t* End() const {
- return map_->End();
+ return map_.End();
}
size_t Size() const {
- return map_->Size();
+ return map_.Size();
}
Elf_Ehdr& GetHeader() const;
@@ -135,7 +135,7 @@ class ElfFileImpl {
bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg);
- bool SetMap(File* file, MemMap* map, std::string* error_msg);
+ bool SetMap(File* file, MemMap&& map, std::string* error_msg);
uint8_t* GetProgramHeadersStart() const;
uint8_t* GetSectionHeadersStart() const;
@@ -193,9 +193,9 @@ class ElfFileImpl {
// ELF header mapping. If program_header_only_ is false, will
// actually point to the entire elf file.
- std::unique_ptr<MemMap> map_;
+ MemMap map_;
Elf_Ehdr* header_;
- std::vector<MemMap*> segments_;
+ std::vector<MemMap> segments_;
// Pointer to start of first PT_LOAD program segment after Load()
// when program_header_only_ is true.
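
Two idioms carry the elf_file changes: SetMap() becomes a sink that takes MemMap&& and moves
the map into the member, and segments_ becomes std::vector<MemMap>, which is why the
destructor no longer needs STLDeleteElements — the vector destroys (and unmaps) its elements
itself. A condensed sketch, not the real class:

    #include <string>
    #include <vector>

    class ElfLike {
     public:
      bool SetMap(MemMap&& map, std::string* error_msg) {
        if (!map.IsValid()) {
          // The mapping routine should already have set *error_msg.
          return false;
        }
        map_ = std::move(map);          // ownership transferred to the member
        return true;
      }
      ~ElfLike() = default;             // map_ and segments_ unmap themselves

     private:
      MemMap map_;                      // header (or whole-file) mapping
      std::vector<MemMap> segments_;    // PT_LOAD segments, RAII-owned
    };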
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index e30fef4fc2..2a71dec4d5 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -72,12 +72,12 @@ class AtomicStack {
~AtomicStack() {}
void Reset() {
- DCHECK(mem_map_.get() != nullptr);
+ DCHECK(mem_map_.IsValid());
DCHECK(begin_ != nullptr);
front_index_.store(0, std::memory_order_relaxed);
back_index_.store(0, std::memory_order_relaxed);
debug_is_sorted_ = true;
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
// Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
@@ -252,10 +252,15 @@ class AtomicStack {
// Size in number of elements.
void Init() {
std::string error_msg;
- mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
- PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
- uint8_t* addr = mem_map_->Begin();
+ mem_map_ = MemMap::MapAnonymous(name_.c_str(),
+ /* addr */ nullptr,
+ capacity_ * sizeof(begin_[0]),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
+ uint8_t* addr = mem_map_.Begin();
CHECK(addr != nullptr);
debug_is_sorted_ = true;
begin_ = reinterpret_cast<StackReference<T>*>(addr);
@@ -265,7 +270,7 @@ class AtomicStack {
// Name of the mark stack.
std::string name_;
// Memory mapping of the atomic stack.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// Back index (index after the last element pushed).
AtomicInteger back_index_;
// Front index, used for implementing PopFront.
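
Reset() can "clear" the whole stack cheaply because MadviseDontNeedAndZero() relies on
MADV_DONTNEED semantics for private anonymous mappings: the kernel drops the pages, and any
later read faults in a fresh zero page. Roughly what the call amounts to (a sketch of the
underlying syscall, not the real implementation in base/mem_map.cc):

    madvise(mem_map.Begin(), mem_map.Size(), MADV_DONTNEED);  // pages now read back as zeros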
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index d45a0cc018..e157e5e8c4 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -27,47 +27,51 @@ namespace art {
namespace gc {
namespace accounting {
-Bitmap* Bitmap::CreateFromMemMap(MemMap* mem_map, size_t num_bits) {
- CHECK(mem_map != nullptr);
- return new Bitmap(mem_map, num_bits);
+Bitmap* Bitmap::CreateFromMemMap(MemMap&& mem_map, size_t num_bits) {
+ CHECK(mem_map.IsValid());
+ return new Bitmap(std::move(mem_map), num_bits);
}
-Bitmap::Bitmap(MemMap* mem_map, size_t bitmap_size)
- : mem_map_(mem_map), bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map->Begin())),
+Bitmap::Bitmap(MemMap&& mem_map, size_t bitmap_size)
+ : mem_map_(std::move(mem_map)),
+ bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map_.Begin())),
bitmap_size_(bitmap_size) {
CHECK(bitmap_begin_ != nullptr);
CHECK_NE(bitmap_size, 0U);
}
Bitmap::~Bitmap() {
- // Destroys MemMap via std::unique_ptr<>.
+ // Destroys member MemMap.
}
-MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
+MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
const size_t bitmap_size = RoundUp(
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
- PROT_READ | PROT_WRITE, false, false,
- &error_msg));
- if (UNLIKELY(mem_map.get() == nullptr)) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ bitmap_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
- return nullptr;
}
- return mem_map.release();
+ return mem_map;
}
Bitmap* Bitmap::Create(const std::string& name, size_t num_bits) {
- auto* const mem_map = AllocateMemMap(name, num_bits);
- if (mem_map == nullptr) {
+ MemMap mem_map = AllocateMemMap(name, num_bits);
+ if (UNLIKELY(!mem_map.IsValid())) {
return nullptr;
}
- return CreateFromMemMap(mem_map, num_bits);
+ return CreateFromMemMap(std::move(mem_map), num_bits);
}
void Bitmap::Clear() {
if (bitmap_begin_ != nullptr) {
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
}
@@ -83,14 +87,15 @@ MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::Create(
CHECK_ALIGNED(cover_begin, kAlignment);
CHECK_ALIGNED(cover_end, kAlignment);
const size_t num_bits = (cover_end - cover_begin) / kAlignment;
- auto* const mem_map = Bitmap::AllocateMemMap(name, num_bits);
- return CreateFromMemMap(mem_map, cover_begin, num_bits);
+ MemMap mem_map = Bitmap::AllocateMemMap(name, num_bits);
+ CHECK(mem_map.IsValid());
+ return CreateFromMemMap(std::move(mem_map), cover_begin, num_bits);
}
template<size_t kAlignment>
MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::CreateFromMemMap(
- MemMap* mem_map, uintptr_t begin, size_t num_bits) {
- return new MemoryRangeBitmap(mem_map, begin, num_bits);
+ MemMap&& mem_map, uintptr_t begin, size_t num_bits) {
+ return new MemoryRangeBitmap(std::move(mem_map), begin, num_bits);
}
template class MemoryRangeBitmap<CardTable::kCardSize>;
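
Note the subtlety in the rewritten Bitmap constructor: bitmap_begin_ is initialized from
mem_map_.Begin() — the member, not the moved-from parameter. That is only correct because
members are initialized in declaration order and mem_map_ is declared before bitmap_begin_
(see the header hunk below). A minimal illustration of the rule:

    class BitmapSketch {
     public:
      explicit BitmapSketch(MemMap&& mem_map)
          : mem_map_(std::move(mem_map)),    // initialized first: declared first
            begin_(mem_map_.Begin()) {}      // safe: reads the already-moved-into member

     private:
      MemMap mem_map_;                       // must stay declared before begin_
      uint8_t* const begin_;
    };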
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index 2d83a8ad2e..ffef5662db 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -24,12 +24,11 @@
#include <vector>
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
namespace art {
-class MemMap;
-
namespace gc {
namespace accounting {
@@ -42,7 +41,7 @@ class Bitmap {
// Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
// mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
  // Objects are kAlignment-aligned.
- static Bitmap* CreateFromMemMap(MemMap* mem_map, size_t num_bits);
+ static Bitmap* CreateFromMemMap(MemMap&& mem_map, size_t num_bits);
// offset is the difference from base to a index.
static ALWAYS_INLINE constexpr size_t BitIndexToWordIndex(uintptr_t offset) {
@@ -101,17 +100,17 @@ class Bitmap {
protected:
static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
- Bitmap(MemMap* mem_map, size_t bitmap_size);
+ Bitmap(MemMap&& mem_map, size_t bitmap_size);
~Bitmap();
// Allocate the mem-map for a bitmap based on how many bits are required.
- static MemMap* AllocateMemMap(const std::string& name, size_t num_bits);
+ static MemMap AllocateMemMap(const std::string& name, size_t num_bits);
template<bool kSetBit>
ALWAYS_INLINE bool ModifyBit(uintptr_t bit_index);
// Backing storage for bitmap.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
uintptr_t* const bitmap_begin_;
@@ -127,10 +126,10 @@ class Bitmap {
template<size_t kAlignment>
class MemoryRangeBitmap : public Bitmap {
public:
- static MemoryRangeBitmap* Create(const std::string& name, uintptr_t cover_begin,
- uintptr_t cover_end);
- static MemoryRangeBitmap* CreateFromMemMap(MemMap* mem_map, uintptr_t cover_begin,
- size_t num_bits);
+ static MemoryRangeBitmap* Create(
+ const std::string& name, uintptr_t cover_begin, uintptr_t cover_end);
+ static MemoryRangeBitmap* CreateFromMemMap(
+ MemMap&& mem_map, uintptr_t cover_begin, size_t num_bits);
// Beginning of the memory range that the bitmap covers.
ALWAYS_INLINE uintptr_t CoverBegin() const {
@@ -177,9 +176,10 @@ class MemoryRangeBitmap : public Bitmap {
}
private:
- MemoryRangeBitmap(MemMap* mem_map, uintptr_t begin, size_t num_bits)
- : Bitmap(mem_map, num_bits), cover_begin_(begin), cover_end_(begin + kAlignment * num_bits) {
- }
+ MemoryRangeBitmap(MemMap&& mem_map, uintptr_t begin, size_t num_bits)
+ : Bitmap(std::move(mem_map), num_bits),
+ cover_begin_(begin),
+ cover_end_(begin + kAlignment * num_bits) {}
uintptr_t const cover_begin_;
uintptr_t const cover_end_;
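
For scale, the size computation in AllocateMemMap() works out as follows on a typical 64-bit
target (an illustrative calculation assuming 64-bit words and 4 KiB pages): for
num_bits = 1000, RoundUp(1000, 64) = 1024 bits = 16 words = 128 bytes, which the outer
RoundUp() then pads to a single 4096-byte page — so even a tiny bitmap costs one full page of
anonymous memory.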
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 357a4985b6..1e7d76c97e 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -213,8 +213,8 @@ inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin,
inline void* CardTable::AddrFromCard(const uint8_t *card_addr) const {
DCHECK(IsValidCard(card_addr))
<< " card_addr: " << reinterpret_cast<const void*>(card_addr)
- << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
- << " end: " << reinterpret_cast<void*>(mem_map_->End());
+ << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+ << " end: " << reinterpret_cast<void*>(mem_map_.End());
uintptr_t offset = card_addr - biased_begin_;
return reinterpret_cast<void*>(offset << kCardShift);
}
@@ -228,16 +228,16 @@ inline uint8_t* CardTable::CardFromAddr(const void *addr) const {
}
inline bool CardTable::IsValidCard(const uint8_t* card_addr) const {
- uint8_t* begin = mem_map_->Begin() + offset_;
- uint8_t* end = mem_map_->End();
+ uint8_t* begin = mem_map_.Begin() + offset_;
+ uint8_t* end = mem_map_.End();
return card_addr >= begin && card_addr < end;
}
inline void CardTable::CheckCardValid(uint8_t* card) const {
DCHECK(IsValidCard(card))
<< " card_addr: " << reinterpret_cast<const void*>(card)
- << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
- << " end: " << reinterpret_cast<void*>(mem_map_->End());
+ << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+ << " end: " << reinterpret_cast<void*>(mem_map_.End());
}
} // namespace accounting
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 22104a30fe..89645e0083 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -64,15 +64,19 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
size_t capacity = heap_capacity / kCardSize;
  /* Allocate an extra 256 bytes to allow a fixed low byte of the base address */
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(
- MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
- false, false, &error_msg));
- CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
+ MemMap mem_map = MemMap::MapAnonymous("card table",
+ /* addr */ nullptr,
+ capacity + 256,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
  // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero,
  // so we don't clear the card table, which avoids allocating unnecessary pages.
static_assert(kCardClean == 0, "kCardClean must be 0");
- uint8_t* cardtable_begin = mem_map->Begin();
+ uint8_t* cardtable_begin = mem_map.Begin();
CHECK(cardtable_begin != nullptr);
  // We allocated up to a byte's worth of extra space to allow `biased_begin`'s byte value to equal
@@ -87,11 +91,11 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
biased_begin += offset;
}
CHECK_EQ(reinterpret_cast<uintptr_t>(biased_begin) & 0xff, kCardDirty);
- return new CardTable(mem_map.release(), biased_begin, offset);
+ return new CardTable(std::move(mem_map), biased_begin, offset);
}
-CardTable::CardTable(MemMap* mem_map, uint8_t* biased_begin, size_t offset)
- : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
+CardTable::CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset)
+ : mem_map_(std::move(mem_map)), biased_begin_(biased_begin), offset_(offset) {
}
CardTable::~CardTable() {
@@ -100,7 +104,7 @@ CardTable::~CardTable() {
void CardTable::ClearCardTable() {
static_assert(kCardClean == 0, "kCardClean must be 0");
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
void CardTable::ClearCardRange(uint8_t* start, uint8_t* end) {
@@ -118,8 +122,8 @@ bool CardTable::AddrIsInCardTable(const void* addr) const {
void CardTable::CheckAddrIsInCardTable(const uint8_t* addr) const {
uint8_t* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
- uint8_t* begin = mem_map_->Begin() + offset_;
- uint8_t* end = mem_map_->End();
+ uint8_t* begin = mem_map_.Begin() + offset_;
+ uint8_t* end = mem_map_.End();
CHECK(AddrIsInCardTable(addr))
<< "Card table " << this
<< " begin: " << reinterpret_cast<void*>(begin)
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index b8520b7dc0..47e2430b72 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -20,12 +20,11 @@
#include <memory>
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
namespace art {
-class MemMap;
-
namespace mirror {
class Object;
} // namespace mirror
@@ -133,7 +132,7 @@ class CardTable {
bool AddrIsInCardTable(const void* addr) const;
private:
- CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
+ CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset);
// Returns true iff the card table address is within the bounds of the card table.
bool IsValidCard(const uint8_t* card_addr) const ALWAYS_INLINE;
@@ -144,7 +143,7 @@ class CardTable {
void VerifyCardTable();
// Mmapped pages for the card table
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// Value used to compute card table addresses from object addresses, see GetBiasedBegin
uint8_t* const biased_begin_;
  // Card table doesn't begin at the beginning of the mem_map_; instead it is displaced by offset
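
The biased-begin arithmetic these accessors share: a heap address maps to card
biased_begin_ + (addr >> kCardShift), and AddrFromCard() inverts it with
(card_addr - biased_begin_) << kCardShift. A worked example, assuming the usual 128-byte
cards (kCardShift == 7 is an assumption here, as is the standalone helper):

    // Hypothetical helper mirroring CardTable::CardFromAddr().
    uint8_t* CardFor(uint8_t* biased_begin, const void* addr) {
      return biased_begin + (reinterpret_cast<uintptr_t>(addr) >> 7);
    }
    // With biased_begin chosen so that its low byte equals kCardDirty, the 256 spare
    // bytes allocated in Create() guarantee such an offset always exists.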
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 4b5a8c61c1..d8b1bb2d88 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -39,11 +39,15 @@ class ReadBarrierTable {
DCHECK_EQ(kHeapCapacity / kRegionSize,
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
- PROT_READ | PROT_WRITE, false, false, &error_msg);
- CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
+ mem_map_ = MemMap::MapAnonymous("read barrier table",
+ /* addr */ nullptr,
+ capacity,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
- mem_map_.reset(mem_map);
}
void ClearForSpace(space::ContinuousSpace* space) {
uint8_t* entry_start = EntryFromAddr(space->Begin());
@@ -66,14 +70,14 @@ class ReadBarrierTable {
return entry_value == kSetEntryValue;
}
void ClearAll() {
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
void SetAll() {
- memset(mem_map_->Begin(), kSetEntryValue, mem_map_->Size());
+ memset(mem_map_.Begin(), kSetEntryValue, mem_map_.Size());
}
bool IsAllCleared() const {
- for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_->Begin());
- p < reinterpret_cast<uint32_t*>(mem_map_->End()); ++p) {
+ for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_.Begin());
+ p < reinterpret_cast<uint32_t*>(mem_map_.End()); ++p) {
if (*p != 0) {
return false;
}
@@ -90,7 +94,7 @@ class ReadBarrierTable {
uint8_t* EntryFromAddr(const void* heap_addr) const {
DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
- uint8_t* entry_addr = mem_map_->Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
+ uint8_t* entry_addr = mem_map_.Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
DCHECK(IsValidEntry(entry_addr)) << "heap_addr: " << heap_addr
<< " entry_addr: " << reinterpret_cast<void*>(entry_addr);
return entry_addr;
@@ -106,12 +110,12 @@ class ReadBarrierTable {
}
bool IsValidEntry(const uint8_t* entry_addr) const {
- uint8_t* begin = mem_map_->Begin();
- uint8_t* end = mem_map_->End();
+ uint8_t* begin = mem_map_.Begin();
+ uint8_t* end = mem_map_.End();
return entry_addr >= begin && entry_addr < end;
}
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
};
} // namespace accounting
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index ced62cd249..f87a67e0de 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -49,21 +49,22 @@ size_t SpaceBitmap<kAlignment>::ComputeHeapSize(uint64_t bitmap_bytes) {
template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
- const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
- CHECK(mem_map != nullptr);
- uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map->Begin());
+ const std::string& name, MemMap&& mem_map, uint8_t* heap_begin, size_t heap_capacity) {
+ CHECK(mem_map.IsValid());
+ uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map.Begin());
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
- return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin, heap_capacity);
+ return new SpaceBitmap(
+ name, std::move(mem_map), bitmap_begin, bitmap_size, heap_begin, heap_capacity);
}
template<size_t kAlignment>
SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name,
- MemMap* mem_map,
+ MemMap&& mem_map,
uintptr_t* bitmap_begin,
size_t bitmap_size,
const void* heap_begin,
size_t heap_capacity)
- : mem_map_(mem_map),
+ : mem_map_(std::move(mem_map)),
bitmap_begin_(reinterpret_cast<Atomic<uintptr_t>*>(bitmap_begin)),
bitmap_size_(bitmap_size),
heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -83,14 +84,18 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
// (we represent one word as an `intptr_t`).
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
- PROT_READ | PROT_WRITE, false, false,
- &error_msg));
- if (UNLIKELY(mem_map.get() == nullptr)) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ bitmap_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
return nullptr;
}
- return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
+ return CreateFromMemMap(name, std::move(mem_map), heap_begin, heap_capacity);
}
template<size_t kAlignment>
@@ -114,7 +119,7 @@ std::string SpaceBitmap<kAlignment>::Dump() const {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Clear() {
if (bitmap_begin_ != nullptr) {
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
}
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 1237f6e8b5..6a3faefe08 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -24,6 +24,7 @@
#include <vector>
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
namespace art {
@@ -32,7 +33,6 @@ namespace mirror {
class Class;
class Object;
} // namespace mirror
-class MemMap;
namespace gc {
namespace accounting {
@@ -50,8 +50,10 @@ class SpaceBitmap {
// Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
// mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
  // Objects are kAlignment-aligned.
- static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
- uint8_t* heap_begin, size_t heap_capacity);
+ static SpaceBitmap* CreateFromMemMap(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* heap_begin,
+ size_t heap_capacity);
~SpaceBitmap();
@@ -215,7 +217,7 @@ class SpaceBitmap {
  // TODO: heap_end_ is initialized so that the heap bitmap is empty; this doesn't require the -1,
  // but we document that this is expected on heap_end_
SpaceBitmap(const std::string& name,
- MemMap* mem_map,
+ MemMap&& mem_map,
uintptr_t* bitmap_begin,
size_t bitmap_size,
const void* heap_begin,
@@ -227,7 +229,7 @@ class SpaceBitmap {
bool Modify(const mirror::Object* obj);
// Backing storage for bitmap.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
Atomic<uintptr_t>* const bitmap_begin_;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a4095d815f..1639a82718 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -91,11 +91,15 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
size_t num_of_pages = footprint_ / kPageSize;
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
- page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
- RoundUp(max_num_of_pages, kPageSize),
- PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
- page_map_ = page_map_mem_map_->Begin();
+ page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
+ /* addr */ nullptr,
+ RoundUp(max_num_of_pages, kPageSize),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+  CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map: " << error_msg;
+ page_map_ = page_map_mem_map_.Begin();
page_map_size_ = num_of_pages;
max_page_map_size_ = max_num_of_pages;
free_page_run_size_map_.resize(num_of_pages);
@@ -1364,8 +1368,8 @@ bool RosAlloc::Trim() {
// Zero out the tail of the page map.
uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
- DCHECK_LE(madvise_begin, page_map_mem_map_->End());
- size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
+ DCHECK_LE(madvise_begin, page_map_mem_map_.End());
+ size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
if (madvise_size > 0) {
DCHECK_ALIGNED(madvise_begin, kPageSize);
DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 30213d55c5..056216724d 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -31,13 +31,12 @@
#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "thread.h"
namespace art {
-class MemMap;
-
namespace gc {
namespace allocator {
@@ -746,7 +745,7 @@ class RosAlloc {
volatile uint8_t* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
size_t page_map_size_;
size_t max_page_map_size_;
- std::unique_ptr<MemMap> page_map_mem_map_;
+ MemMap page_map_mem_map_;
// The table that indicates the size of free page runs. These sizes
// are stored here to avoid storing in the free page header and
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index cde5dc7d50..783f2fc4bc 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -151,7 +151,9 @@ inline mirror::Object* ConcurrentCopying::Mark(Thread* const self,
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
return MarkUnevacFromSpaceRegion(self, from_ref, region_space_bitmap_);
default:
- // The reference is in an unused region.
+ // The reference is in an unused region. Remove memory protection from
+ // the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 07abbfc684..7688b546d9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1873,17 +1873,20 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
} else if (type == RegionType::kRegionTypeUnevacFromSpace) {
if (!IsMarkedInUnevacFromSpace(ref)) {
LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
}
CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
} else {
// Not OK: either a from-space ref or a reference in an unused region.
- // Do extra logging.
if (type == RegionType::kRegionTypeFromSpace) {
LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
} else {
LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
}
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
if (obj != nullptr) {
LogFromSpaceRefHolder(obj, offset);
@@ -1951,17 +1954,20 @@ void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
} else if (type == RegionType::kRegionTypeUnevacFromSpace) {
if (!IsMarkedInUnevacFromSpace(ref)) {
LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
}
CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
} else {
// Not OK: either a from-space ref or a reference in an unused region.
- // Do extra logging.
if (type == RegionType::kRegionTypeFromSpace) {
LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
} else {
LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
}
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
if (gc_root_source == nullptr) {
// No info.
@@ -2361,6 +2367,8 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self,
// from a previous GC that is either inside or outside the allocated region.
mirror::Class* klass = from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
if (UNLIKELY(klass == nullptr)) {
+ // Remove memory protection from the region space and log debugging information.
+ region_space_->Unprotect();
heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
}
// There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
@@ -2640,8 +2648,11 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
}
}
if (is_los && !IsAligned<kPageSize>(ref)) {
- // Ref is a large object that is not aligned, it must be heap corruption. Dump data before
- // AtomicSetReadBarrierState since it will fault if the address is not valid.
+      // Ref is a large object that is not aligned; it must be heap
+      // corruption. Remove memory protection and dump data before
+      // AtomicSetReadBarrierState, since the latter will fault if the
+      // address is not valid.
+ region_space_->Unprotect();
heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
}
// Not marked nor on the allocation stack. Try to mark it.
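
All four hunks in this file apply the same rule: from-space pages may be mprotected, so every
fatal-error path must call RegionSpace::Unprotect() before any diagnostic that dereferences
the suspect reference, otherwise the crash dump itself would fault. Schematically (a sketch;
DumpSuspectRef stands in for DumpHeapReference(), DumpGcRoot(), and LogHeapCorruption()):

    // On detecting a bad reference:
    region_space_->Unprotect();                       // make the pages readable again
    LOG(FATAL_WITHOUT_ABORT) << DumpSuspectRef(ref);  // only now safe to inspect memory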
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 9767807fb8..558a4a7fd0 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -40,22 +40,22 @@ class DummyOatFile : public OatFile {
class DummyImageSpace : public space::ImageSpace {
public:
- DummyImageSpace(MemMap* map,
+ DummyImageSpace(MemMap&& map,
accounting::ContinuousSpaceBitmap* live_bitmap,
std::unique_ptr<DummyOatFile>&& oat_file,
- std::unique_ptr<MemMap>&& oat_map)
+ MemMap&& oat_map)
: ImageSpace("DummyImageSpace",
/*image_location*/"",
- map,
+ std::move(map),
live_bitmap,
- map->End()),
+ map.End()),
oat_map_(std::move(oat_map)) {
oat_file_ = std::move(oat_file);
oat_file_non_owned_ = oat_file_.get();
}
private:
- std::unique_ptr<MemMap> oat_map_;
+ MemMap oat_map_;
};
class ImmuneSpacesTest : public CommonRuntimeTest {
@@ -83,39 +83,39 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
uint8_t* oat_begin,
size_t oat_size) {
std::string error_str;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
- image_begin,
- image_size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- &error_str));
- if (map == nullptr) {
+ MemMap map = MemMap::MapAnonymous("DummyImageSpace",
+ image_begin,
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ &error_str);
+ if (!map.IsValid()) {
LOG(ERROR) << error_str;
return nullptr;
}
CHECK(!live_bitmaps_.empty());
std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
live_bitmaps_.pop_back();
- std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
- oat_begin,
- oat_size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- &error_str));
- if (oat_map == nullptr) {
+ MemMap oat_map = MemMap::MapAnonymous("OatMap",
+ oat_begin,
+ oat_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ &error_str);
+ if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
return nullptr;
}
- std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
+ std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
- new (map->Begin()) ImageHeader(
- /*image_begin*/PointerToLowMemUInt32(map->Begin()),
- /*image_size*/map->Size(),
+ new (map.Begin()) ImageHeader(
+ /*image_begin*/PointerToLowMemUInt32(map.Begin()),
+ /*image_size*/map.Size(),
sections,
- /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
+ /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
/*oat_checksum*/0u,
// The oat file data in the header is always right after the image space.
/*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
@@ -131,7 +131,7 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
/*is_pic*/false,
ImageHeader::kStorageModeUncompressed,
/*storage_size*/0u);
- return new DummyImageSpace(map.release(),
+ return new DummyImageSpace(std::move(map),
live_bitmap.release(),
std::move(oat_file),
std::move(oat_map));
@@ -141,18 +141,18 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
// returned address.
static uint8_t* GetContinuousMemoryRegion(size_t size) {
std::string error_str;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
- nullptr,
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- &error_str));
- if (map == nullptr) {
+ MemMap map = MemMap::MapAnonymous("reserve",
+ /* addr */ nullptr,
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ true,
+ /*reuse*/ false,
+ &error_str);
+ if (!map.IsValid()) {
LOG(ERROR) << "Failed to allocate memory region " << error_str;
return nullptr;
}
- return map->Begin();
+ return map.Begin();
}
private:
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 23359640fe..334c7a02e0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -103,12 +103,16 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
is_concurrent_(is_concurrent),
live_stack_freeze_size_(0) {
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous(
- "mark sweep sweep array free buffer", nullptr,
+ sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
+ "mark sweep sweep array free buffer",
+ /* addr */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
- PROT_READ | PROT_WRITE, false, false, &error_msg);
- CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
- sweep_array_free_buffer_mem_map_.reset(mem_map);
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(sweep_array_free_buffer_mem_map_.IsValid())
+ << "Couldn't allocate sweep array free buffer: " << error_msg;
}
void MarkSweep::InitializePhase() {
@@ -1207,7 +1211,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Thread* self = Thread::Current();
mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
- sweep_array_free_buffer_mem_map_->BaseBegin());
+ sweep_array_free_buffer_mem_map_.BaseBegin());
size_t chunk_free_pos = 0;
ObjectBytePair freed;
ObjectBytePair freed_los;
@@ -1300,7 +1304,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
t2.NewTiming("ResetStack");
allocations->Reset();
}
- sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
+ sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
}
void MarkSweep::Sweep(bool swap_bitmaps) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 5e0fe0607f..70e4432ab4 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -351,7 +351,7 @@ class MarkSweep : public GarbageCollector {
// Verification.
size_t live_stack_freeze_size_;
- std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
+ MemMap sweep_array_free_buffer_mem_map_;
private:
class CardScanTask;
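
The MarkSweep constructor shows a third ownership shape in this patch: a default-constructed
(invalid) member MemMap that is move-assigned in the constructor body once MapAnonymous()
succeeds. A sketch of that shape (class and field names hypothetical):

    class Collector {
     public:
      Collector() {
        std::string error_msg;
        buffer_ = MemMap::MapAnonymous("free buffer",
                                       /* addr */ nullptr,
                                       kPageSize,
                                       PROT_READ | PROT_WRITE,
                                       /* low_4gb */ false,
                                       /* reuse */ false,
                                       &error_msg);
        CHECK(buffer_.IsValid()) << error_msg;
      }

     private:
      MemMap buffer_;  // starts invalid; move-assigned above
    };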
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 58becb1d09..4768db8e18 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -318,12 +318,14 @@ Heap::Heap(size_t initial_size,
}
// Load image space(s).
+ std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
if (space::ImageSpace::LoadBootImage(image_file_name,
image_instruction_set,
- &boot_image_spaces_,
+ &boot_image_spaces,
&requested_alloc_space_begin)) {
- for (auto space : boot_image_spaces_) {
- AddSpace(space);
+ for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
+ boot_image_spaces_.push_back(space.get());
+ AddSpace(space.release());
}
}
@@ -358,8 +360,8 @@ Heap::Heap(size_t initial_size,
if (foreground_collector_type_ == kCollectorTypeGSS) {
separate_non_moving_space = false;
}
- std::unique_ptr<MemMap> main_mem_map_1;
- std::unique_ptr<MemMap> main_mem_map_2;
+ MemMap main_mem_map_1;
+ MemMap main_mem_map_2;
// Gross hack to make dex2oat deterministic.
if (foreground_collector_type_ == kCollectorTypeMS &&
@@ -374,7 +376,7 @@ Heap::Heap(size_t initial_size,
request_begin += non_moving_space_capacity;
}
std::string error_str;
- std::unique_ptr<MemMap> non_moving_space_mem_map;
+ MemMap non_moving_space_mem_map;
if (separate_non_moving_space) {
ScopedTrace trace2("Create separate non moving space");
// If we are the zygote, the non moving space becomes the zygote space when we run
@@ -383,11 +385,9 @@ Heap::Heap(size_t initial_size,
const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
// Reserve the non moving mem map before the other two since it needs to be at a specific
// address.
- non_moving_space_mem_map.reset(MapAnonymousPreferredAddress(space_name,
- requested_alloc_space_begin,
- non_moving_space_capacity,
- &error_str));
- CHECK(non_moving_space_mem_map != nullptr) << error_str;
+ non_moving_space_mem_map = MapAnonymousPreferredAddress(
+ space_name, requested_alloc_space_begin, non_moving_space_capacity, &error_str);
+ CHECK(non_moving_space_mem_map.IsValid()) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
}
@@ -395,27 +395,29 @@ Heap::Heap(size_t initial_size,
if (foreground_collector_type_ != kCollectorTypeCC) {
ScopedTrace trace2("Create main mem map");
if (separate_non_moving_space || !is_zygote) {
- main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
- request_begin,
- capacity_,
- &error_str));
+ main_mem_map_1 = MapAnonymousPreferredAddress(
+ kMemMapSpaceName[0], request_begin, capacity_, &error_str);
} else {
// If no separate non-moving space and we are the zygote, the main space must come right
// after the image space to avoid a gap. This is required since we want the zygote space to
// be adjacent to the image space.
- main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, true, false,
- &error_str));
+ main_mem_map_1 = MemMap::MapAnonymous(kMemMapSpaceName[0],
+ request_begin,
+ capacity_,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_str);
}
- CHECK(main_mem_map_1.get() != nullptr) << error_str;
+ CHECK(main_mem_map_1.IsValid()) << error_str;
}
if (support_homogeneous_space_compaction ||
background_collector_type_ == kCollectorTypeSS ||
foreground_collector_type_ == kCollectorTypeSS) {
ScopedTrace trace2("Create main mem map 2");
- main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
- capacity_, &error_str));
- CHECK(main_mem_map_2.get() != nullptr) << error_str;
+ main_mem_map_2 = MapAnonymousPreferredAddress(
+ kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
+ CHECK(main_mem_map_2.IsValid()) << error_str;
}
// Create the non moving space first so that bitmaps don't take up the address range.
@@ -423,10 +425,14 @@ Heap::Heap(size_t initial_size,
ScopedTrace trace2("Add non moving space");
// Non moving space is always dlmalloc since we currently don't have support for multiple
// active rosalloc spaces.
- const size_t size = non_moving_space_mem_map->Size();
- non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
- non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
- initial_size, size, size, false);
+ const size_t size = non_moving_space_mem_map.Size();
+ non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
+ "zygote / non moving space",
+ kDefaultStartingSize,
+ initial_size,
+ size,
+ size,
+ /* can_move_objects */ false);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
<< requested_alloc_space_begin;
@@ -436,11 +442,10 @@ Heap::Heap(size_t initial_size,
if (foreground_collector_type_ == kCollectorTypeCC) {
CHECK(separate_non_moving_space);
// Reserve twice the capacity, to allow evacuating every region for explicit GCs.
- MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
- capacity_ * 2,
- request_begin);
- CHECK(region_space_mem_map != nullptr) << "No region space mem map";
- region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
+ MemMap region_space_mem_map =
+ space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
+ CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
+ region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
AddSpace(region_space_);
} else if (IsMovingGc(foreground_collector_type_) &&
foreground_collector_type_ != kCollectorTypeGSS) {
@@ -448,16 +453,16 @@ Heap::Heap(size_t initial_size,
      // We only create the bump pointer space if the foreground collector is a compacting GC.
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
- main_mem_map_1.release());
+ std::move(main_mem_map_1));
CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
- main_mem_map_2.release());
+ std::move(main_mem_map_2));
CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(temp_space_);
CHECK(separate_non_moving_space);
} else {
- CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
+ CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
CHECK(main_space_ != nullptr);
AddSpace(main_space_);
if (!separate_non_moving_space) {
@@ -467,19 +472,23 @@ Heap::Heap(size_t initial_size,
if (foreground_collector_type_ == kCollectorTypeGSS) {
CHECK_EQ(foreground_collector_type_, background_collector_type_);
// Create bump pointer spaces instead of a backup space.
- main_mem_map_2.release();
- bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
- kGSSBumpPointerSpaceCapacity, nullptr);
+ main_mem_map_2.Reset();
+ bump_pointer_space_ = space::BumpPointerSpace::Create(
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
- temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
- kGSSBumpPointerSpaceCapacity, nullptr);
+ temp_space_ = space::BumpPointerSpace::Create(
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
- } else if (main_mem_map_2.get() != nullptr) {
+ } else if (main_mem_map_2.IsValid()) {
const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
- main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
- growth_limit_, capacity_, name, true));
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
+ initial_size,
+ growth_limit_,
+ capacity_,
+ name,
+ /* can_move_objects */ true));
CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
AddSpace(main_space_backup_.get());
@@ -613,7 +622,7 @@ Heap::Heap(size_t initial_size,
first_space = space;
}
}
- bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
+ bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
MemMap::DumpMaps(LOG_STREAM(ERROR), true);
@@ -632,14 +641,19 @@ Heap::Heap(size_t initial_size,
}
}
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
- uint8_t* request_begin,
- size_t capacity,
- std::string* out_error_str) {
+MemMap Heap::MapAnonymousPreferredAddress(const char* name,
+ uint8_t* request_begin,
+ size_t capacity,
+ std::string* out_error_str) {
while (true) {
- MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
- PROT_READ | PROT_WRITE, true, false, out_error_str);
- if (map != nullptr || request_begin == nullptr) {
+ MemMap map = MemMap::MapAnonymous(name,
+ request_begin,
+ capacity,
+ PROT_READ | PROT_WRITE,
+                                          /* low_4gb */ true,
+ /* reuse */ false,
+ out_error_str);
+ if (map.IsValid() || request_begin == nullptr) {
return map;
}
// Retry a second time with no specified request begin.
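
Returning the map by value makes the retry loop read naturally: an invalid map that still has
an address hint falls through to a second attempt, and whatever the final attempt yields
(valid or not) is moved out to the caller, who CHECKs IsValid(). Condensed from the hunk
above:

    MemMap MapPreferred(const char* name, uint8_t* request_begin, size_t capacity,
                        std::string* error) {
      while (true) {
        MemMap map = MemMap::MapAnonymous(name, request_begin, capacity,
                                          PROT_READ | PROT_WRITE,
                                          /* low_4gb */ true, /* reuse */ false, error);
        if (map.IsValid() || request_begin == nullptr) {
          return map;              // may be invalid on the final failure
        }
        request_begin = nullptr;   // retry once without the address hint
      }
    }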
@@ -651,7 +665,7 @@ bool Heap::MayUseCollector(CollectorType type) const {
return foreground_collector_type_ == type || background_collector_type_ == type;
}
-space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
+space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
size_t initial_size,
size_t growth_limit,
size_t capacity,
@@ -660,12 +674,21 @@ space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
space::MallocSpace* malloc_space = nullptr;
if (kUseRosAlloc) {
// Create rosalloc space.
- malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
- initial_size, growth_limit, capacity,
- low_memory_mode_, can_move_objects);
+ malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
+ name,
+ kDefaultStartingSize,
+ initial_size,
+ growth_limit,
+ capacity,
+ low_memory_mode_,
+ can_move_objects);
} else {
- malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
- initial_size, growth_limit, capacity,
+ malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
+ name,
+ kDefaultStartingSize,
+ initial_size,
+ growth_limit,
+ capacity,
can_move_objects);
}
if (collector::SemiSpace::kUseRememberedSet) {
@@ -679,7 +702,9 @@ space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
return malloc_space;
}
-void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
+void Heap::CreateMainMallocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ size_t growth_limit,
size_t capacity) {
  // Is background compaction enabled?
bool can_move_objects = IsMovingGc(background_collector_type_) !=
@@ -698,7 +723,10 @@ void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t gr
RemoveRememberedSet(main_space_);
}
const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
- main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
+ main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
+ initial_size,
+ growth_limit,
+                                            capacity,
+                                            name,
can_move_objects);
SetSpaceAsDefault(main_space_);
VLOG(heap) << "Created main space " << main_space_;
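The calls above show the ownership convention this patch uses everywhere: each layer takes MemMap&& and forwards with std::move until a constructor finally moves the map into a member. A reduced sketch of that chain, with hypothetical stand-in types:

#include <cassert>
#include <utility>

// Minimal stand-ins (hypothetical, not ART's types) for the MemMap&&
// forwarding chain used throughout this patch.
struct Map {
  int fd = -1;
  explicit Map(int f) : fd(f) {}
  Map(Map&& o) noexcept : fd(o.fd) { o.fd = -1; }  // steal, invalidate source
  Map(const Map&) = delete;                        // single owner, no copies
};

struct Space {
  Map map_;
  explicit Space(Map&& m) : map_(std::move(m)) {}  // the actual move
};

Space* CreateSpace(Map&& m) {
  // Binding to Map&& moves nothing; ownership transfers in Space's initializer.
  return new Space(std::move(m));
}

int main() {
  Map m(42);
  Space* s = CreateSpace(std::move(m));
  assert(m.fd == -1 && s->map_.fd == 42);  // ownership now lives in the space
  delete s;
}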
@@ -2012,17 +2040,17 @@ void Heap::TransitionCollector(CollectorType collector_type) {
if (!IsMovingGc(collector_type_)) {
// Create the bump pointer space from the backup space.
CHECK(main_space_backup_ != nullptr);
- std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
+ MemMap mem_map = main_space_backup_->ReleaseMemMap();
// We are transitioning from non-moving GC -> moving GC; since we copied from the bump
// pointer space last transition, it will be protected.
- CHECK(mem_map != nullptr);
- mem_map->Protect(PROT_READ | PROT_WRITE);
+ CHECK(mem_map.IsValid());
+ mem_map.Protect(PROT_READ | PROT_WRITE);
bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
- mem_map.release());
+ std::move(mem_map));
AddSpace(bump_pointer_space_);
collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
// Use the now empty main space mem map for the bump pointer temp space.
- mem_map.reset(main_space_->ReleaseMemMap());
+ mem_map = main_space_->ReleaseMemMap();
// Unset the pointers just in case.
if (dlmalloc_space_ == main_space_) {
dlmalloc_space_ = nullptr;
@@ -2038,7 +2066,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
RemoveRememberedSet(main_space_backup_.get());
main_space_backup_.reset(nullptr); // Deletes the space.
temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
- mem_map.release());
+ std::move(mem_map));
AddSpace(temp_space_);
}
break;
@@ -2048,37 +2076,35 @@ void Heap::TransitionCollector(CollectorType collector_type) {
case kCollectorTypeCMS: {
if (IsMovingGc(collector_type_)) {
CHECK(temp_space_ != nullptr);
- std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+ MemMap mem_map = temp_space_->ReleaseMemMap();
RemoveSpace(temp_space_);
temp_space_ = nullptr;
- mem_map->Protect(PROT_READ | PROT_WRITE);
- CreateMainMallocSpace(mem_map.get(),
+ mem_map.Protect(PROT_READ | PROT_WRITE);
+ CreateMainMallocSpace(std::move(mem_map),
kDefaultInitialSize,
- std::min(mem_map->Size(), growth_limit_),
- mem_map->Size());
- mem_map.release();
+ std::min(mem_map.Size(), growth_limit_),
+ mem_map.Size());
// Compact to the main space from the bump pointer space; we don't need to swap semispaces.
AddSpace(main_space_);
collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
- mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+ mem_map = bump_pointer_space_->ReleaseMemMap();
RemoveSpace(bump_pointer_space_);
bump_pointer_space_ = nullptr;
const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
// Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
if (kIsDebugBuild && kUseRosAlloc) {
- mem_map->Protect(PROT_READ | PROT_WRITE);
+ mem_map.Protect(PROT_READ | PROT_WRITE);
}
main_space_backup_.reset(CreateMallocSpaceFromMemMap(
- mem_map.get(),
+ std::move(mem_map),
kDefaultInitialSize,
- std::min(mem_map->Size(), growth_limit_),
- mem_map->Size(),
+ std::min(mem_map.Size(), growth_limit_),
+ mem_map.Size(),
name,
/* can_move_objects */ true));
if (kIsDebugBuild && kUseRosAlloc) {
- mem_map->Protect(PROT_NONE);
+ main_space_backup_->GetMemMap()->Protect(PROT_NONE);
}
- mem_map.release();
}
break;
}
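The calls above pass std::move(mem_map) and mem_map.Size() in the same argument list. That is safe: std::move is only a cast, and binding to a MemMap&& parameter does not modify the object; the callee performs the actual move after all arguments have been evaluated. A small demonstration under those assumptions, with a hypothetical Map type:

#include <cassert>
#include <cstddef>
#include <utility>

struct Map {
  size_t size = 0;
  explicit Map(size_t s) : size(s) {}
  Map(Map&& o) noexcept : size(o.size) { o.size = 0; }
  Map(const Map&) = delete;
};

size_t Consume(Map&& m, size_t observed_size) {
  Map owned(std::move(m));  // the real move happens here, inside the callee
  assert(owned.size == observed_size);
  return owned.size;
}

int main() {
  Map m(4096);
  // Both arguments are evaluated before Consume runs; std::move(m) is just a
  // cast, so m.size still reads 4096 whatever the evaluation order.
  size_t n = Consume(std::move(m), m.size);
  assert(n == 4096 && m.size == 0);  // m was moved from inside Consume
}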
@@ -2323,11 +2349,13 @@ void Heap::PreZygoteFork() {
if (reset_main_space) {
main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
- MemMap* mem_map = main_space_->ReleaseMemMap();
+ MemMap mem_map = main_space_->ReleaseMemMap();
RemoveSpace(main_space_);
space::Space* old_main_space = main_space_;
- CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
- mem_map->Size());
+ CreateMainMallocSpace(std::move(mem_map),
+ kDefaultInitialSize,
+ std::min(mem_map.Size(), growth_limit_),
+ mem_map.Size());
delete old_main_space;
AddSpace(main_space_);
} else {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5c34c56e09..0dcf4f5abf 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -835,8 +835,10 @@ class Heap {
void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
// Create a mem map with a preferred base address.
- static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
- size_t capacity, std::string* out_error_str);
+ static MemMap MapAnonymousPreferredAddress(const char* name,
+ uint8_t* request_begin,
+ size_t capacity,
+ std::string* out_error_str);
bool SupportHSpaceCompaction() const {
// Returns true if we can do hspace compaction
@@ -979,13 +981,13 @@ class Heap {
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
// Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
- void CreateMainMallocSpace(MemMap* mem_map,
+ void CreateMainMallocSpace(MemMap&& mem_map,
size_t initial_size,
size_t growth_limit,
size_t capacity);
// Create a malloc space based on a mem map. Does not set the space as default.
- space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
+ space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
size_t initial_size,
size_t growth_limit,
size_t capacity,
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index c6b2120f5b..d35ae38f34 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -33,19 +33,19 @@ class HeapTest : public CommonRuntimeTest {
MemMap::Init();
std::string error_msg;
// Reserve the preferred address to force the heap to use another one for testing.
- reserved_.reset(MemMap::MapAnonymous("ReserveMap",
- gc::Heap::kPreferredAllocSpaceBegin,
- 16 * KB,
- PROT_READ,
- /*low_4gb*/ true,
- /*reuse*/ false,
- &error_msg));
- ASSERT_TRUE(reserved_ != nullptr) << error_msg;
+ reserved_ = MemMap::MapAnonymous("ReserveMap",
+ gc::Heap::kPreferredAllocSpaceBegin,
+ 16 * KB,
+ PROT_READ,
+ /*low_4gb*/ true,
+ /*reuse*/ false,
+ &error_msg);
+ ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
}
private:
- std::unique_ptr<MemMap> reserved_;
+ MemMap reserved_;
};
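The fixture works by reserving the heap's preferred address so the runtime is forced to map its spaces elsewhere. A standalone sketch of that trick on Linux; the address constant is illustrative, not ART's kPreferredAllocSpaceBegin:

#include <sys/mman.h>
#include <cassert>
#include <cstdint>

int main() {
  void* const kPreferred = reinterpret_cast<void*>(uintptr_t{0x40000000});
  // Reserve the preferred range up front (PROT_READ, like the fixture).
  void* reserved = mmap(kPreferred, 16 * 1024, PROT_READ,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(reserved != MAP_FAILED);
  // A later request hinting at the same address (without MAP_FIXED) cannot
  // replace the live reservation and lands somewhere else.
  void* second = mmap(kPreferred, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(second != MAP_FAILED && second != reserved);
  munmap(second, 4096);
  munmap(reserved, 16 * 1024);
}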
TEST_F(HeapTest, ClearGrowthLimit) {
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index e95da01d8c..2712ec2a35 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -28,23 +28,31 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
uint8_t* requested_begin) {
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
- PROT_READ | PROT_WRITE, true, false,
- &error_msg));
- if (mem_map.get() == nullptr) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ capacity,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
return nullptr;
}
- return new BumpPointerSpace(name, mem_map.release());
+ return new BumpPointerSpace(name, std::move(mem_map));
}
-BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
- return new BumpPointerSpace(name, mem_map);
+BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
+ return new BumpPointerSpace(name, std::move(mem_map));
}
BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
- : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
+ : ContinuousMemMapAllocSpace(name,
+ MemMap::Invalid(),
+ begin,
+ begin,
+ limit,
kGcRetentionPolicyAlwaysCollect),
growth_end_(limit),
objects_allocated_(0), bytes_allocated_(0),
@@ -53,10 +61,14 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint
num_blocks_(0) {
}
-BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
- : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
+BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
+ : ContinuousMemMapAllocSpace(name,
+ std::move(mem_map),
+ mem_map.Begin(),
+ mem_map.Begin(),
+ mem_map.End(),
kGcRetentionPolicyAlwaysCollect),
- growth_end_(mem_map->End()),
+ growth_end_(mem_map_.End()),
objects_allocated_(0), bytes_allocated_(0),
block_lock_("Block lock", kBumpPointerSpaceBlockLock),
main_block_size_(0),
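This constructor is the one subtle case in the patch: the base class is initialized first and moves from the mem_map parameter, so the later member initializer for growth_end_ must read the base-owned mem_map_ rather than the now moved-from parameter. A reduced sketch of why, with hypothetical stand-in types:

#include <cassert>
#include <cstdint>
#include <utility>

struct Map {
  uint8_t* end = nullptr;
  explicit Map(uint8_t* e) : end(e) {}
  Map(Map&& o) noexcept : end(o.end) { o.end = nullptr; }
  Map(const Map&) = delete;
  uint8_t* End() const { return end; }
};

struct Base {
  Map map_;
  explicit Base(Map&& m) : map_(std::move(m)) {}  // moves from the parameter
};

struct Derived : Base {
  uint8_t* growth_end_;
  explicit Derived(Map&& m)
      : Base(std::move(m)),        // runs first; m is moved-from afterwards
        growth_end_(map_.End()) {  // correct: read the base-owned member
    // growth_end_(m.End()) would have read the moved-from parameter: nullptr.
  }
};

int main() {
  uint8_t byte = 0;
  Derived d(Map(&byte));
  assert(d.growth_end_ == &byte);
}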
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 5ba13ca3ff..9b315584fb 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -47,7 +47,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// guaranteed to be granted; if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
- static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
+ static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
// Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -166,7 +166,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
static constexpr size_t kAlignment = 8;
protected:
- BumpPointerSpace(const std::string& name, MemMap* mem_map);
+ BumpPointerSpace(const std::string& name, MemMap&& mem_map);
// Allocate a raw block of bytes.
uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 025c3f0ead..36d2161262 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -38,41 +38,73 @@ namespace space {
static constexpr bool kPrefetchDuringDlMallocFreeList = true;
-DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
- void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
- size_t growth_limit, bool can_move_objects, size_t starting_size)
- : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+DlMallocSpace::DlMallocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ void* mspace,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size)
+ : MallocSpace(name,
+ std::move(mem_map),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ /* create_bitmaps */ true,
+ can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
CHECK(mspace != nullptr);
}
-DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
+DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
bool can_move_objects) {
- DCHECK(mem_map != nullptr);
- void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
+ DCHECK(mem_map.IsValid());
+ void* mspace = CreateMspace(mem_map.Begin(), starting_size, initial_size);
if (mspace == nullptr) {
LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
return nullptr;
}
// Protect memory beyond the starting size. morecore will add r/w permissions when necessary.
- uint8_t* end = mem_map->Begin() + starting_size;
+ uint8_t* end = mem_map.Begin() + starting_size;
if (capacity - starting_size > 0) {
CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
}
// Everything is set, so record it in the immutable structure and leave.
- uint8_t* begin = mem_map->Begin();
+ uint8_t* begin = mem_map.Begin();
if (Runtime::Current()->IsRunningOnMemoryTool()) {
return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
- mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
- can_move_objects, starting_size);
+ std::move(mem_map),
+ initial_size,
+ name,
+ mspace,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size);
} else {
- return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
- growth_limit, can_move_objects, starting_size);
+ return new DlMallocSpace(std::move(mem_map),
+ initial_size,
+ name,
+ mspace,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size);
}
}
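The PROT_NONE call above implements grow-on-demand: the full capacity is reserved, only the starting window is accessible, and the morecore path re-enables pages as the mspace grows. A standalone sketch of the pattern on Linux; the sizes are illustrative:

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t kPage = 4096;
  const size_t capacity = 64 * kPage;
  const size_t starting_size = 4 * kPage;

  // Reserve the full capacity r/w, then take access away beyond the start,
  // as CreateFromMemMap does with CheckedCall(mprotect, ...).
  void* base = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  char* end = static_cast<char*>(base) + starting_size;
  assert(mprotect(end, capacity - starting_size, PROT_NONE) == 0);

  // Later, the morecore path re-enables pages as the allocator grows.
  assert(mprotect(end, kPage, PROT_READ | PROT_WRITE) == 0);
  end[0] = 1;  // now writable; one page past the original window
  munmap(base, capacity);
}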
@@ -94,15 +126,20 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = kPageSize;
- MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
- requested_begin);
- if (mem_map == nullptr) {
+ MemMap mem_map =
+ CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
return nullptr;
}
- DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
- growth_limit, capacity, can_move_objects);
+ DlMallocSpace* space = CreateFromMemMap(std::move(mem_map),
+ name,
+ starting_size,
+ initial_size,
+ growth_limit,
+ capacity,
+ can_move_objects);
// We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
@@ -152,17 +189,37 @@ mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
return result;
}
-MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
- void* allocator, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit,
+MallocSpace* DlMallocSpace::CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
bool can_move_objects) {
if (Runtime::Current()->IsRunningOnMemoryTool()) {
return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
- mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
- can_move_objects, starting_size_);
+ std::move(mem_map),
+ initial_size_,
+ name,
+ allocator,
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_);
} else {
- return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
- growth_limit, can_move_objects, starting_size_);
+ return new DlMallocSpace(std::move(mem_map),
+ initial_size_,
+ name,
+ allocator,
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_);
}
}
@@ -283,7 +340,7 @@ void DlMallocSpace::Clear() {
live_bitmap_->Clear();
mark_bitmap_->Clear();
SetEnd(Begin() + starting_size_);
- mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
+ mspace_ = CreateMspace(mem_map_.Begin(), starting_size_, initial_size_);
SetFootprintLimit(footprint_limit);
}
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 4c7fcfdeb9..66537d5dac 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -34,9 +34,12 @@ namespace space {
class DlMallocSpace : public MallocSpace {
public:
// Create a DlMallocSpace from an existing mem_map.
- static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
+ static DlMallocSpace* CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
bool can_move_objects);
// Create a DlMallocSpace with the requested sizes. The requested
@@ -118,9 +121,14 @@ class DlMallocSpace : public MallocSpace {
// allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
void SetFootprintLimit(size_t limit) OVERRIDE;
- MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool can_move_objects);
+ MallocSpace* CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects) OVERRIDE;
uint64_t GetBytesAllocated() OVERRIDE;
uint64_t GetObjectsAllocated() OVERRIDE;
@@ -139,9 +147,16 @@ class DlMallocSpace : public MallocSpace {
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
- DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool can_move_objects, size_t starting_size);
+ DlMallocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ void* mspace,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size);
private:
mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 826f382f72..2a4803ab14 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -62,12 +62,12 @@ Atomic<uint32_t> ImageSpace::bitmap_index_(0);
ImageSpace::ImageSpace(const std::string& image_filename,
const char* image_location,
- MemMap* mem_map,
+ MemMap&& mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
uint8_t* end)
: MemMapSpace(image_filename,
- mem_map,
- mem_map->Begin(),
+ std::move(mem_map),
+ mem_map.Begin(),
end,
end,
kGcRetentionPolicyNeverCollect),
@@ -181,18 +181,19 @@ static bool FindImageFilenameImpl(const char* image_location,
bool have_android_data = false;
*dalvik_cache_exists = false;
GetDalvikCache(GetInstructionSetString(image_isa),
- true,
+ /* create_if_absent */ true,
dalvik_cache,
&have_android_data,
dalvik_cache_exists,
is_global_cache);
- if (have_android_data && *dalvik_cache_exists) {
+ if (*dalvik_cache_exists) {
+ DCHECK(have_android_data);
// Always set output location even if it does not exist,
// so that the caller knows where to create the image.
//
// image_location = /system/framework/boot.art
- // *image_filename = /data/dalvik-cache/<image_isa>/boot.art
+ // *image_filename = /data/dalvik-cache/<image_isa>/system@framework@boot.art
std::string error_msg;
if (!GetDalvikCacheFilename(image_location,
dalvik_cache->c_str(),
@@ -381,33 +382,6 @@ ImageHeader* ImageSpace::ReadImageHeader(const char* image_location,
return nullptr;
}
-static bool ChecksumsMatch(const char* image_a, const char* image_b, std::string* error_msg) {
- DCHECK(error_msg != nullptr);
-
- ImageHeader hdr_a;
- ImageHeader hdr_b;
-
- if (!ReadSpecificImageHeader(image_a, &hdr_a)) {
- *error_msg = StringPrintf("Cannot read header of %s", image_a);
- return false;
- }
- if (!ReadSpecificImageHeader(image_b, &hdr_b)) {
- *error_msg = StringPrintf("Cannot read header of %s", image_b);
- return false;
- }
-
- if (hdr_a.GetOatChecksum() != hdr_b.GetOatChecksum()) {
- *error_msg = StringPrintf("Checksum mismatch: %u(%s) vs %u(%s)",
- hdr_a.GetOatChecksum(),
- image_a,
- hdr_b.GetOatChecksum(),
- image_b);
- return false;
- }
-
- return true;
-}
-
static bool CanWriteToDalvikCache(const InstructionSet isa) {
const std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(isa));
if (access(dalvik_cache.c_str(), O_RDWR) == 0) {
@@ -507,9 +481,9 @@ std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
// Helper class encapsulating loading, so we can access private ImageSpace members (this is a
// friend class), but not declare functions in the header.
-class ImageSpaceLoader {
+class ImageSpace::Loader {
public:
- static std::unique_ptr<ImageSpace> Load(const char* image_location,
+ static std::unique_ptr<ImageSpace> Load(const std::string& image_location,
const std::string& image_filename,
bool is_zygote,
bool is_global_cache,
@@ -541,7 +515,7 @@ class ImageSpaceLoader {
// Since we are the boot image, pass null since we load the oat file from the boot image oat
// file name.
return Init(image_filename.c_str(),
- image_location,
+ image_location.c_str(),
validate_oat_file,
/* oat_file */nullptr,
error_msg);
@@ -636,53 +610,53 @@ class ImageSpaceLoader {
return nullptr;
}
- std::unique_ptr<MemMap> map;
+ MemMap map;
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
// If it is PIC we will retry with error_msg for the failure case. Pass a null error_msg to
// avoid reading proc maps for a mapping failure and slowing everything down.
- map.reset(LoadImageFile(image_filename,
- image_location,
- *image_header,
- image_header->GetImageBegin(),
- file->Fd(),
- logger,
- image_header->IsPic() ? nullptr : error_msg));
+ map = LoadImageFile(image_filename,
+ image_location,
+ *image_header,
+ image_header->GetImageBegin(),
+ file->Fd(),
+ logger,
+ image_header->IsPic() ? nullptr : error_msg);
// If the header specifies PIC mode, we can also map at a random low_4gb address since we can
// relocate in-place.
- if (map == nullptr && image_header->IsPic()) {
- map.reset(LoadImageFile(image_filename,
- image_location,
- *image_header,
- /* address */ nullptr,
- file->Fd(),
- logger,
- error_msg));
+ if (!map.IsValid() && image_header->IsPic()) {
+ map = LoadImageFile(image_filename,
+ image_location,
+ *image_header,
+ /* address */ nullptr,
+ file->Fd(),
+ logger,
+ error_msg);
}
// Were we able to load something and continue?
- if (map == nullptr) {
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
return nullptr;
}
- DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader)));
-
- std::unique_ptr<MemMap> image_bitmap_map(MemMap::MapFileAtAddress(nullptr,
- bitmap_section.Size(),
- PROT_READ, MAP_PRIVATE,
- file->Fd(),
- image_bitmap_offset,
- /*low_4gb*/false,
- /*reuse*/false,
- image_filename,
- error_msg));
- if (image_bitmap_map == nullptr) {
+ DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
+
+ MemMap image_bitmap_map = MemMap::MapFileAtAddress(nullptr,
+ bitmap_section.Size(),
+ PROT_READ, MAP_PRIVATE,
+ file->Fd(),
+ image_bitmap_offset,
+ /*low_4gb*/false,
+ /*reuse*/false,
+ image_filename,
+ error_msg);
+ if (!image_bitmap_map.IsValid()) {
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
}
// Loaded the map, use the image header from the file now in case we patch it with
// RelocateInPlace.
- image_header = reinterpret_cast<ImageHeader*>(map->Begin());
+ image_header = reinterpret_cast<ImageHeader*>(map.Begin());
const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
image_filename,
@@ -690,15 +664,15 @@ class ImageSpaceLoader {
// Bitmap only needs to cover until the end of the mirror objects section.
const ImageSection& image_objects = image_header->GetObjectsSection();
// We only want the mirror object, not the ArtFields and ArtMethods.
- uint8_t* const image_end = map->Begin() + image_objects.End();
+ uint8_t* const image_end = map.Begin() + image_objects.End();
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
{
TimingLogger::ScopedTiming timing("CreateImageBitmap", &logger);
bitmap.reset(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(
bitmap_name,
- image_bitmap_map.release(),
- reinterpret_cast<uint8_t*>(map->Begin()),
+ std::move(image_bitmap_map),
+ reinterpret_cast<uint8_t*>(map.Begin()),
// Make sure the bitmap is aligned to card size instead of just bitmap word size.
RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
if (bitmap == nullptr) {
@@ -709,7 +683,7 @@ class ImageSpaceLoader {
{
TimingLogger::ScopedTiming timing("RelocateImage", &logger);
if (!RelocateInPlace(*image_header,
- map->Begin(),
+ map.Begin(),
bitmap.get(),
oat_file,
error_msg)) {
@@ -719,7 +693,7 @@ class ImageSpaceLoader {
// We only want the mirror object, not the ArtFields and ArtMethods.
std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
image_location,
- map.release(),
+ std::move(map),
bitmap.release(),
image_end));
@@ -807,13 +781,13 @@ class ImageSpaceLoader {
}
private:
- static MemMap* LoadImageFile(const char* image_filename,
- const char* image_location,
- const ImageHeader& image_header,
- uint8_t* address,
- int fd,
- TimingLogger& logger,
- std::string* error_msg) {
+ static MemMap LoadImageFile(const char* image_filename,
+ const char* image_location,
+ const ImageHeader& image_header,
+ uint8_t* address,
+ int fd,
+ TimingLogger& logger,
+ std::string* error_msg) {
TimingLogger::ScopedTiming timing("MapImageFile", &logger);
const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
if (storage_mode == ImageHeader::kStorageModeUncompressed) {
@@ -835,45 +809,45 @@ class ImageSpaceLoader {
*error_msg = StringPrintf("Invalid storage mode in image header %d",
static_cast<int>(storage_mode));
}
- return nullptr;
+ return MemMap::Invalid();
}
// Reserve output and decompress into it.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous(image_location,
- address,
- image_header.GetImageSize(),
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- error_msg));
- if (map != nullptr) {
+ MemMap map = MemMap::MapAnonymous(image_location,
+ address,
+ image_header.GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ true,
+ /*reuse*/ false,
+ error_msg);
+ if (map.IsValid()) {
const size_t stored_size = image_header.GetDataSize();
const size_t decompress_offset = sizeof(ImageHeader); // Skip the header.
- std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
- PROT_READ,
- MAP_PRIVATE,
- fd,
- /*offset*/0,
- /*low_4gb*/false,
- image_filename,
- error_msg));
- if (temp_map == nullptr) {
+ MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ fd,
+ /*offset*/0,
+ /*low_4gb*/false,
+ image_filename,
+ error_msg);
+ if (!temp_map.IsValid()) {
DCHECK(error_msg == nullptr || !error_msg->empty());
- return nullptr;
+ return MemMap::Invalid();
}
- memcpy(map->Begin(), &image_header, sizeof(ImageHeader));
+ memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
const uint64_t start = NanoTime();
// LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
const size_t decompressed_size = LZ4_decompress_safe(
- reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
- reinterpret_cast<char*>(map->Begin()) + decompress_offset,
+ reinterpret_cast<char*>(temp_map.Begin()) + sizeof(ImageHeader),
+ reinterpret_cast<char*>(map.Begin()) + decompress_offset,
stored_size,
- map->Size() - decompress_offset);
+ map.Size() - decompress_offset);
const uint64_t time = NanoTime() - start;
// Add 1 ns to prevent a possible divide by 0.
VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
- << PrettySize(static_cast<uint64_t>(map->Size()) * MsToNs(1000) / (time + 1))
+ << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
<< "/s)";
if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
if (error_msg != nullptr) {
@@ -882,11 +856,11 @@ class ImageSpaceLoader {
decompressed_size + sizeof(ImageHeader),
image_header.GetImageSize());
}
- return nullptr;
+ return MemMap::Invalid();
}
}
- return map.release();
+ return map;
}
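LZ4_decompress_safe returns the number of bytes it wrote, or a negative value on malformed input, which is why the loader cross-checks the result against the size recorded in the image header. A self-contained round-trip sketch (link against liblz4):

#include <lz4.h>
#include <cassert>
#include <cstring>
#include <string>
#include <vector>

int main() {
  const std::string src(10000, 'x');  // highly compressible payload
  std::vector<char> compressed(LZ4_compressBound(src.size()));
  const int stored_size = LZ4_compress_default(
      src.data(), compressed.data(), src.size(), compressed.size());
  assert(stored_size > 0);

  std::vector<char> out(src.size());
  // Mirrors the loader: bounded decompress into the reserved output, then
  // validate the byte count against the expected size before trusting it.
  const int decompressed_size = LZ4_decompress_safe(
      compressed.data(), out.data(), stored_size, out.size());
  assert(decompressed_size == static_cast<int>(src.size()));
  assert(std::memcmp(out.data(), src.data(), src.size()) == 0);
}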
class FixupVisitor : public ValueObject {
@@ -1471,6 +1445,181 @@ class ImageSpaceLoader {
}
};
+class ImageSpace::BootImageLoader {
+ public:
+ BootImageLoader(const std::string& image_location, InstructionSet image_isa)
+ : image_location_(image_location),
+ image_isa_(image_isa),
+ is_zygote_(Runtime::Current()->IsZygote()),
+ has_system_(false),
+ has_cache_(false),
+ is_global_cache_(true),
+ dalvik_cache_exists_(false),
+ dalvik_cache_(),
+ cache_filename_() {
+ }
+
+ bool IsZygote() const { return is_zygote_; }
+
+ void FindImageFiles() {
+ std::string system_filename;
+ bool found_image = FindImageFilenameImpl(image_location_.c_str(),
+ image_isa_,
+ &has_system_,
+ &system_filename,
+ &dalvik_cache_exists_,
+ &dalvik_cache_,
+ &is_global_cache_,
+ &has_cache_,
+ &cache_filename_);
+ DCHECK(!dalvik_cache_exists_ || !dalvik_cache_.empty());
+ DCHECK_EQ(found_image, has_system_ || has_cache_);
+ }
+
+ bool HasSystem() const { return has_system_; }
+ bool HasCache() const { return has_cache_; }
+
+ bool DalvikCacheExists() const { return dalvik_cache_exists_; }
+ bool IsGlobalCache() const { return is_global_cache_; }
+
+ const std::string& GetDalvikCache() const {
+ return dalvik_cache_;
+ }
+
+ const std::string& GetCacheFilename() const {
+ return cache_filename_;
+ }
+
+ bool LoadFromSystem(/*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/ uint8_t** oat_file_end,
+ /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
+ std::vector<std::string> locations;
+ if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
+ return false;
+ }
+ std::vector<std::unique_ptr<ImageSpace>> spaces;
+ spaces.reserve(locations.size());
+ for (const std::string& location : locations) {
+ filename = GetSystemImageFilename(location.c_str(), image_isa_);
+ spaces.push_back(Loader::Load(location,
+ filename,
+ is_zygote_,
+ is_global_cache_,
+ /* validate_oat_file */ false,
+ error_msg));
+ if (spaces.back() == nullptr) {
+ return false;
+ }
+ }
+ *oat_file_end = GetOatFileEnd(spaces);
+ boot_image_spaces->swap(spaces);
+ return true;
+ }
+
+ bool LoadFromDalvikCache(
+ bool validate_system_checksums,
+ bool validate_oat_file,
+ /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/ uint8_t** oat_file_end,
+ /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(DalvikCacheExists());
+ std::vector<std::string> locations;
+ if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
+ return false;
+ }
+ std::vector<std::unique_ptr<ImageSpace>> spaces;
+ spaces.reserve(locations.size());
+ for (const std::string& location : locations) {
+ std::string filename;
+ if (!GetDalvikCacheFilename(location.c_str(), dalvik_cache_.c_str(), &filename, error_msg)) {
+ return false;
+ }
+ spaces.push_back(Loader::Load(location,
+ filename,
+ is_zygote_,
+ is_global_cache_,
+ validate_oat_file,
+ error_msg));
+ if (spaces.back() == nullptr) {
+ return false;
+ }
+ if (validate_system_checksums) {
+ ImageHeader system_hdr;
+ std::string system_filename = GetSystemImageFilename(location.c_str(), image_isa_);
+ if (!ReadSpecificImageHeader(system_filename.c_str(), &system_hdr)) {
+ *error_msg = StringPrintf("Cannot read header of %s", system_filename.c_str());
+ return false;
+ }
+ if (spaces.back()->GetImageHeader().GetOatChecksum() != system_hdr.GetOatChecksum()) {
+ *error_msg = StringPrintf("Checksum mismatch: %u(%s) vs %u(%s)",
+ spaces.back()->GetImageHeader().GetOatChecksum(),
+ filename.c_str(),
+ system_hdr.GetOatChecksum(),
+ system_filename.c_str());
+ return false;
+ }
+ }
+ }
+ *oat_file_end = GetOatFileEnd(spaces);
+ boot_image_spaces->swap(spaces);
+ return true;
+ }
+
+ private:
+ // Extract boot class path from oat file associated with `image_filename`
+ // and list all associated image locations.
+ static bool GetBootClassPathImageLocations(const std::string& image_location,
+ const std::string& image_filename,
+ /*out*/ std::vector<std::string>* all_locations,
+ /*out*/ std::string* error_msg) {
+ std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_filename);
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ oat_filename,
+ oat_filename,
+ /* requested_base */ nullptr,
+ /* oat_file_begin */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ error_msg));
+ if (oat_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
+ oat_filename.c_str(),
+ image_filename.c_str(),
+ error_msg->c_str());
+ return false;
+ }
+ const OatHeader& oat_header = oat_file->GetOatHeader();
+ const char* boot_classpath = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
+ all_locations->push_back(image_location);
+ if (boot_classpath != nullptr && boot_classpath[0] != 0) {
+ ExtractMultiImageLocations(image_location, boot_classpath, all_locations);
+ }
+ return true;
+ }
+
+ uint8_t* GetOatFileEnd(const std::vector<std::unique_ptr<ImageSpace>>& spaces) {
+ DCHECK(std::is_sorted(
+ spaces.begin(),
+ spaces.end(),
+ [](const std::unique_ptr<ImageSpace>& lhs, const std::unique_ptr<ImageSpace>& rhs) {
+ return lhs->GetOatFileEnd() < rhs->GetOatFileEnd();
+ }));
+ return AlignUp(spaces.back()->GetOatFileEnd(), kPageSize);
+ }
+
+ const std::string& image_location_;
+ InstructionSet image_isa_;
+ bool is_zygote_;
+ bool has_system_;
+ bool has_cache_;
+ bool is_global_cache_;
+ bool dalvik_cache_exists_;
+ std::string dalvik_cache_;
+ std::string cache_filename_;
+};
+
static constexpr uint64_t kLowSpaceValue = 50 * MB;
static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
@@ -1506,70 +1655,56 @@ static bool CheckSpace(const std::string& cache_filename, std::string* error_msg
return true;
}
-std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_location,
- const InstructionSet image_isa,
- bool secondary_image,
- std::string* error_msg) {
+bool ImageSpace::LoadBootImage(
+ const std::string& image_location,
+ const InstructionSet image_isa,
+ /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/ uint8_t** oat_file_end) {
ScopedTrace trace(__FUNCTION__);
+ DCHECK(boot_image_spaces != nullptr);
+ DCHECK(boot_image_spaces->empty());
+ DCHECK(oat_file_end != nullptr);
+ DCHECK_NE(image_isa, InstructionSet::kNone);
+
+ if (image_location.empty()) {
+ return false;
+ }
+
+ BootImageLoader loader(image_location, image_isa);
+
// Step 0: Extra zygote work.
// Step 0.a: If we're the zygote, mark boot.
- const bool is_zygote = Runtime::Current()->IsZygote();
- if (is_zygote && !secondary_image && CanWriteToDalvikCache(image_isa)) {
+ if (loader.IsZygote() && CanWriteToDalvikCache(image_isa)) {
MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
}
+ loader.FindImageFiles();
+
// Step 0.b: If we're the zygote, check for free space, and prune the cache preemptively,
// if necessary. While the runtime may be fine (it is pretty tolerant to
// out-of-disk-space situations), other parts of the platform are not.
//
// The advantage of doing this proactively is that the later steps are simplified,
// i.e., we do not need to code retries.
- std::string system_filename;
- bool has_system = false;
- std::string cache_filename;
- bool has_cache = false;
- bool dalvik_cache_exists = false;
- bool is_global_cache = true;
- std::string dalvik_cache;
- bool found_image = FindImageFilenameImpl(image_location,
- image_isa,
- &has_system,
- &system_filename,
- &dalvik_cache_exists,
- &dalvik_cache,
- &is_global_cache,
- &has_cache,
- &cache_filename);
-
bool dex2oat_enabled = Runtime::Current()->IsImageDex2OatEnabled();
- if (is_zygote && dalvik_cache_exists && !secondary_image) {
+ if (loader.IsZygote() && loader.DalvikCacheExists()) {
// Extra checks for the zygote. These only apply when loading the first image, explained below.
+ const std::string& dalvik_cache = loader.GetDalvikCache();
DCHECK(!dalvik_cache.empty());
std::string local_error_msg;
// All secondary images are verified when the primary image is verified.
- bool verified = VerifyImage(image_location, dalvik_cache.c_str(), image_isa, &local_error_msg);
- // If we prune for space at a secondary image, we may end up in a crash loop with the _exit
- // path.
+ bool verified =
+ VerifyImage(image_location.c_str(), dalvik_cache.c_str(), image_isa, &local_error_msg);
bool check_space = CheckSpace(dalvik_cache, &local_error_msg);
if (!verified || !check_space) {
- // Note: it is important to only prune for space on the primary image, or we will hit the
- // restart path.
LOG(WARNING) << local_error_msg << " Preemptively pruning the dalvik cache.";
PruneDalvikCache(image_isa);
// Re-evaluate the image.
- found_image = FindImageFilenameImpl(image_location,
- image_isa,
- &has_system,
- &system_filename,
- &dalvik_cache_exists,
- &dalvik_cache,
- &is_global_cache,
- &has_cache,
- &cache_filename);
+ loader.FindImageFiles();
}
if (!check_space) {
// Disable compilation/patching - we do not want to fill up the space again.
@@ -1580,39 +1715,16 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
// Collect all the errors.
std::vector<std::string> error_msgs;
- // Step 1: Check if we have an existing and relocated image.
-
- // Step 1.a: Have files in system and cache. Then they need to match.
- if (found_image && has_system && has_cache) {
- std::string local_error_msg;
- // Check that the files are matching.
- if (ChecksumsMatch(system_filename.c_str(), cache_filename.c_str(), &local_error_msg)) {
- std::unique_ptr<ImageSpace> relocated_space =
- ImageSpaceLoader::Load(image_location,
- cache_filename,
- is_zygote,
- is_global_cache,
- /* validate_oat_file */ false,
- &local_error_msg);
- if (relocated_space != nullptr) {
- return relocated_space;
- }
- }
- error_msgs.push_back(local_error_msg);
- }
-
- // Step 1.b: Only have a cache file.
- if (found_image && !has_system && has_cache) {
+ // Step 1: Check if we have an existing image in the dalvik cache.
+ if (loader.HasCache()) {
std::string local_error_msg;
- std::unique_ptr<ImageSpace> cache_space =
- ImageSpaceLoader::Load(image_location,
- cache_filename,
- is_zygote,
- is_global_cache,
- /* validate_oat_file */ true,
- &local_error_msg);
- if (cache_space != nullptr) {
- return cache_space;
+ // If we have a system image, validate its checksums; otherwise validate the oat file.
+ if (loader.LoadFromDalvikCache(/* validate_system_checksums */ loader.HasSystem(),
+ /* validate_oat_file */ !loader.HasSystem(),
+ boot_image_spaces,
+ oat_file_end,
+ &local_error_msg)) {
+ return true;
}
error_msgs.push_back(local_error_msg);
}
@@ -1622,83 +1734,64 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
// Step 2.a: We are not required to relocate it. Then we can use it directly.
bool relocate = Runtime::Current()->ShouldRelocate();
- if (found_image && has_system && !relocate) {
+ if (loader.HasSystem() && !relocate) {
std::string local_error_msg;
- std::unique_ptr<ImageSpace> system_space =
- ImageSpaceLoader::Load(image_location,
- system_filename,
- is_zygote,
- is_global_cache,
- /* validate_oat_file */ false,
- &local_error_msg);
- if (system_space != nullptr) {
- return system_space;
+ if (loader.LoadFromSystem(boot_image_spaces, oat_file_end, &local_error_msg)) {
+ return true;
}
error_msgs.push_back(local_error_msg);
}
- // Step 2.b: We require a relocated image. Then we must patch it. This step fails if this is a
- // secondary image.
- if (found_image && has_system && relocate) {
+ // Step 2.b: We require a relocated image. Then we must patch it.
+ if (loader.HasSystem() && relocate) {
std::string local_error_msg;
if (!dex2oat_enabled) {
local_error_msg = "Patching disabled.";
- } else if (secondary_image) {
- // We really want a working image. Prune and restart.
- PruneDalvikCache(image_isa);
- _exit(1);
- } else if (ImageCreationAllowed(is_global_cache, image_isa, &local_error_msg)) {
- bool patch_success =
- RelocateImage(image_location, dalvik_cache.c_str(), image_isa, &local_error_msg);
+ } else if (ImageCreationAllowed(loader.IsGlobalCache(), image_isa, &local_error_msg)) {
+ bool patch_success = RelocateImage(
+ image_location.c_str(), loader.GetDalvikCache().c_str(), image_isa, &local_error_msg);
if (patch_success) {
- std::unique_ptr<ImageSpace> patched_space =
- ImageSpaceLoader::Load(image_location,
- cache_filename,
- is_zygote,
- is_global_cache,
- /* validate_oat_file */ false,
- &local_error_msg);
- if (patched_space != nullptr) {
- return patched_space;
+ if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
+ /* validate_oat_file */ false,
+ boot_image_spaces,
+ oat_file_end,
+ &local_error_msg)) {
+ return true;
}
}
}
error_msgs.push_back(StringPrintf("Cannot relocate image %s to %s: %s",
- image_location,
- cache_filename.c_str(),
+ image_location.c_str(),
+ loader.GetCacheFilename().c_str(),
local_error_msg.c_str()));
}
- // Step 3: We do not have an existing image in /system, so generate an image into the dalvik
- // cache. This step fails if this is a secondary image.
- if (!has_system) {
+ // Step 3: We do not have an existing image in /system,
+ // so generate an image into the dalvik cache.
+ if (!loader.HasSystem() && loader.DalvikCacheExists()) {
std::string local_error_msg;
if (!dex2oat_enabled) {
local_error_msg = "Image compilation disabled.";
- } else if (secondary_image) {
- local_error_msg = "Cannot compile a secondary image.";
- } else if (ImageCreationAllowed(is_global_cache, image_isa, &local_error_msg)) {
- bool compilation_success = GenerateImage(cache_filename, image_isa, &local_error_msg);
+ } else if (ImageCreationAllowed(loader.IsGlobalCache(), image_isa, &local_error_msg)) {
+ bool compilation_success =
+ GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
if (compilation_success) {
- std::unique_ptr<ImageSpace> compiled_space =
- ImageSpaceLoader::Load(image_location,
- cache_filename,
- is_zygote,
- is_global_cache,
- /* validate_oat_file */ false,
- &local_error_msg);
- if (compiled_space != nullptr) {
- return compiled_space;
+ if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
+ /* validate_oat_file */ false,
+ boot_image_spaces,
+ oat_file_end,
+ &local_error_msg)) {
+ return true;
}
}
}
error_msgs.push_back(StringPrintf("Cannot compile image to %s: %s",
- cache_filename.c_str(),
+ loader.GetCacheFilename().c_str(),
local_error_msg.c_str()));
}
- // We failed. Prune the cache the free up space, create a compound error message and return no
- // image.
+ // We failed. Prune the cache to free up space, create a compound error message
+ // and return false.
PruneDalvikCache(image_isa);
std::ostringstream oss;
@@ -1709,84 +1802,11 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
}
oss << msg;
}
- *error_msg = oss.str();
-
- return nullptr;
-}
-
-bool ImageSpace::LoadBootImage(const std::string& image_file_name,
- const InstructionSet image_instruction_set,
- std::vector<space::ImageSpace*>* boot_image_spaces,
- uint8_t** oat_file_end) {
- DCHECK(boot_image_spaces != nullptr);
- DCHECK(boot_image_spaces->empty());
- DCHECK(oat_file_end != nullptr);
- DCHECK_NE(image_instruction_set, InstructionSet::kNone);
-
- if (image_file_name.empty()) {
- return false;
- }
-
- // For code reuse, handle this like a work queue.
- std::vector<std::string> image_file_names;
- image_file_names.push_back(image_file_name);
-
- bool error = false;
- uint8_t* oat_file_end_tmp = *oat_file_end;
-
- for (size_t index = 0; index < image_file_names.size(); ++index) {
- std::string& image_name = image_file_names[index];
- std::string error_msg;
- std::unique_ptr<space::ImageSpace> boot_image_space_uptr = CreateBootImage(
- image_name.c_str(),
- image_instruction_set,
- index > 0,
- &error_msg);
- if (boot_image_space_uptr != nullptr) {
- space::ImageSpace* boot_image_space = boot_image_space_uptr.release();
- boot_image_spaces->push_back(boot_image_space);
- // Oat files referenced by image files immediately follow them in memory, ensure alloc space
- // isn't going to get in the middle
- uint8_t* oat_file_end_addr = boot_image_space->GetImageHeader().GetOatFileEnd();
- CHECK_GT(oat_file_end_addr, boot_image_space->End());
- oat_file_end_tmp = AlignUp(oat_file_end_addr, kPageSize);
-
- if (index == 0) {
- // If this was the first space, check whether there are more images to load.
- const OatFile* boot_oat_file = boot_image_space->GetOatFile();
- if (boot_oat_file == nullptr) {
- continue;
- }
-
- const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
- const char* boot_classpath =
- boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
- if (boot_classpath == nullptr) {
- continue;
- }
-
- ExtractMultiImageLocations(image_file_name, boot_classpath, &image_file_names);
- }
- } else {
- error = true;
- LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
- << "Attempting to fall back to imageless running. Error was: " << error_msg
- << "\nAttempted image: " << image_name;
- break;
- }
- }
- if (error) {
- // Remove already loaded spaces.
- for (space::Space* loaded_space : *boot_image_spaces) {
- delete loaded_space;
- }
- boot_image_spaces->clear();
- return false;
- }
+ LOG(ERROR) << "Could not create image space with image file '" << image_location << "'. "
+ << "Attempting to fall back to imageless running. Error was: " << oss.str();
- *oat_file_end = oat_file_end_tmp;
- return true;
+ return false;
}
ImageSpace::~ImageSpace() {
@@ -1815,11 +1835,7 @@ ImageSpace::~ImageSpace() {
std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
const OatFile* oat_file,
std::string* error_msg) {
- return ImageSpaceLoader::Init(image,
- image,
- /*validate_oat_file*/false,
- oat_file,
- /*out*/error_msg);
+ return Loader::Init(image, image, /*validate_oat_file*/false, oat_file, /*out*/error_msg);
}
const OatFile* ImageSpace::GetOatFile() const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 3383d6b383..20bce66957 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -41,11 +41,11 @@ class ImageSpace : public MemMapSpace {
// On successful return, the loaded spaces are added to boot_image_spaces (which must be
// empty on entry) and oat_file_end is updated with the (page-aligned) end of the last
// oat file.
- static bool LoadBootImage(const std::string& image_file_name,
- const InstructionSet image_instruction_set,
- std::vector<space::ImageSpace*>* boot_image_spaces,
- uint8_t** oat_file_end)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ static bool LoadBootImage(
+ const std::string& image_location,
+ const InstructionSet image_isa,
+ /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/ uint8_t** oat_file_end) REQUIRES_SHARED(Locks::mutator_lock_);
// Try to open an existing app image space.
static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
@@ -182,7 +182,7 @@ class ImageSpace : public MemMapSpace {
ImageSpace(const std::string& name,
const char* image_location,
- MemMap* mem_map,
+ MemMap&& mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
uint8_t* end);
@@ -197,23 +197,11 @@ class ImageSpace : public MemMapSpace {
const std::string image_location_;
- friend class ImageSpaceLoader;
friend class Space;
private:
- // Create a boot image space from an image file for a specified instruction
- // set. Cannot be used for future allocation or collected.
- //
- // Create also opens the OatFile associated with the image file so
- // that it be contiguously allocated with the image before the
- // creation of the alloc space. The ReleaseOatFile will later be
- // used to transfer ownership of the OatFile to the ClassLinker when
- // it is initialized.
- static std::unique_ptr<ImageSpace> CreateBootImage(const char* image,
- InstructionSet image_isa,
- bool secondary_image,
- std::string* error_msg)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ class Loader;
+ class BootImageLoader;
DISALLOW_COPY_AND_ASSIGN(ImageSpace);
};
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index f202a43be9..b80a7bd76a 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -150,6 +150,37 @@ TEST_F(ImageSpaceNoRelocateNoDex2oatNoPatchoatTest, Test) {
EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
}
+class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
+ protected:
+ void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ const char* android_data = getenv("ANDROID_DATA");
+ CHECK(android_data != nullptr);
+ old_android_data_ = android_data;
+ bad_android_data_ = old_android_data_ + "/no-android-data";
+ int result = setenv("ANDROID_DATA", bad_android_data_.c_str(), /* replace */ 1);
+ CHECK_EQ(result, 0) << strerror(errno);
+ result = mkdir(bad_android_data_.c_str(), /* no access */ 0);
+ CHECK_EQ(result, 0) << strerror(errno);
+ ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
+ }
+
+ void TearDown() OVERRIDE {
+ int result = rmdir(bad_android_data_.c_str());
+ CHECK_EQ(result, 0) << strerror(errno);
+ result = setenv("ANDROID_DATA", old_android_data_.c_str(), /* replace */ 1);
+ CHECK_EQ(result, 0) << strerror(errno);
+ ImageSpaceLoadingTest<false, true, false, true>::TearDown();
+ }
+
+ private:
+ std::string old_android_data_;
+ std::string bad_android_data_;
+};
+
+TEST_F(NoAccessAndroidDataTest, Test) {
+ EXPECT_TRUE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a24ca32314..ada59b30f4 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -48,10 +48,6 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
// Historical note: We were deleting large objects to keep Valgrind happy if there were
// any large objects such as Dex cache arrays which aren't freed since they are held live
// by the class linker.
- MutexLock mu(Thread::Current(), lock_);
- for (auto& m : large_objects_) {
- delete m.second.mem_map;
- }
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -139,16 +135,21 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
- PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (UNLIKELY(mem_map == nullptr)) {
+ MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
+ /* addr */ nullptr,
+ num_bytes,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
return nullptr;
}
- mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
+ mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map.Begin());
+ const size_t allocation_size = mem_map.BaseSize();
MutexLock mu(self, lock_);
- large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
- const size_t allocation_size = mem_map->BaseSize();
+ large_objects_.Put(obj, LargeObject {std::move(mem_map), false /* not zygote */});
DCHECK(bytes_allocated != nullptr);
if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
@@ -191,13 +192,11 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
}
- MemMap* mem_map = it->second.mem_map;
- const size_t map_size = mem_map->BaseSize();
+ const size_t map_size = it->second.mem_map.BaseSize();
DCHECK_GE(num_bytes_allocated_, map_size);
size_t allocation_size = map_size;
num_bytes_allocated_ -= allocation_size;
--num_objects_allocated_;
- delete mem_map;
large_objects_.erase(it);
return allocation_size;
}
@@ -206,7 +205,7 @@ size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_s
MutexLock mu(Thread::Current(), lock_);
auto it = large_objects_.find(obj);
CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
- size_t alloc_size = it->second.mem_map->BaseSize();
+ size_t alloc_size = it->second.mem_map.BaseSize();
if (usable_size != nullptr) {
*usable_size = alloc_size;
}
@@ -227,7 +226,7 @@ size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object*
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
MutexLock mu(Thread::Current(), lock_);
for (auto& pair : large_objects_) {
- MemMap* mem_map = pair.second.mem_map;
+ MemMap* mem_map = &pair.second.mem_map;
callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
callback(nullptr, nullptr, 0, arg);
}
@@ -326,7 +325,7 @@ class AllocationInfo {
size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
DCHECK_GE(info, allocation_info_);
- DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
+ DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_.End()));
return info - allocation_info_;
}
@@ -350,28 +349,39 @@ inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
- PROT_READ | PROT_WRITE, true, false, &error_msg);
- CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
- return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
+ return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
}
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
+FreeListSpace::FreeListSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end)
: LargeObjectSpace(name, begin, end),
- mem_map_(mem_map),
+ mem_map_(std::move(mem_map)),
lock_("free list space lock", kAllocSpaceLock) {
const size_t space_capacity = end - begin;
free_end_ = space_capacity;
CHECK_ALIGNED(space_capacity, kAlignment);
const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
std::string error_msg;
- allocation_info_map_.reset(
+ allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
- false, false, &error_msg));
- CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
- << error_msg;
- allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
+ /* addr */ nullptr,
+ alloc_info_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
+ allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
}
FreeListSpace::~FreeListSpace() {}
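The side table allocated above holds one AllocationInfo per kAlignment-sized slot, so addresses, info pointers, and slot indices are interconvertible by plain pointer arithmetic, as the accessors in the header below show. A reduced sketch with illustrative constants:

#include <cassert>
#include <cstdint>
#include <vector>

// One bookkeeping record per kAlignment-sized slot of the space, mirroring
// FreeListSpace's allocation_info_ side table. Field names are illustrative.
struct AllocationInfo { uint32_t prev_free = 0; uint32_t alloc_size = 0; };

int main() {
  const size_t kAlignment = 4096;      // example; ART uses its own constant
  const size_t capacity = 64 * kAlignment;
  const uintptr_t begin = 0x10000000;  // pretend base address of the space

  std::vector<AllocationInfo> infos(capacity / kAlignment);

  // Address -> slot index, as in GetSlotIndexForAddress.
  uintptr_t addr = begin + 5 * kAlignment;
  size_t slot = (addr - begin) / kAlignment;
  assert(slot == 5);

  // Info pointer -> slot index, as in GetSlotIndexForAllocationInfo.
  const AllocationInfo* info = &infos[slot];
  assert(static_cast<size_t>(info - infos.data()) == slot);
}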
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index f37d814ffe..b69bd91162 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -148,7 +148,7 @@ class LargeObjectMapSpace : public LargeObjectSpace {
protected:
struct LargeObject {
- MemMap* mem_map;
+ MemMap mem_map;
bool is_zygote;
};
explicit LargeObjectMapSpace(const std::string& name);
@@ -182,7 +182,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
protected:
- FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
+ FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
size_t GetSlotIndexForAddress(uintptr_t address) const {
DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
@@ -210,9 +210,9 @@ class FreeListSpace FINAL : public LargeObjectSpace {
// There is no footer for any allocation at the end of the space, so we keep track of how much
// free space remains at the end manually.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// Side table for allocation info, one per page.
- std::unique_ptr<MemMap> allocation_info_map_;
+ MemMap allocation_info_map_;
AllocationInfo* allocation_info_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
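Holding the MemMap by value inside LargeObject ties each mapping's lifetime to its table entry: erasing the entry unmaps the pages through the destructor, with no manual delete. A brief usage sketch, reusing the hypothetical MiniMap above:

#include <map>
#include <utility>

std::map<void*, MiniMap> large_objects;

void Demo() {
  MiniMap m = MiniMap::MapAnonymous(4096);
  void* key = m.Begin();
  large_objects.emplace(key, std::move(m));  // Entry now owns the pages.
  large_objects.erase(key);                  // Erase munmaps automatically.
}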
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 6936fdc6d4..91e0ce8102 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -40,19 +40,26 @@ using android::base::StringPrintf;
size_t MallocSpace::bitmap_index_ = 0;
-MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool create_bitmaps, bool can_move_objects, size_t starting_size,
+MallocSpace::MallocSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool create_bitmaps,
+ bool can_move_objects,
+ size_t starting_size,
size_t initial_size)
- : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
+ : ContinuousMemMapAllocSpace(
+ name, std::move(mem_map), begin, end, limit, kGcRetentionPolicyAlwaysCollect),
recent_free_pos_(0), lock_("allocation space lock", kAllocSpaceLock),
growth_limit_(growth_limit), can_move_objects_(can_move_objects),
starting_size_(starting_size), initial_size_(initial_size) {
if (create_bitmaps) {
size_t bitmap_index = bitmap_index_++;
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
- CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
- CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.Begin()), kGcCardSize);
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.End()), kGcCardSize);
live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
@@ -70,8 +77,12 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
}
}
-MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
- size_t* growth_limit, size_t* capacity, uint8_t* requested_begin) {
+MemMap MallocSpace::CreateMemMap(const std::string& name,
+ size_t starting_size,
+ size_t* initial_size,
+ size_t* growth_limit,
+ size_t* capacity,
+ uint8_t* requested_begin) {
// Sanity check arguments
if (starting_size > *initial_size) {
*initial_size = starting_size;
@@ -80,13 +91,13 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(*initial_size) << ") is larger than its capacity ("
<< PrettySize(*growth_limit) << ")";
- return nullptr;
+ return MemMap::Invalid();
}
if (*growth_limit > *capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
<< PrettySize(*growth_limit) << ") is larger than the capacity ("
<< PrettySize(*capacity) << ")";
- return nullptr;
+ return MemMap::Invalid();
}
// Page align growth limit and capacity which will be used to manage mmapped storage
@@ -94,9 +105,14 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
*capacity = RoundUp(*capacity, kPageSize);
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
- PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (mem_map == nullptr) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ *capacity,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(*capacity) << ": " << error_msg;
}
@@ -194,18 +210,24 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
VLOG(heap) << "Capacity " << PrettySize(capacity);
// Remap the tail.
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(), alloc_space_name,
- PROT_READ | PROT_WRITE, &error_msg));
- CHECK(mem_map.get() != nullptr) << error_msg;
- void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
- low_memory_mode);
+ MemMap mem_map = GetMemMap()->RemapAtEnd(
+ End(), alloc_space_name, PROT_READ | PROT_WRITE, &error_msg);
+ CHECK(mem_map.IsValid()) << error_msg;
+ void* allocator =
+ CreateAllocator(End(), starting_size_, initial_size_, capacity, low_memory_mode);
// Protect memory beyond the initial size.
- uint8_t* end = mem_map->Begin() + starting_size_;
+ uint8_t* end = mem_map.Begin() + starting_size_;
if (capacity > initial_size_) {
CheckedCall(mprotect, alloc_space_name, end, capacity - initial_size_, PROT_NONE);
}
- *out_malloc_space = CreateInstance(mem_map.release(), alloc_space_name, allocator, End(), end,
- limit_, growth_limit, CanMoveObjects());
+ *out_malloc_space = CreateInstance(std::move(mem_map),
+ alloc_space_name,
+ allocator,
+ End(),
+ end,
+ limit_,
+ growth_limit,
+ CanMoveObjects());
SetLimit(End());
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
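CreateZygoteSpace keeps the already-used prefix of the space and re-wraps the tail as a fresh map for the new allocation space, protecting everything in the tail beyond the initial size until it is needed. A rough sketch of the same split-and-protect step with plain mprotect (this is not the MemMap::RemapAtEnd API; page-aligned sizes are assumed):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// `end` is the current allocation frontier; [end, end + tail_capacity)
// becomes the new space, of which only the first initial_size bytes stay
// readable and writable.
void ProtectTail(uint8_t* end, size_t tail_capacity, size_t initial_size) {
  if (tail_capacity > initial_size) {
    mprotect(end + initial_size, tail_capacity - initial_size, PROT_NONE);
  }
}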
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index c1f4841cb6..e4a6f158ec 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -113,9 +113,14 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
void SetGrowthLimit(size_t growth_limit);
- virtual MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit,
- size_t growth_limit, bool can_move_objects) = 0;
+ virtual MallocSpace* CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects) = 0;
// Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
// the low memory mode argument specifies that the heap wishes the created space to be more
@@ -137,12 +142,23 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
}
protected:
- MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
- size_t starting_size, size_t initial_size);
-
- static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
- size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);
+ MallocSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool create_bitmaps,
+ bool can_move_objects,
+ size_t starting_size,
+ size_t initial_size);
+
+ static MemMap CreateMemMap(const std::string& name,
+ size_t starting_size,
+ size_t* initial_size,
+ size_t* growth_limit,
+ size_t* capacity,
+ uint8_t* requested_begin);
// When true the low memory mode argument specifies that the heap wishes the created allocator to
// be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index c022171082..f1c1cb8ca2 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -267,8 +267,8 @@ MemoryToolMallocSpace<S,
kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::MemoryToolMallocSpace(
- MemMap* mem_map, size_t initial_size, Params... params)
- : S(mem_map, initial_size, params...) {
+ MemMap&& mem_map, size_t initial_size, Params... params)
+ : S(std::move(mem_map), initial_size, params...) {
// Don't want to change the memory tool states of the mem map here as the allocator is already
// initialized at this point and that may interfere with what the allocator does internally. Note
// that the tail beyond the initial size is mprotected.
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index e53f009213..32bd204354 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -53,7 +53,7 @@ class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
template <typename... Params>
- MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
+ MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
virtual ~MemoryToolMallocSpace() {}
private:
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 0569092bcd..85e6919dac 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -30,9 +30,8 @@ namespace space {
// value of the region size, evacuate the region.
static constexpr uint kEvacuateLivePercentThreshold = 75U;
-// Whether we protect the cleared regions.
-// Only protect for target builds to prevent flaky test failures (b/63131961).
-static constexpr bool kProtectClearedRegions = kIsTargetBuild;
+// Whether we protect the unused and cleared regions.
+static constexpr bool kProtectClearedRegions = true;
// Whether we poison memory areas occupied by dead objects in unevacuated regions.
static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;
@@ -46,60 +45,65 @@ static constexpr uint32_t kPoisonDeadObject = 0xBADDB01D; // "BADDROID"
// Whether we check a region's live bytes count against the region bitmap.
static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
-MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
- uint8_t* requested_begin) {
+MemMap RegionSpace::CreateMemMap(const std::string& name,
+ size_t capacity,
+ uint8_t* requested_begin) {
CHECK_ALIGNED(capacity, kRegionSize);
std::string error_msg;
// Ask for an additional kRegionSize of capacity so that we can align the map to kRegionSize
// even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
- std::unique_ptr<MemMap> mem_map;
+ MemMap mem_map;
while (true) {
- mem_map.reset(MemMap::MapAnonymous(name.c_str(),
- requested_begin,
- capacity + kRegionSize,
- PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- if (mem_map.get() != nullptr || requested_begin == nullptr) {
+ mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ capacity + kRegionSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ &error_msg);
+ if (mem_map.IsValid() || requested_begin == nullptr) {
break;
}
// Retry with no specified request begin.
requested_begin = nullptr;
}
- if (mem_map.get() == nullptr) {
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
MemMap::DumpMaps(LOG_STREAM(ERROR));
- return nullptr;
+ return MemMap::Invalid();
}
- CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
- CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
- CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
- if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
+ CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
+ CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
+ CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
+ if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
// Got an aligned map. Since we requested a map that's kRegionSize larger, shrink it by
// kRegionSize at the end.
- mem_map->SetSize(capacity);
+ mem_map.SetSize(capacity);
} else {
// Got an unaligned map. Align both ends.
- mem_map->AlignBy(kRegionSize);
+ mem_map.AlignBy(kRegionSize);
}
- CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
- CHECK_ALIGNED(mem_map->End(), kRegionSize);
- CHECK_EQ(mem_map->Size(), capacity);
- return mem_map.release();
+ CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
+ CHECK_ALIGNED(mem_map.End(), kRegionSize);
+ CHECK_EQ(mem_map.Size(), capacity);
+ return mem_map;
}
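The loop above deliberately over-allocates by one region so that any base address can be trimmed to a kRegionSize-aligned map of exactly `capacity` bytes: an aligned map just drops the spare region at its end (SetSize), while an unaligned one is aligned at both ends (AlignBy). The underlying arithmetic, as a standalone sketch (the kRegionSize value is illustrative only):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uintptr_t kRegionSize = 256 * 1024;  // Illustrative value.

// Given a map of capacity + kRegionSize bytes at `begin`, returns the
// start of the kRegionSize-aligned sub-range of exactly `capacity` bytes.
uintptr_t AlignedBegin(uintptr_t begin, size_t capacity) {
  uintptr_t aligned = (begin + kRegionSize - 1) & ~(kRegionSize - 1);
  // `aligned` is at most kRegionSize past `begin`, so `capacity` bytes
  // still fit inside the over-sized mapping.
  assert(aligned + capacity <= begin + capacity + kRegionSize);
  return aligned;
}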
-RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
- return new RegionSpace(name, mem_map);
+RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) {
+ return new RegionSpace(name, std::move(mem_map));
}
-RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
- : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map)
+ : ContinuousMemMapAllocSpace(name,
+ std::move(mem_map),
+ mem_map.Begin(),
+ mem_map.End(),
+ mem_map.End(),
kGcRetentionPolicyAlwaysCollect),
region_lock_("Region lock", kRegionSpaceRegionLock),
time_(1U),
- num_regions_(mem_map->Size() / kRegionSize),
+ num_regions_(mem_map_.Size() / kRegionSize),
num_non_free_regions_(0U),
num_evac_regions_(0U),
max_peak_num_non_free_regions_(0U),
@@ -107,11 +111,11 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
current_region_(&full_region_),
evac_region_(nullptr),
cyclic_alloc_region_index_(0U) {
- CHECK_ALIGNED(mem_map->Size(), kRegionSize);
- CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
+ CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
+ CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
DCHECK_GT(num_regions_, 0U);
regions_.reset(new Region[num_regions_]);
- uint8_t* region_addr = mem_map->Begin();
+ uint8_t* region_addr = mem_map_.Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
regions_[i].Init(i, region_addr, region_addr + kRegionSize);
}
@@ -132,6 +136,8 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
DCHECK(full_region_.IsAllocated());
size_t ignored;
DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
+ // Protect the whole region space from the start.
+ Protect();
}
size_t RegionSpace::FromSpaceSize() {
@@ -552,6 +558,18 @@ void RegionSpace::Clear() {
evac_region_ = &full_region_;
}
+void RegionSpace::Protect() {
+ if (kProtectClearedRegions) {
+ CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_NONE);
+ }
+}
+
+void RegionSpace::Unprotect() {
+ if (kProtectClearedRegions) {
+ CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_READ | PROT_WRITE);
+ }
+}
+
void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
MutexLock mu(Thread::Current(), region_lock_);
CHECK_LE(new_capacity, NonGrowthLimitCapacity());
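Protect and Unprotect just toggle page permissions over the whole space: with kProtectClearedRegions now unconditionally true, a stray access to a cleared or unused region faults immediately instead of silently reading stale objects, and Unprotect exists so diagnostics can still dump such memory on purpose. A minimal RAII variant of the same toggle (hypothetical helper, assuming the PROT_NONE baseline above):

#include <sys/mman.h>
#include <cstddef>

// Lifts PROT_NONE over [begin, begin + size) and restores it on scope
// exit, e.g. around code that dumps a suspect reference.
class ScopedUnprotect {
 public:
  ScopedUnprotect(void* begin, size_t size) : begin_(begin), size_(size) {
    mprotect(begin_, size_, PROT_READ | PROT_WRITE);
  }
  ~ScopedUnprotect() { mprotect(begin_, size_, PROT_NONE); }
 private:
  void* const begin_;
  const size_t size_;
};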
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 90f1f1dd2a..beedfd2027 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -50,8 +50,8 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// Create a region space mem map with the requested sizes. The requested base address is not
// guaranteed to be granted; if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
- static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
- static RegionSpace* Create(const std::string& name, MemMap* mem_map);
+ static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
+ static RegionSpace* Create(const std::string& name, MemMap&& mem_map);
// Allocate `num_bytes`, returns null if the space is full.
mirror::Object* Alloc(Thread* self,
@@ -108,6 +108,17 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
void Clear() OVERRIDE REQUIRES(!region_lock_);
+ // Protect the whole region space from read and write accesses, i.e. make
+ // memory pages backing the region area not readable and not
+ // writable.
+ void Protect();
+
+ // Remove memory protection from the whole region space, i.e. make memory
+ // pages backing the region area readable and writable. This method is useful
+ // to avoid page protection faults when dumping information about an invalid
+ // reference.
+ void Unprotect();
+
// Change the non growth limit capacity to new capacity by shrinking or expanding the map.
// Currently, only shrinking is supported.
// Unlike implementations of this function in other spaces, we need to pass
@@ -290,7 +301,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
private:
- RegionSpace(const std::string& name, MemMap* mem_map);
+ RegionSpace(const std::string& name, MemMap&& mem_map);
template<bool kToSpaceOnly, typename Visitor>
ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index b0402e4b83..10ff1c15b1 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -44,48 +44,88 @@ static constexpr bool kVerifyFreedBytes = false;
// TODO: Fix
// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
-RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
- art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit, bool can_move_objects,
- size_t starting_size, bool low_memory_mode)
- : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+RosAllocSpace::RosAllocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ art::gc::allocator::RosAlloc* rosalloc,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size,
+ bool low_memory_mode)
+ : MallocSpace(name,
+ std::move(mem_map),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ true,
+ can_move_objects,
starting_size, initial_size),
rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
CHECK(rosalloc != nullptr);
}
-RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
- bool low_memory_mode, bool can_move_objects) {
- DCHECK(mem_map != nullptr);
+RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
+ bool can_move_objects) {
+ DCHECK(mem_map.IsValid());
bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();
- allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
- capacity, low_memory_mode, running_on_memory_tool);
+ allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map.Begin(),
+ starting_size,
+ initial_size,
+ capacity,
+ low_memory_mode,
+ running_on_memory_tool);
if (rosalloc == nullptr) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
return nullptr;
}
// Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
- uint8_t* end = mem_map->Begin() + starting_size;
+ uint8_t* end = mem_map.Begin() + starting_size;
if (capacity - starting_size > 0) {
CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
}
// Everything is set so record in immutable structure and leave
- uint8_t* begin = mem_map->Begin();
+ uint8_t* begin = mem_map.Begin();
// TODO: Fix RosAllocSpace to support ASan. There is currently some issues with
// AllocationSize caused by redzones. b/12944686
if (running_on_memory_tool) {
return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
- mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
- can_move_objects, starting_size, low_memory_mode);
+ std::move(mem_map),
+ initial_size,
+ name,
+ rosalloc,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size,
+ low_memory_mode);
} else {
- return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
- growth_limit, can_move_objects, starting_size, low_memory_mode);
+ return new RosAllocSpace(std::move(mem_map),
+ initial_size,
+ name,
+ rosalloc,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size,
+ low_memory_mode);
}
}
@@ -111,16 +151,21 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = Heap::kDefaultStartingSize;
- MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
- requested_begin);
- if (mem_map == nullptr) {
+ MemMap mem_map =
+ CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
return nullptr;
}
- RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
- growth_limit, capacity, low_memory_mode,
+ RosAllocSpace* space = CreateFromMemMap(std::move(mem_map),
+ name,
+ starting_size,
+ initial_size,
+ growth_limit,
+ capacity,
+ low_memory_mode,
can_move_objects);
// We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -175,18 +220,39 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
return result;
}
-MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
- void* allocator, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit,
+MallocSpace* RosAllocSpace::CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
bool can_move_objects) {
if (Runtime::Current()->IsRunningOnMemoryTool()) {
return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
- mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
- limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+ std::move(mem_map),
+ initial_size_,
+ name,
+ reinterpret_cast<allocator::RosAlloc*>(allocator),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_,
+ low_memory_mode_);
} else {
- return new RosAllocSpace(mem_map, initial_size_, name,
- reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
- growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+ return new RosAllocSpace(std::move(mem_map),
+ initial_size_,
+ name,
+ reinterpret_cast<allocator::RosAlloc*>(allocator),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_,
+ low_memory_mode_);
}
}
@@ -364,8 +430,11 @@ void RosAllocSpace::Clear() {
mark_bitmap_->Clear();
SetEnd(begin_ + starting_size_);
delete rosalloc_;
- rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
- NonGrowthLimitCapacity(), low_memory_mode_,
+ rosalloc_ = CreateRosAlloc(mem_map_.Begin(),
+ starting_size_,
+ initial_size_,
+ NonGrowthLimitCapacity(),
+ low_memory_mode_,
Runtime::Current()->IsRunningOnMemoryTool());
SetFootprintLimit(footprint_limit);
}
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 4c17233360..c630826f48 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -41,10 +41,14 @@ class RosAllocSpace : public MallocSpace {
static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
bool can_move_objects);
- static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
- bool low_memory_mode, bool can_move_objects);
+ static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
+ bool can_move_objects);
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
@@ -111,8 +115,13 @@ class RosAllocSpace : public MallocSpace {
void Clear() OVERRIDE;
- MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
+ MallocSpace* CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
bool can_move_objects) OVERRIDE;
uint64_t GetBytesAllocated() OVERRIDE;
@@ -147,9 +156,16 @@ class RosAllocSpace : public MallocSpace {
void DumpStats(std::ostream& os);
protected:
- RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
- allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, uint8_t* limit,
- size_t growth_limit, bool can_move_objects, size_t starting_size,
+ RosAllocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ allocator::RosAlloc* rosalloc,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size,
bool low_memory_mode);
private:
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4f43d9f5c5..4e173a86f1 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -377,30 +377,30 @@ class MemMapSpace : public ContinuousSpace {
}
MemMap* GetMemMap() {
- return mem_map_.get();
+ return &mem_map_;
}
const MemMap* GetMemMap() const {
- return mem_map_.get();
+ return &mem_map_;
}
- MemMap* ReleaseMemMap() {
- return mem_map_.release();
+ MemMap ReleaseMemMap() {
+ return std::move(mem_map_);
}
protected:
MemMapSpace(const std::string& name,
- MemMap* mem_map,
+ MemMap&& mem_map,
uint8_t* begin,
uint8_t* end,
uint8_t* limit,
GcRetentionPolicy gc_retention_policy)
: ContinuousSpace(name, gc_retention_policy, begin, end, limit),
- mem_map_(mem_map) {
+ mem_map_(std::move(mem_map)) {
}
// Underlying storage of the space
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
@@ -451,9 +451,13 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
- ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
- uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
- : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
+ ContinuousMemMapAllocSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ GcRetentionPolicy gc_retention_policy)
+ : MemMapSpace(name, std::move(mem_map), begin, end, limit, gc_retention_policy) {
}
private:
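Note the changed contract of ReleaseMemMap: instead of release()-ing a heap pointer, it moves the map out and leaves mem_map_ in the invalid, moved-from state. A caller that used to take ownership of a MemMap* now receives the value directly (sketch; the caller shown is hypothetical):

// After this returns, space->GetMemMap()->IsValid() is false and the
// pages belong to the returned value.
MemMap TakeOwnership(MemMapSpace* space) {
  return space->ReleaseMemMap();  // A move, not a pointer release.
}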
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 8c73ef9116..ed85b061ed 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -41,7 +41,8 @@ class CountObjectsAllocated {
size_t* const objects_allocated_;
};
-ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
+ZygoteSpace* ZygoteSpace::Create(const std::string& name,
+ MemMap&& mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
accounting::ContinuousSpaceBitmap* mark_bitmap) {
DCHECK(live_bitmap != nullptr);
@@ -49,9 +50,9 @@ ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
size_t objects_allocated = 0;
CountObjectsAllocated visitor(&objects_allocated);
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map->Begin()),
- reinterpret_cast<uintptr_t>(mem_map->End()), visitor);
- ZygoteSpace* zygote_space = new ZygoteSpace(name, mem_map, objects_allocated);
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map.Begin()),
+ reinterpret_cast<uintptr_t>(mem_map.End()), visitor);
+ ZygoteSpace* zygote_space = new ZygoteSpace(name, std::move(mem_map), objects_allocated);
CHECK(zygote_space->live_bitmap_.get() == nullptr);
CHECK(zygote_space->mark_bitmap_.get() == nullptr);
zygote_space->live_bitmap_.reset(live_bitmap);
@@ -64,8 +65,12 @@ void ZygoteSpace::Clear() {
UNREACHABLE();
}
-ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
- : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ZygoteSpace::ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated)
+ : ContinuousMemMapAllocSpace(name,
+ std::move(mem_map),
+ mem_map.Begin(),
+ mem_map.End(),
+ mem_map.End(),
kGcRetentionPolicyFullCollect),
objects_allocated_(objects_allocated) {
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 6fe21d99a8..200c79f00c 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -30,7 +30,8 @@ namespace space {
class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
public:
// Returns the remaining storage in the out_map field.
- static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
+ static ZygoteSpace* Create(const std::string& name,
+ MemMap&& mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
accounting::ContinuousSpaceBitmap* mark_bitmap)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -85,7 +86,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
}
private:
- ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated);
+ ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated);
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
AtomicInteger objects_allocated_;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 950a54d61e..098db9f743 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -78,14 +78,19 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
const size_t table_bytes = max_count * sizeof(IrtEntry);
- table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
- PROT_READ | PROT_WRITE, false, false, error_msg));
- if (table_mem_map_.get() == nullptr && error_msg->empty()) {
+ table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
+ /* addr */ nullptr,
+ table_bytes,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ error_msg);
+ if (!table_mem_map_.IsValid() && error_msg->empty()) {
*error_msg = "Unable to map memory for indirect ref table";
}
- if (table_mem_map_.get() != nullptr) {
- table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+ if (table_mem_map_.IsValid()) {
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
} else {
table_ = nullptr;
}
@@ -125,7 +130,7 @@ void IndirectReferenceTable::ConstexprChecks() {
}
bool IndirectReferenceTable::IsValid() const {
- return table_mem_map_.get() != nullptr;
+ return table_mem_map_.IsValid();
}
// Holes:
@@ -217,20 +222,20 @@ bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
// Note: the above check also ensures that there is no overflow below.
const size_t table_bytes = new_size * sizeof(IrtEntry);
- std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
- nullptr,
- table_bytes,
- PROT_READ | PROT_WRITE,
- false,
- false,
- error_msg));
- if (new_map == nullptr) {
+ MemMap new_map = MemMap::MapAnonymous("indirect ref table",
+ /* addr */ nullptr,
+ table_bytes,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ error_msg);
+ if (!new_map.IsValid()) {
return false;
}
- memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
+ memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
table_mem_map_ = std::move(new_map);
- table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
max_entries_ = new_size;
return true;
@@ -444,7 +449,7 @@ void IndirectReferenceTable::Trim() {
ScopedTrace trace(__PRETTY_FUNCTION__);
const size_t top_index = Capacity();
auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
- uint8_t* release_end = table_mem_map_->End();
+ uint8_t* release_end = table_mem_map_.End();
madvise(release_start, release_end - release_start, MADV_DONTNEED);
}
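Resize is the value-type version of grow-and-copy: map a larger block, copy the live entries, then move-assign over the old member, which unmaps the previous table as a side effect of the assignment. The same shape with the hypothetical MiniMap sketched earlier:

#include <cstddef>
#include <cstring>
#include <utility>

// Returns false and leaves `table` untouched on failure, matching the
// Resize contract above.
bool GrowTable(MiniMap* table, size_t new_bytes) {
  MiniMap bigger = MiniMap::MapAnonymous(new_bytes);
  if (!bigger.IsValid()) {
    return false;
  }
  memcpy(bigger.Begin(), table->Begin(), table->Size());
  *table = std::move(bigger);  // Move-assignment unmaps the old block.
  return true;
}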
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index d2093f2818..8c63c0045f 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -27,6 +27,7 @@
#include "base/bit_utils.h"
#include "base/macros.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
@@ -41,8 +42,6 @@ namespace mirror {
class Object;
} // namespace mirror
-class MemMap;
-
// Maintain a table of indirect references. Used for local/global JNI references.
//
// The table contains object references, where the strong (local/global) references are part of the
@@ -398,7 +397,7 @@ class IndirectReferenceTable {
IRTSegmentState segment_state_;
// Mem map where we store the indirect refs.
- std::unique_ptr<MemMap> table_mem_map_;
+ MemMap table_mem_map_;
// bottom of the stack. Do not directly access the object references
// in this as they are roots. Use Get() that has a read barrier.
IrtEntry* table_;
diff --git a/runtime/interpreter/mterp/arm/op_iget.S b/runtime/interpreter/mterp/arm/op_iget.S
index c7f777b71e..c45880b1c4 100644
--- a/runtime/interpreter/mterp/arm/op_iget.S
+++ b/runtime/interpreter/mterp/arm/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean.S b/runtime/interpreter/mterp/arm/op_iget_boolean.S
index 628f40a7e5..9da6c8add6 100644
--- a/runtime/interpreter/mterp/arm/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_iget_boolean.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
+%include "arm/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte.S b/runtime/interpreter/mterp/arm/op_iget_byte.S
index c4e08e2ca2..3d1f52d359 100644
--- a/runtime/interpreter/mterp/arm/op_iget_byte.S
+++ b/runtime/interpreter/mterp/arm/op_iget_byte.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
+%include "arm/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char.S b/runtime/interpreter/mterp/arm/op_iget_char.S
index 5e8da66a93..6b7154d8b9 100644
--- a/runtime/interpreter/mterp/arm/op_iget_char.S
+++ b/runtime/interpreter/mterp/arm/op_iget_char.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
+%include "arm/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object.S b/runtime/interpreter/mterp/arm/op_iget_object.S
index 1cf2e3cb8d..a35b1c8976 100644
--- a/runtime/interpreter/mterp/arm/op_iget_object.S
+++ b/runtime/interpreter/mterp/arm/op_iget_object.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
+%include "arm/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_short.S b/runtime/interpreter/mterp/arm/op_iget_short.S
index 460f0450ca..3254c07fd1 100644
--- a/runtime/interpreter/mterp/arm/op_iget_short.S
+++ b/runtime/interpreter/mterp/arm/op_iget_short.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
+%include "arm/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
index e287d519ad..30405bd94c 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide.S
@@ -9,7 +9,7 @@
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGet64InstanceFromCode
+ bl artGet64InstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/arm64/op_iget.S b/runtime/interpreter/mterp/arm64/op_iget.S
index 88533bd33d..d9feac7765 100644
--- a/runtime/interpreter/mterp/arm64/op_iget.S
+++ b/runtime/interpreter/mterp/arm64/op_iget.S
@@ -1,4 +1,4 @@
-%default { "extend":"", "is_object":"0", "helper":"artGet32InstanceFromCode"}
+%default { "extend":"", "is_object":"0", "helper":"artGet32InstanceFromMterp"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean.S b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
index 36a9b6beb3..f6ea4dd8b5 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode", "extend":"uxtb w0, w0" }
+%include "arm64/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp", "extend":"uxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte.S b/runtime/interpreter/mterp/arm64/op_iget_byte.S
index fd3f164518..497e2bf253 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_byte.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetByteInstanceFromCode", "extend":"sxtb w0, w0" }
+%include "arm64/op_iget.S" { "helper":"artGetByteInstanceFromMterp", "extend":"sxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char.S b/runtime/interpreter/mterp/arm64/op_iget_char.S
index ea23275224..4669859121 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_char.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_char.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetCharInstanceFromCode", "extend":"uxth w0, w0" }
+%include "arm64/op_iget.S" { "helper":"artGetCharInstanceFromMterp", "extend":"uxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object.S b/runtime/interpreter/mterp/arm64/op_iget_object.S
index 03be78d2a1..eb7bdeaee3 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_object.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_object.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
+%include "arm64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short.S b/runtime/interpreter/mterp/arm64/op_iget_short.S
index c347542f03..6f0a5055d7 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_short.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_short.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetShortInstanceFromCode", "extend":"sxth w0, w0" }
+%include "arm64/op_iget.S" { "helper":"artGetShortInstanceFromMterp", "extend":"sxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide.S b/runtime/interpreter/mterp/arm64/op_iget_wide.S
index 9718390c3b..02ef0a71fd 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_wide.S
@@ -9,7 +9,7 @@
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGet64InstanceFromCode
+ bl artGet64InstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
index 01f42d9c12..0785ac5e32 100644
--- a/runtime/interpreter/mterp/mips/op_iget.S
+++ b/runtime/interpreter/mterp/mips/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean.S b/runtime/interpreter/mterp/mips/op_iget_boolean.S
index e03364e34b..a932c37a82 100644
--- a/runtime/interpreter/mterp/mips/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_iget_boolean.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
+%include "mips/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte.S b/runtime/interpreter/mterp/mips/op_iget_byte.S
index dc87cfecf8..e498a8c774 100644
--- a/runtime/interpreter/mterp/mips/op_iget_byte.S
+++ b/runtime/interpreter/mterp/mips/op_iget_byte.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
+%include "mips/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char.S b/runtime/interpreter/mterp/mips/op_iget_char.S
index 55f8a93ff4..efd5b99372 100644
--- a/runtime/interpreter/mterp/mips/op_iget_char.S
+++ b/runtime/interpreter/mterp/mips/op_iget_char.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
+%include "mips/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object.S b/runtime/interpreter/mterp/mips/op_iget_object.S
index 11d93a46d7..8fa96521f6 100644
--- a/runtime/interpreter/mterp/mips/op_iget_object.S
+++ b/runtime/interpreter/mterp/mips/op_iget_object.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
+%include "mips/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_short.S b/runtime/interpreter/mterp/mips/op_iget_short.S
index 9086246c97..efc5de4890 100644
--- a/runtime/interpreter/mterp/mips/op_iget_short.S
+++ b/runtime/interpreter/mterp/mips/op_iget_short.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
+%include "mips/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
index cf5019eaa5..e1d83a48f5 100644
--- a/runtime/interpreter/mterp/mips/op_iget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_iget_wide.S
@@ -10,7 +10,7 @@
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGet64InstanceFromCode)
+ JAL(artGet64InstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
diff --git a/runtime/interpreter/mterp/mips64/op_iget.S b/runtime/interpreter/mterp/mips64/op_iget.S
index ade4b31b80..4158603e4a 100644
--- a/runtime/interpreter/mterp/mips64/op_iget.S
+++ b/runtime/interpreter/mterp/mips64/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean.S b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
index cb2c8bef07..e64b7982f8 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
+%include "mips64/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte.S b/runtime/interpreter/mterp/mips64/op_iget_byte.S
index 099d8d0362..fefe53f308 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_byte.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
+%include "mips64/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char.S b/runtime/interpreter/mterp/mips64/op_iget_char.S
index 927b7affa6..9caf40ecff 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_char.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_char.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
+%include "mips64/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object.S b/runtime/interpreter/mterp/mips64/op_iget_object.S
index c658556992..ce3421a94f 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_object.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_object.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
+%include "mips64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short.S b/runtime/interpreter/mterp/mips64/op_iget_short.S
index 28b5093b6d..e2d122d2af 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_short.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_short.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
+%include "mips64/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide.S b/runtime/interpreter/mterp/mips64/op_iget_wide.S
index 85cf6705a7..ca793e0a27 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_wide.S
@@ -3,14 +3,14 @@
*
* for: iget-wide
*/
- .extern artGet64InstanceFromCode
+ .extern artGet64InstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGet64InstanceFromCode
+ jal artGet64InstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index e4cc6d3f9f..e0a48740dc 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -681,12 +681,89 @@ extern "C" size_t MterpSuspendCheck(Thread* self)
return MterpShouldSwitchInterpreters();
}
+template<typename PrimType, typename RetType, typename Getter, FindFieldType kType>
+NO_INLINE RetType artGetInstanceFromMterp(uint32_t field_idx,
+ mirror::Object* obj,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&obj)); // GC might move the object.
+ ArtField* field = FindFieldFromCode<kType, /* access_checks */ false>(
+ field_idx, referrer, self, sizeof(PrimType));
+ if (UNLIKELY(field == nullptr)) {
+ return 0;  // Caller checks Thread::Current for the pending exception.
+ }
+ if (UNLIKELY(h == nullptr)) {
+ ThrowNullPointerExceptionForFieldAccess(field, /*is_read*/ true);
+ return 0;  // Caller checks Thread::Current for the pending exception.
+ }
+ return Getter::Get(obj, field);
+}
+
+template<typename PrimType, typename RetType, typename Getter>
+ALWAYS_INLINE RetType artGetInstanceFromMterpFast(uint32_t field_idx,
+ mirror::Object* obj,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ constexpr bool kIsObject = std::is_same<RetType, mirror::Object*>::value;
+ constexpr FindFieldType kType = kIsObject ? InstanceObjectRead : InstancePrimitiveRead;
+
+ // This effectively inlines the fast path from ArtMethod::GetDexCache.
+ // It avoids a non-inlined call, which in turn allows elimination of the prologue and epilogue.
+ if (LIKELY(!referrer->IsObsolete())) {
+ // Avoid read barriers, since we need only the pointer to the native (non-movable)
+ // DexCache field array which we can get even through from-space objects.
+ ObjPtr<mirror::Class> klass = referrer->GetDeclaringClass<kWithoutReadBarrier>();
+ mirror::DexCache* dex_cache = klass->GetDexCache<kDefaultVerifyFlags, kWithoutReadBarrier>();
+ // Try to find the desired field in DexCache.
+ ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ if (kIsDebugBuild) {
+ // Compare the fast path and slow path.
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&obj)); // GC might move the object.
+ DCHECK_EQ(field, (FindFieldFromCode<kType, /* access_checks */ false>(
+ field_idx, referrer, self, sizeof(PrimType))));
+ }
+ return Getter::Get(obj, field);
+ }
+ }
+ // Slow path. Called last and with identical arguments so that it compiles to a single tail-call instruction.
+ return artGetInstanceFromMterp<PrimType, RetType, Getter, kType>(field_idx, obj, referrer, self);
+}
+
+#define ART_GET_FIELD_FROM_MTERP(Kind, PrimType, RetType, Ptr) \
+extern "C" RetType artGet ## Kind ## InstanceFromMterp(uint32_t field_idx, \
+ mirror::Object* obj, \
+ ArtMethod* referrer, \
+ Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ struct Getter { /* Specialize the field load depending on the field type */ \
+ static RetType Get(mirror::Object* o, ArtField* f) REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return f->Get##Kind(o)Ptr; \
+ } \
+ }; \
+ return artGetInstanceFromMterpFast<PrimType, RetType, Getter>(field_idx, obj, referrer, self); \
+} \
+
+ART_GET_FIELD_FROM_MTERP(Byte, int8_t, ssize_t, )
+ART_GET_FIELD_FROM_MTERP(Boolean, uint8_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(Short, int16_t, ssize_t, )
+ART_GET_FIELD_FROM_MTERP(Char, uint16_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(32, uint32_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(64, uint64_t, uint64_t, )
+ART_GET_FIELD_FROM_MTERP(Obj, mirror::HeapReference<mirror::Object>, mirror::Object*, .Ptr())
+
+#undef ART_GET_FIELD_FROM_MTERP
+
extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx,
mirror::Object* obj,
uint8_t new_value,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
if (type == Primitive::kPrimBoolean) {
@@ -705,8 +782,7 @@ extern "C" ssize_t artSet16InstanceFromMterp(uint32_t field_idx,
uint16_t new_value,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int16_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
if (type == Primitive::kPrimChar) {
@@ -725,8 +801,7 @@ extern "C" ssize_t artSet32InstanceFromMterp(uint32_t field_idx,
uint32_t new_value,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int32_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
field->Set32<false>(obj, new_value);
return 0; // success
@@ -739,8 +814,7 @@ extern "C" ssize_t artSet64InstanceFromMterp(uint32_t field_idx,
uint64_t* new_value,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int64_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
field->Set64<false>(obj, *new_value);
return 0; // success
@@ -753,8 +827,7 @@ extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx,
mirror::Object* new_value,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
field->SetObj<false>(obj, new_value);
return 0; // success
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 73b957f285..fd5d647624 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -2255,7 +2255,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGet32InstanceFromCode
+ bl artGet32InstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2285,7 +2285,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGet64InstanceFromCode
+ bl artGet64InstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2314,7 +2314,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetObjInstanceFromCode
+ bl artGetObjInstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2346,7 +2346,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetBooleanInstanceFromCode
+ bl artGetBooleanInstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2378,7 +2378,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetByteInstanceFromCode
+ bl artGetByteInstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2410,7 +2410,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetCharInstanceFromCode
+ bl artGetCharInstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2442,7 +2442,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetShortInstanceFromCode
+ bl artGetShortInstanceFromMterp
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 2a0c4df3e2..213f7ff842 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -2192,7 +2192,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGet32InstanceFromCode
+ bl artGet32InstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2222,7 +2222,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGet64InstanceFromCode
+ bl artGet64InstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
PREFETCH_INST 2
@@ -2249,7 +2249,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetObjInstanceFromCode
+ bl artGetObjInstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2281,7 +2281,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetBooleanInstanceFromCode
+ bl artGetBooleanInstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
uxtb w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2313,7 +2313,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetByteInstanceFromCode
+ bl artGetByteInstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
sxtb w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2345,7 +2345,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetCharInstanceFromCode
+ bl artGetCharInstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
uxth w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2377,7 +2377,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetShortInstanceFromCode
+ bl artGetShortInstanceFromMterp
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
sxth w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 3b86279b47..c749057ee6 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -2677,7 +2677,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGet32InstanceFromCode)
+ JAL(artGet32InstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2706,7 +2706,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGet64InstanceFromCode)
+ JAL(artGet64InstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2732,7 +2732,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetObjInstanceFromCode)
+ JAL(artGetObjInstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2763,7 +2763,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetBooleanInstanceFromCode)
+ JAL(artGetBooleanInstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2794,7 +2794,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetByteInstanceFromCode)
+ JAL(artGetByteInstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2825,7 +2825,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetCharInstanceFromCode)
+ JAL(artGetCharInstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2856,7 +2856,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetShortInstanceFromCode)
+ JAL(artGetShortInstanceFromMterp)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 58f98dfabb..f9b270b01e 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -2246,14 +2246,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGet32InstanceFromCode
+ .extern artGet32InstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGet32InstanceFromCode
+ jal artGet32InstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2276,14 +2276,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget-wide
*/
- .extern artGet64InstanceFromCode
+ .extern artGet64InstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGet64InstanceFromCode
+ jal artGet64InstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2303,14 +2303,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetObjInstanceFromCode
+ .extern artGetObjInstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetObjInstanceFromCode
+ jal artGetObjInstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2335,14 +2335,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetBooleanInstanceFromCode
+ .extern artGetBooleanInstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetBooleanInstanceFromCode
+ jal artGetBooleanInstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2367,14 +2367,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetByteInstanceFromCode
+ .extern artGetByteInstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetByteInstanceFromCode
+ jal artGetByteInstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2399,14 +2399,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetCharInstanceFromCode
+ .extern artGetCharInstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetCharInstanceFromCode
+ jal artGetCharInstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2431,14 +2431,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetShortInstanceFromCode
+ .extern artGetShortInstanceFromMterp
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetShortInstanceFromCode
+ jal artGetShortInstanceFromMterp
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 6be70cce4c..ad74b29871 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -2132,7 +2132,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGet32InstanceFromCode)
+ call SYMBOL(artGet32InstanceFromMterp)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2165,7 +2165,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGet64InstanceFromCode)
+ call SYMBOL(artGet64InstanceFromMterp)
mov rSELF, %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
@@ -2196,7 +2196,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetObjInstanceFromCode)
+ call SYMBOL(artGetObjInstanceFromMterp)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2231,7 +2231,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetBooleanInstanceFromCode)
+ call SYMBOL(artGetBooleanInstanceFromMterp)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2266,7 +2266,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetByteInstanceFromCode)
+ call SYMBOL(artGetByteInstanceFromMterp)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2301,7 +2301,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetCharInstanceFromCode)
+ call SYMBOL(artGetCharInstanceFromMterp)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2336,7 +2336,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetShortInstanceFromCode)
+ call SYMBOL(artGetShortInstanceFromMterp)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 562cf7ceb6..56d68e6caa 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -2075,7 +2075,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGet32InstanceFromCode)
+ call SYMBOL(artGet32InstanceFromMterp)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2108,7 +2108,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGet64InstanceFromCode)
+ call SYMBOL(artGet64InstanceFromMterp)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2142,7 +2142,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetObjInstanceFromCode)
+ call SYMBOL(artGetObjInstanceFromMterp)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2176,7 +2176,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetBooleanInstanceFromCode)
+ call SYMBOL(artGetBooleanInstanceFromMterp)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2210,7 +2210,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetByteInstanceFromCode)
+ call SYMBOL(artGetByteInstanceFromMterp)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2244,7 +2244,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetCharInstanceFromCode)
+ call SYMBOL(artGetCharInstanceFromMterp)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2278,7 +2278,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetShortInstanceFromCode)
+ call SYMBOL(artGetShortInstanceFromMterp)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
diff --git a/runtime/interpreter/mterp/x86/op_iget.S b/runtime/interpreter/mterp/x86/op_iget.S
index e3304ba6a7..219463b646 100644
--- a/runtime/interpreter/mterp/x86/op_iget.S
+++ b/runtime/interpreter/mterp/x86/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/x86/op_iget_boolean.S b/runtime/interpreter/mterp/x86/op_iget_boolean.S
index 9ddad041dc..4ab2afcd38 100644
--- a/runtime/interpreter/mterp/x86/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_iget_boolean.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
+%include "x86/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_byte.S b/runtime/interpreter/mterp/x86/op_iget_byte.S
index 8250788923..bb282d4648 100644
--- a/runtime/interpreter/mterp/x86/op_iget_byte.S
+++ b/runtime/interpreter/mterp/x86/op_iget_byte.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
+%include "x86/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_char.S b/runtime/interpreter/mterp/x86/op_iget_char.S
index e9d2156c88..a13203bb81 100644
--- a/runtime/interpreter/mterp/x86/op_iget_char.S
+++ b/runtime/interpreter/mterp/x86/op_iget_char.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
+%include "x86/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_object.S b/runtime/interpreter/mterp/x86/op_iget_object.S
index 3abeefcf81..79d5e5fd94 100644
--- a/runtime/interpreter/mterp/x86/op_iget_object.S
+++ b/runtime/interpreter/mterp/x86/op_iget_object.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
+%include "x86/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_short.S b/runtime/interpreter/mterp/x86/op_iget_short.S
index c8fad89040..8fc18a570f 100644
--- a/runtime/interpreter/mterp/x86/op_iget_short.S
+++ b/runtime/interpreter/mterp/x86/op_iget_short.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
+%include "x86/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide.S b/runtime/interpreter/mterp/x86/op_iget_wide.S
index a5d7e6937d..b111b29587 100644
--- a/runtime/interpreter/mterp/x86/op_iget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_iget_wide.S
@@ -14,7 +14,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGet64InstanceFromCode)
+ call SYMBOL(artGet64InstanceFromMterp)
mov rSELF, %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
diff --git a/runtime/interpreter/mterp/x86_64/op_iget.S b/runtime/interpreter/mterp/x86_64/op_iget.S
index df43efe6a4..ffc14b5d22 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromCode", "wide":"0"}
+%default { "is_object":"0", "helper":"artGet32InstanceFromMterp", "wide":"0"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
index 6ac55231b8..1379d53cfe 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
+%include "x86_64/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte.S b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
index 6a861b1b7a..93047ec99c 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
+%include "x86_64/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char.S b/runtime/interpreter/mterp/x86_64/op_iget_char.S
index 021a0f1b24..239f0d0bd7 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_char.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
+%include "x86_64/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object.S b/runtime/interpreter/mterp/x86_64/op_iget_object.S
index d92bc9c345..2104d2c744 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_object.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_object.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
+%include "x86_64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short.S b/runtime/interpreter/mterp/x86_64/op_iget_short.S
index f158bea573..3525effe75 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_short.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
+%include "x86_64/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide.S b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
index 74bb9ffe1c..706c44121e 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGet64InstanceFromCode", "wide":"1" }
+%include "x86_64/op_iget.S" { "helper":"artGet64InstanceFromMterp", "wide":"1" }
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 74aa787db7..d4b51af903 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -517,24 +517,23 @@ void UnstartedRuntime::UnstartedClassIsAnonymousClass(
result->SetZ(class_name == nullptr);
}
-static std::unique_ptr<MemMap> FindAndExtractEntry(const std::string& jar_file,
- const char* entry_name,
- size_t* size,
- std::string* error_msg) {
+static MemMap FindAndExtractEntry(const std::string& jar_file,
+ const char* entry_name,
+ size_t* size,
+ std::string* error_msg) {
CHECK(size != nullptr);
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(jar_file.c_str(), error_msg));
if (zip_archive == nullptr) {
- return nullptr;
+ return MemMap::Invalid();
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(entry_name, error_msg));
if (zip_entry == nullptr) {
- return nullptr;
+ return MemMap::Invalid();
}
- std::unique_ptr<MemMap> tmp_map(
- zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg));
- if (tmp_map == nullptr) {
- return nullptr;
+ MemMap tmp_map = zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg);
+ if (!tmp_map.IsValid()) {
+ return MemMap::Invalid();
}
// OK, from here everything seems fine.
@@ -577,18 +576,18 @@ static void GetResourceAsStream(Thread* self,
return;
}
- std::unique_ptr<MemMap> mem_map;
+ MemMap mem_map;
size_t map_size;
std::string last_error_msg; // Only store the last message (we could concatenate).
for (const std::string& jar_file : split) {
mem_map = FindAndExtractEntry(jar_file, resource_cstr, &map_size, &last_error_msg);
- if (mem_map != nullptr) {
+ if (mem_map.IsValid()) {
break;
}
}
- if (mem_map == nullptr) {
+ if (!mem_map.IsValid()) {
// Didn't find it. There's a good chance this will be the same at runtime, but still
// conservatively abort the transaction here.
AbortTransactionOrFail(self,
@@ -607,9 +606,9 @@ static void GetResourceAsStream(Thread* self,
return;
}
// Copy in content.
- memcpy(h_array->GetData(), mem_map->Begin(), map_size);
+ memcpy(h_array->GetData(), mem_map.Begin(), map_size);
// Be proactive releasing memory.
- mem_map.reset();
+ mem_map.Reset();
// Create a ByteArrayInputStream.
Handle<mirror::Class> h_class(hs.NewHandle(
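FindAndExtractEntry above is one instance of the tree-wide migration this commit performs: MemMap changes from a heap-allocated object handled through std::unique_ptr<MemMap> to a movable value type. Failure is signalled by MemMap::Invalid() and tested with IsValid() instead of a nullptr check, member access drops the ->, and early release goes through Reset(). A condensed sketch of the new calling convention, with the name and size purely illustrative:

    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("example-map",
                                      /* addr */ nullptr,
                                      /* byte_count */ 4096,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
                                      /* reuse */ false,
                                      &error_msg);
    if (!map.IsValid()) {                  // replaces the old == nullptr check
      LOG(ERROR) << error_msg;
      return;
    }
    memset(map.Begin(), 0, map.Size());    // direct member access, no ->
    map.Reset();                           // proactive unmap, was unique_ptr::reset()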
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b92affa26e..d9c7900577 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -205,15 +205,16 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// We could do PC-relative addressing to avoid this problem, but that
// would require reserving code and data area before submitting, which
// means more windows for the code memory to be RWX.
- std::unique_ptr<MemMap> data_map(MemMap::MapAnonymous(
- "data-code-cache", nullptr,
+ MemMap data_map = MemMap::MapAnonymous(
+ "data-code-cache",
+ /* addr */ nullptr,
max_capacity,
kProtData,
/* low_4gb */ true,
/* reuse */ false,
&error_str,
- use_ashmem));
- if (data_map == nullptr) {
+ use_ashmem);
+ if (!data_map.IsValid()) {
std::ostringstream oss;
oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
@@ -229,26 +230,23 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t data_size = max_capacity / 2;
size_t code_size = max_capacity - data_size;
DCHECK_EQ(code_size + data_size, max_capacity);
- uint8_t* divider = data_map->Begin() + data_size;
-
- MemMap* code_map = data_map->RemapAtEnd(
- divider,
- "jit-code-cache",
- memmap_flags_prot_code | PROT_WRITE,
- &error_str, use_ashmem);
- if (code_map == nullptr) {
+ uint8_t* divider = data_map.Begin() + data_size;
+
+ MemMap code_map = data_map.RemapAtEnd(
+ divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str, use_ashmem);
+ if (!code_map.IsValid()) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
return nullptr;
}
- DCHECK_EQ(code_map->Begin(), divider);
+ DCHECK_EQ(code_map.Begin(), divider);
data_size = initial_capacity / 2;
code_size = initial_capacity - data_size;
DCHECK_EQ(code_size + data_size, initial_capacity);
return new JitCodeCache(
- code_map,
- data_map.release(),
+ std::move(code_map),
+ std::move(data_map),
code_size,
data_size,
max_capacity,
@@ -256,8 +254,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
memmap_flags_prot_code);
}
-JitCodeCache::JitCodeCache(MemMap* code_map,
- MemMap* data_map,
+JitCodeCache::JitCodeCache(MemMap&& code_map,
+ MemMap&& data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -266,8 +264,8 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
- code_map_(code_map),
- data_map_(data_map),
+ code_map_(std::move(code_map)),
+ data_map_(std::move(data_map)),
max_capacity_(max_capacity),
current_capacity_(initial_code_capacity + initial_data_capacity),
code_end_(initial_code_capacity),
@@ -287,8 +285,8 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
memmap_flags_prot_code_(memmap_flags_prot_code) {
DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
- code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
- data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
+ code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
+ data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
PLOG(FATAL) << "create_mspace_with_base failed";
@@ -298,13 +296,13 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
CheckedCall(mprotect,
"mprotect jit code cache",
- code_map_->Begin(),
- code_map_->Size(),
+ code_map_.Begin(),
+ code_map_.Size(),
memmap_flags_prot_code_);
CheckedCall(mprotect,
"mprotect jit data cache",
- data_map_->Begin(),
- data_map_->Size(),
+ data_map_.Begin(),
+ data_map_.Size(),
kProtData);
VLOG(jit) << "Created jit code cache: initial data size="
@@ -316,7 +314,7 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
JitCodeCache::~JitCodeCache() {}
bool JitCodeCache::ContainsPc(const void* ptr) const {
- return code_map_->Begin() <= ptr && ptr < code_map_->End();
+ return code_map_.Begin() <= ptr && ptr < code_map_.End();
}
bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -387,8 +385,8 @@ class ScopedCodeCacheWrite : ScopedTrace {
CheckedCall(
mprotect,
"make code writable",
- code_cache_->code_map_->Begin(),
- code_cache_->code_map_->Size(),
+ code_cache_->code_map_.Begin(),
+ code_cache_->code_map_.Size(),
code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
}
@@ -397,8 +395,8 @@ class ScopedCodeCacheWrite : ScopedTrace {
CheckedCall(
mprotect,
"make code protected",
- code_cache_->code_map_->Begin(),
- code_cache_->code_map_->Size(),
+ code_cache_->code_map_.Begin(),
+ code_cache_->code_map_.Size(),
code_cache_->memmap_flags_prot_code_);
}
@@ -608,17 +606,17 @@ void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
void JitCodeCache::FreeAllMethodHeaders(
const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
- {
- MutexLock mu(Thread::Current(), *Locks::cha_lock_);
- Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
- ->RemoveDependentsWithMethodHeaders(method_headers);
- }
-
// We need to remove entries in method_headers from CHA dependencies
// first since once we do FreeCode() below, the memory can be reused
// so it's possible for the same method_header to start representing
// different compile code.
MutexLock mu(Thread::Current(), lock_);
+ {
+ MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
+ Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
+ ->RemoveDependentsWithMethodHeaders(method_headers);
+ }
+
ScopedCodeCacheWrite scc(this);
for (const OatQuickMethodHeader* method_header : method_headers) {
FreeCodeAndData(method_header->GetCode());
@@ -742,6 +740,18 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}
+void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
+ while (collection_in_progress_) {
+ lock_.Unlock(self);
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ }
+ lock_.Lock(self);
+ }
+}
+
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
@@ -755,6 +765,13 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
const ArenaSet<ArtMethod*>&
cha_single_implementation_list) {
DCHECK(!method->IsNative() || !osr);
+
+ if (!method->IsNative()) {
+ // We need to do this before grabbing the lock_ because it needs to be able to see the string
+ // InternTable. Native methods do not have roots.
+ DCheckRootsAreValid(roots);
+ }
+
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -763,44 +780,45 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
uint8_t* memory = nullptr;
+ MutexLock mu(self, lock_);
+ // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
+ // finish.
+ WaitForPotentialCollectionToCompleteRunnable(self);
{
- ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, lock_);
- WaitForPotentialCollectionToComplete(self);
- {
- ScopedCodeCacheWrite scc(this);
- memory = AllocateCode(total_size);
- if (memory == nullptr) {
- return nullptr;
- }
- code_ptr = memory + header_size;
-
- std::copy(code, code + code_size, code_ptr);
- method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- new (method_header) OatQuickMethodHeader(
- (stack_map != nullptr) ? code_ptr - stack_map : 0u,
- code_size);
- // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
- // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
- // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
- // 6P) stop being supported or their kernels are fixed.
- //
- // For reference, this behavior is caused by this commit:
- // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
- FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
- reinterpret_cast<char*>(code_ptr + code_size));
- DCHECK(!Runtime::Current()->IsAotCompiler());
- if (has_should_deoptimize_flag) {
- method_header->SetHasShouldDeoptimizeFlag();
- }
+ ScopedCodeCacheWrite scc(this);
+ memory = AllocateCode(total_size);
+ if (memory == nullptr) {
+ return nullptr;
+ }
+ code_ptr = memory + header_size;
+
+ std::copy(code, code + code_size, code_ptr);
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ new (method_header) OatQuickMethodHeader(
+ (stack_map != nullptr) ? code_ptr - stack_map : 0u,
+ code_size);
+ // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+ // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+ // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+ // 6P) stop being supported or their kernels are fixed.
+ //
+ // For reference, this behavior is caused by this commit:
+ // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+ FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+ reinterpret_cast<char*>(code_ptr + code_size));
+ DCHECK(!Runtime::Current()->IsAotCompiler());
+ if (has_should_deoptimize_flag) {
+ method_header->SetHasShouldDeoptimizeFlag();
}
number_of_compilations_++;
}
// We need to update the entry point in the runnable state for the instrumentation.
{
- // Need cha_lock_ for checking all single-implementation flags and register
- // dependencies.
+ // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
+ // compiled code is considered invalidated by some class linking, but below we still make the
+ // compiled code valid for the method. Need cha_lock_ for checking all single-implementation
+ // flags and register dependencies.
MutexLock cha_mu(self, *Locks::cha_lock_);
bool single_impl_still_valid = true;
for (ArtMethod* single_impl : cha_single_implementation_list) {
@@ -826,16 +844,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
single_impl, method, method_header);
}
- if (!method->IsNative()) {
- // We need to do this before grabbing the lock_ because it needs to be able to see the string
- // InternTable. Native methods do not have roots.
- DCheckRootsAreValid(roots);
- }
-
- // The following needs to be guarded by cha_lock_ also. Otherwise it's
- // possible that the compiled code is considered invalidated by some class linking,
- // but below we still make the compiled code valid for the method.
- MutexLock mu(self, lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
DCHECK(it != jni_stubs_map_.end())
@@ -867,11 +875,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
method, method_header->GetEntryPoint());
}
}
- if (collection_in_progress_) {
- // We need to update the live bitmap if there is a GC to ensure it sees this new
- // code.
- GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
- }
VLOG(jit)
<< "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
<< ArtMethod::PrettyMethod(method) << "@" << method
@@ -1232,8 +1235,8 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
number_of_collections_++;
live_bitmap_.reset(CodeCacheBitmap::Create(
"code-cache-bitmap",
- reinterpret_cast<uintptr_t>(code_map_->Begin()),
- reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+ reinterpret_cast<uintptr_t>(code_map_.Begin()),
+ reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
collection_in_progress_ = true;
}
}
@@ -1605,12 +1608,12 @@ void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_S
if (code_mspace_ == mspace) {
size_t result = code_end_;
code_end_ += increment;
- return reinterpret_cast<void*>(result + code_map_->Begin());
+ return reinterpret_cast<void*>(result + code_map_.Begin());
} else {
DCHECK_EQ(data_mspace_, mspace);
size_t result = data_end_;
data_end_ += increment;
- return reinterpret_cast<void*>(result + data_map_->Begin());
+ return reinterpret_cast<void*>(result + data_map_.Begin());
}
}
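Two behavioural changes ride along with the MemMap conversion in this file. First, the CHA dependency cleanup in FreeAllMethodHeaders moves under lock_ (with cha_lock_ nested inside it), so method headers cannot be freed and their memory reused while they are still registered as CHA dependents. Second, CommitCodeInternal now takes lock_ up front in the runnable state, which requires the new WaitForPotentialCollectionToCompleteRunnable: a thread that holds lock_ cannot simply block on the collection condition variable, so it drops the lock, suspends, waits, and re-acquires. A schematic of that idiom, condensed from the hunk above with the reasoning spelled out in comments:

    // Another collection can start between re-acquiring lock_ and the next
    // check, so this must loop on the condition rather than wait once.
    while (collection_in_progress_) {
      lock_.Unlock(self);          // cannot enter a suspended wait holding lock_
      {
        ScopedThreadSuspension sts(self, kSuspended);  // leave the runnable state
        MutexLock mu(self, lock_);
        WaitForPotentialCollectionToComplete(self);    // waits on lock_cond_
      }
      lock_.Lock(self);            // restore the caller's locking expectation
    }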
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 29f9c9cf43..a4a0f8f4e8 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -28,6 +28,7 @@
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
@@ -39,7 +40,6 @@ class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
-class MemMap;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
@@ -279,8 +279,8 @@ class JitCodeCache {
private:
// Take ownership of maps.
- JitCodeCache(MemMap* code_map,
- MemMap* data_map,
+ JitCodeCache(MemMap&& code_map,
+ MemMap&& data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -314,6 +314,12 @@ class JitCodeCache {
REQUIRES(lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // If a collection is in progress, wait for it to finish. Must be called with the mutator lock held.
+ // The non-mutator lock version should be used if possible. This method will release then
+ // re-acquire the mutator lock.
+ void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
+ REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
bool WaitForPotentialCollectionToComplete(Thread* self)
@@ -390,9 +396,9 @@ class JitCodeCache {
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(lock_);
// Mem map which holds code.
- std::unique_ptr<MemMap> code_map_;
+ MemMap code_map_;
// Mem map which holds data (stack maps and profiling info).
- std::unique_ptr<MemMap> data_map_;
+ MemMap data_map_;
// The opaque mspace for allocating code.
void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index b598df3eba..d49ebd1e80 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -163,33 +163,34 @@ class NullableScopedUtfChars {
void operator=(const NullableScopedUtfChars&);
};
-static std::unique_ptr<MemMap> AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
+static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
if (end <= start) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("Bad range");
- return nullptr;
+ return MemMap::Invalid();
}
std::string error_message;
size_t length = static_cast<size_t>(end - start);
- std::unique_ptr<MemMap> dex_mem_map(MemMap::MapAnonymous("DEX data",
- nullptr,
- length,
- PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
- /* reuse */ false,
- &error_message));
- if (dex_mem_map == nullptr) {
+ MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
+ /* addr */ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_message);
+ if (!dex_mem_map.IsValid()) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("%s", error_message.c_str());
+ return MemMap::Invalid();
}
return dex_mem_map;
}
-static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem_map) {
+static const DexFile* CreateDexFile(JNIEnv* env, MemMap&& dex_mem_map) {
std::string location = StringPrintf("Anonymous-DexFile@%p-%p",
- dex_mem_map->Begin(),
- dex_mem_map->End());
+ dex_mem_map.Begin(),
+ dex_mem_map.End());
std::string error_message;
const ArtDexFileLoader dex_file_loader;
std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
@@ -213,7 +214,7 @@ static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem
return dex_file.release();
}
-static jobject CreateSingleDexFileCookie(JNIEnv* env, std::unique_ptr<MemMap> data) {
+static jobject CreateSingleDexFileCookie(JNIEnv* env, MemMap&& data) {
std::unique_ptr<const DexFile> dex_file(CreateDexFile(env, std::move(data)));
if (dex_file.get() == nullptr) {
DCHECK(env->ExceptionCheck());
@@ -236,14 +237,14 @@ static jobject DexFile_createCookieWithDirectBuffer(JNIEnv* env,
return nullptr;
}
- std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
- if (dex_mem_map == nullptr) {
+ MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+ if (!dex_mem_map.IsValid()) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
size_t length = static_cast<size_t>(end - start);
- memcpy(dex_mem_map->Begin(), base_address, length);
+ memcpy(dex_mem_map.Begin(), base_address, length);
return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
}
@@ -252,13 +253,13 @@ static jobject DexFile_createCookieWithArray(JNIEnv* env,
jbyteArray buffer,
jint start,
jint end) {
- std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
- if (dex_mem_map == nullptr) {
+ MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+ if (!dex_mem_map.IsValid()) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
- auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
+ auto destination = reinterpret_cast<jbyte*>(dex_mem_map.Begin());
env->GetByteArrayRegion(buffer, start, end - start, destination);
return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
}
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 13871f7be7..b7f0a7aabc 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -122,7 +122,7 @@ static jboolean Thread_holdsLock(JNIEnv* env, jclass, jobject java_object) {
return thread->HoldsLock(object);
}
-static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) {
+static void Thread_interrupt0(JNIEnv* env, jobject java_thread) {
ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
@@ -131,7 +131,7 @@ static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) {
}
}
-static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
+static void Thread_setNativeName(JNIEnv* env, jobject peer, jstring java_name) {
ScopedUtfChars name(env, java_name);
{
ScopedObjectAccess soa(env);
@@ -168,7 +168,7 @@ static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
* from Thread.MIN_PRIORITY to Thread.MAX_PRIORITY (1-10), with "normal"
* threads at Thread.NORM_PRIORITY (5).
*/
-static void Thread_nativeSetPriority(JNIEnv* env, jobject java_thread, jint new_priority) {
+static void Thread_setPriority0(JNIEnv* env, jobject java_thread, jint new_priority) {
ScopedObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
@@ -200,9 +200,9 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Thread, nativeCreate, "(Ljava/lang/Thread;JZ)V"),
NATIVE_METHOD(Thread, nativeGetStatus, "(Z)I"),
NATIVE_METHOD(Thread, holdsLock, "(Ljava/lang/Object;)Z"),
- FAST_NATIVE_METHOD(Thread, nativeInterrupt, "()V"),
- NATIVE_METHOD(Thread, nativeSetName, "(Ljava/lang/String;)V"),
- NATIVE_METHOD(Thread, nativeSetPriority, "(I)V"),
+ FAST_NATIVE_METHOD(Thread, interrupt0, "()V"),
+ NATIVE_METHOD(Thread, setNativeName, "(Ljava/lang/String;)V"),
+ NATIVE_METHOD(Thread, setPriority0, "(I)V"),
FAST_NATIVE_METHOD(Thread, sleep, "(Ljava/lang/Object;JI)V"),
NATIVE_METHOD(Thread, yield, "()V"),
};
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 58e16ed1b7..c7daef8310 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -956,7 +956,7 @@ class DlOpenOatFile FINAL : public OatFileBase {
void* dlopen_handle_; // TODO: Unique_ptr with custom deleter.
// Dummy memory map objects corresponding to the regions mapped by dlopen.
- std::vector<std::unique_ptr<MemMap>> dlopen_mmaps_;
+ std::vector<MemMap> dlopen_mmaps_;
// The number of shared objects the linker told us about before loading. Used to
// (optimistically) optimize the PreSetup stage (see comment there).
@@ -1122,8 +1122,8 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
info->dlpi_phdr[i].p_vaddr);
size_t memsz = info->dlpi_phdr[i].p_memsz;
- MemMap* mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
- context->dlopen_mmaps_->push_back(std::unique_ptr<MemMap>(mmap));
+ MemMap mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
+ context->dlopen_mmaps_->push_back(std::move(mmap));
}
}
return 1; // Stop iteration and return 1 from dl_iterate_phdr.
@@ -1131,7 +1131,7 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
return 0; // Continue iteration and return 0 from dl_iterate_phdr when finished.
}
const uint8_t* const begin_;
- std::vector<std::unique_ptr<MemMap>>* const dlopen_mmaps_;
+ std::vector<MemMap>* const dlopen_mmaps_;
const size_t shared_objects_before;
size_t shared_objects_seen;
};
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index facebda953..9248bb928c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -425,7 +425,7 @@ Runtime::~Runtime() {
low_4gb_arena_pool_.reset();
arena_pool_.reset();
jit_arena_pool_.reset();
- protected_fault_page_.reset();
+ protected_fault_page_.Reset();
MemMap::Shutdown();
// TODO: acquire a static mutex on Runtime to avoid racing.
@@ -1162,18 +1162,18 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
{
constexpr uintptr_t kSentinelAddr =
RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
- protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
- reinterpret_cast<uint8_t*>(kSentinelAddr),
- kPageSize,
- PROT_NONE,
- /* low_4g */ true,
- /* reuse */ false,
- /* error_msg */ nullptr));
- if (protected_fault_page_ == nullptr) {
+ protected_fault_page_ = MemMap::MapAnonymous("Sentinel fault page",
+ reinterpret_cast<uint8_t*>(kSentinelAddr),
+ kPageSize,
+ PROT_NONE,
+ /* low_4g */ true,
+ /* reuse */ false,
+ /* error_msg */ nullptr);
+ if (!protected_fault_page_.IsValid()) {
LOG(WARNING) << "Could not reserve sentinel fault page";
- } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
+ } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
- protected_fault_page_.reset();
+ protected_fault_page_.Reset();
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a98e8a81ed..f98d7b9829 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -29,6 +29,7 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
@@ -86,7 +87,6 @@ class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
-class MemMap;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
@@ -1090,7 +1090,7 @@ class Runtime {
std::atomic<uint32_t> deoptimization_counts_[
static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
- std::unique_ptr<MemMap> protected_fault_page_;
+ MemMap protected_fault_page_;
uint32_t verifier_logging_threshold_ms_;
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 794ac19c4b..4c4dcd893c 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -190,19 +190,19 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava)
TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
std::string error_msg;
- std::unique_ptr<MemMap> stack(MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
- nullptr,
- 128 * kPageSize, // Just some small stack.
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_FALSE(stack == nullptr) << error_msg;
+ MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
+ /* addr */ nullptr,
+ 128 * kPageSize, // Just some small stack.
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(stack.IsValid()) << error_msg;
const char* reason = "ThreadLifecycleCallback test thread";
pthread_attr_t attr;
CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
- CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack->Begin(), stack->Size()), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack.Begin(), stack.Size()), reason);
pthread_t pthread;
CHECK_PTHREAD_CALL(pthread_create,
(&pthread,
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 26ca19054d..2a69bc6c10 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -46,19 +46,24 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
// Add an inaccessible page to catch stack overflow.
stack_size += kPageSize;
std::string error_msg;
- stack_.reset(MemMap::MapAnonymous(name.c_str(), nullptr, stack_size, PROT_READ | PROT_WRITE,
- false, false, &error_msg));
- CHECK(stack_.get() != nullptr) << error_msg;
- CHECK_ALIGNED(stack_->Begin(), kPageSize);
+ stack_ = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ stack_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(stack_.IsValid()) << error_msg;
+ CHECK_ALIGNED(stack_.Begin(), kPageSize);
CheckedCall(mprotect,
"mprotect bottom page of thread pool worker stack",
- stack_->Begin(),
+ stack_.Begin(),
kPageSize,
PROT_NONE);
const char* reason = "new thread pool worker thread";
pthread_attr_t attr;
CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
- CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_->Begin(), stack_->Size()), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_.Begin(), stack_.Size()), reason);
CHECK_PTHREAD_CALL(pthread_create, (&pthread_, &attr, &Callback, this), reason);
CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
}
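The worker-stack setup above combines three steps that are easy to get wrong together: over-allocating by one page, write-protecting the bottom page as a stack-overflow trap, and handing the region to pthread_attr_setstack. A standalone sketch of the same recipe using raw POSIX calls (mmap instead of ART's MemMap, which essentially wraps the same mmap/mprotect calls, so the snippet is self-contained; sizes and names are illustrative):

    #include <pthread.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void* Worker(void*) { return nullptr; }

    int main() {
      const size_t kPageSize = sysconf(_SC_PAGESIZE);
      size_t stack_size = 1 * 1024 * 1024 + kPageSize;  // +1 page for the guard
      void* stack = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (stack == MAP_FAILED) return 1;
      // The lowest page becomes inaccessible: since the stack grows down,
      // overflowing it faults loudly instead of silently corrupting memory.
      if (mprotect(stack, kPageSize, PROT_NONE) != 0) return 1;
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setstack(&attr, stack, stack_size);
      pthread_t thread;
      pthread_create(&thread, &attr, Worker, nullptr);
      pthread_attr_destroy(&attr);
      pthread_join(thread, nullptr);
      munmap(stack, stack_size);
      return 0;
    }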
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 2784953d69..98a1193e72 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -53,8 +53,8 @@ class ThreadPoolWorker {
static const size_t kDefaultStackSize = 1 * MB;
size_t GetStackSize() const {
- DCHECK(stack_.get() != nullptr);
- return stack_->Size();
+ DCHECK(stack_.IsValid());
+ return stack_.Size();
}
virtual ~ThreadPoolWorker();
@@ -71,7 +71,7 @@ class ThreadPoolWorker {
ThreadPool* const thread_pool_;
const std::string name_;
- std::unique_ptr<MemMap> stack_;
+ MemMap stack_;
pthread_t pthread_;
Thread* thread_;
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 32aa86dc93..a7dc2257f1 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -144,7 +144,7 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
mmap_reuse = false;
}
CHECK(!mmap_reuse || mmap_addr != nullptr);
- std::unique_ptr<MemMap> mmap(MemMap::MapFileAtAddress(
+ MemMap mmap = MemMap::MapFileAtAddress(
mmap_addr,
vdex_length,
(writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
@@ -154,13 +154,13 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
low_4gb,
mmap_reuse,
vdex_filename.c_str(),
- error_msg));
- if (mmap == nullptr) {
+ error_msg);
+ if (!mmap.IsValid()) {
*error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
return nullptr;
}
- std::unique_ptr<VdexFile> vdex(new VdexFile(mmap.release()));
+ std::unique_ptr<VdexFile> vdex(new VdexFile(std::move(mmap)));
if (!vdex->IsValid()) {
*error_msg = "Vdex file is not valid";
return nullptr;
@@ -175,7 +175,7 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
/* decompile_return_instruction */ false);
// Update the quickening info size to pretend there isn't any.
size_t offset = vdex->GetDexSectionHeaderOffset();
- reinterpret_cast<DexSectionHeader*>(vdex->mmap_->Begin() + offset)->quickening_info_size_ = 0;
+ reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
}
*error_msg = "Success";
@@ -299,10 +299,6 @@ void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
decompile_return_instruction);
}
}
- method.UnHideAccessFlags();
- }
- for (const ClassAccessor::Field& field : class_accessor.GetFields()) {
- field.UnHideAccessFlags();
}
}
}
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 866a57e7d2..a39ec3128f 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -153,7 +153,7 @@ class VdexFile {
typedef uint32_t VdexChecksum;
using QuickeningTableOffsetType = uint32_t;
- explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
+ explicit VdexFile(MemMap&& mmap) : mmap_(std::move(mmap)) {}
// Returns nullptr if the vdex file cannot be opened or is not valid.
// The mmap_* parameters can be left empty (nullptr/0/false) to allocate at random address.
@@ -215,9 +215,9 @@ class VdexFile {
error_msg);
}
- const uint8_t* Begin() const { return mmap_->Begin(); }
- const uint8_t* End() const { return mmap_->End(); }
- size_t Size() const { return mmap_->Size(); }
+ const uint8_t* Begin() const { return mmap_.Begin(); }
+ const uint8_t* End() const { return mmap_.End(); }
+ size_t Size() const { return mmap_.Size(); }
const VerifierDepsHeader& GetVerifierDepsHeader() const {
return *reinterpret_cast<const VerifierDepsHeader*>(Begin());
@@ -260,7 +260,7 @@ class VdexFile {
}
bool IsValid() const {
- return mmap_->Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
+ return mmap_.Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
}
// This method is for iterating over the dex files in the vdex. If `cursor` is null,
@@ -328,7 +328,7 @@ class VdexFile {
return DexBegin() + GetDexSectionHeader().GetDexSize();
}
- std::unique_ptr<MemMap> mmap_;
+ MemMap mmap_;
DISALLOW_COPY_AND_ASSIGN(VdexFile);
};
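The VdexFile hunks show the ownership-transfer end of the migration: the constructor takes MemMap&& and move-initializes the member, so the mapping's lifetime is tied to the VdexFile with no heap indirection, and the old mmap.release() handoff disappears. A minimal sketch of this move-into-member pattern — names follow the hunks, everything else is pared down:

    class VdexFile {
     public:
      explicit VdexFile(MemMap&& mmap) : mmap_(std::move(mmap)) {}
      const uint8_t* Begin() const { return mmap_.Begin(); }
     private:
      MemMap mmap_;  // owned by value; unmapped by MemMap's destructor
    };

    // Caller side, as in OpenAtAddress():
    //   MemMap mmap = MemMap::MapFileAtAddress(...);
    //   if (!mmap.IsValid()) { /* report *error_msg and bail */ }
    //   std::unique_ptr<VdexFile> vdex(new VdexFile(std::move(mmap)));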
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
index 211d142a2b..093a93f349 100644
--- a/test/305-other-fault-handler/fault_handler.cc
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -33,7 +33,7 @@ class TestFaultHandler FINAL : public FaultHandler {
public:
explicit TestFaultHandler(FaultManager* manager)
: FaultHandler(manager),
- map_error_(""),
+ map_error_(),
target_map_(MemMap::MapAnonymous("test-305-mmap",
/* addr */ nullptr,
/* byte_count */ kPageSize,
@@ -43,7 +43,7 @@ class TestFaultHandler FINAL : public FaultHandler {
/* error_msg */ &map_error_,
/* use_ashmem */ false)),
was_hit_(false) {
- CHECK(target_map_ != nullptr) << "Unable to create segfault target address " << map_error_;
+ CHECK(target_map_.IsValid()) << "Unable to create segfault target address " << map_error_;
manager_->AddHandler(this, /*in_generated_code*/false);
}
@@ -59,16 +59,16 @@ class TestFaultHandler FINAL : public FaultHandler {
was_hit_ = true;
LOG(INFO) << "SEGV Caught. mprotecting map.";
- CHECK(target_map_->Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
+ CHECK(target_map_.Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
LOG(INFO) << "Setting value to be read.";
*GetTargetPointer() = kDataValue;
LOG(INFO) << "Changing prot to be read-only.";
- CHECK(target_map_->Protect(PROT_READ)) << "Failed to mprotect R-only";
+ CHECK(target_map_.Protect(PROT_READ)) << "Failed to mprotect R-only";
return true;
}
void CauseSegfault() {
- CHECK_EQ(target_map_->GetProtect(), PROT_NONE);
+ CHECK_EQ(target_map_.GetProtect(), PROT_NONE);
// This will segfault. The handler should deal with it though and we will get a value out of it.
uint32_t data = *GetTargetPointer();
@@ -78,19 +78,19 @@ class TestFaultHandler FINAL : public FaultHandler {
CHECK(was_hit_);
CHECK_EQ(data, kDataValue) << "Unexpected read value from mmap";
- CHECK_EQ(target_map_->GetProtect(), PROT_READ);
+ CHECK_EQ(target_map_.GetProtect(), PROT_READ);
LOG(INFO) << "Success!";
}
private:
uint32_t* GetTargetPointer() {
- return reinterpret_cast<uint32_t*>(target_map_->Begin() + 8);
+ return reinterpret_cast<uint32_t*>(target_map_.Begin() + 8);
}
static constexpr uint32_t kDataValue = 0xDEADBEEF;
std::string map_error_;
- std::unique_ptr<MemMap> target_map_;
+ MemMap target_map_;
bool was_hit_;
};
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 9e714f5111..5ffb75feb9 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -1070,7 +1070,7 @@ public class Main {
/// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,<<Const1>>]
/// CHECK-DAG: <<Cond:z\d+>> Equal [<<Phi1>>,<<Const2>>]
/// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,<<Const0>>]
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const0>>,<<Const1>>]
/// CHECK-DAG: Return [<<Phi2>>]
/// CHECK-START: boolean Main.$noinline$EqualBoolVsIntConst(boolean) dead_code_elimination$after_inlining (after)
@@ -1096,7 +1096,7 @@ public class Main {
/// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,<<Const1>>]
/// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Phi1>>,<<Const2>>]
/// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,<<Const0>>]
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const0>>,<<Const1>>]
/// CHECK-DAG: Return [<<Phi2>>]
/// CHECK-START: boolean Main.$noinline$NotEqualBoolVsIntConst(boolean) dead_code_elimination$after_inlining (after)
diff --git a/test/618-checker-induction/src/Main.java b/test/618-checker-induction/src/Main.java
index 1460725e10..dd76e41836 100644
--- a/test/618-checker-induction/src/Main.java
+++ b/test/618-checker-induction/src/Main.java
@@ -290,7 +290,7 @@ public class Main {
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
/// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:<<Loop1>>
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: int Main.closedFormNested() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -313,7 +313,7 @@ public class Main {
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
/// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:<<Loop1>>
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: int Main.closedFormNestedAlt() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -547,7 +547,7 @@ public class Main {
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi4>>] loop:none
+ /// CHECK-DAG: Return [<<Phi3>>] loop:none
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START: int Main.closedFeed() loop_optimization (after)
@@ -634,7 +634,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom1() loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom1() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -653,7 +653,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom2() loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom2() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -672,7 +672,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom3() loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom3() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -691,7 +691,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom1N(boolean, int) loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi1>>] loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom1N(boolean, int) loop_optimization (after)
/// CHECK-NOT: Phi
@@ -705,7 +705,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom2N(boolean, int) loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi1>>] loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom2N(boolean, int) loop_optimization (after)
/// CHECK-NOT: Phi
@@ -719,7 +719,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom3N(boolean, int) loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi1>>] loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom3N(boolean, int) loop_optimization (after)
/// CHECK-NOT: Phi
diff --git a/test/669-checker-break/src/Main.java b/test/669-checker-break/src/Main.java
index e59061b1aa..c40e4a635e 100644
--- a/test/669-checker-break/src/Main.java
+++ b/test/669-checker-break/src/Main.java
@@ -232,8 +232,8 @@ public class Main {
/// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Red:i\d+>> Phi [<<Zero>>,<<RedI:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:i\d+>> Phi [<<Zero>>,<<RedI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Nil>>,<<Bnd>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<RedI>> Add [<<Red>>,<<Get>>] loop:<<Loop>> outer_loop:none
@@ -248,8 +248,8 @@ public class Main {
/// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Red:i\d+>> Phi [<<Zero>>,<<RedI:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:i\d+>> Phi [<<Zero>>,<<RedI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<LE:z\d+>> LessThanOrEqual [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: If [<<LE>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
diff --git a/test/988-method-trace/expected.txt b/test/988-method-trace/expected.txt
index 75ee112c60..b263308573 100644
--- a/test/988-method-trace/expected.txt
+++ b/test/988-method-trace/expected.txt
@@ -70,6 +70,27 @@ fibonacci(30)=832040
fibonacci(5)=5
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> art.Test988$NativeOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$NativeOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$NativeOp.applyAsInt(int)
+..=> static int art.Test988.nativeFibonacci(int)
+..<= static int art.Test988.nativeFibonacci(int) -> <class java.lang.Integer: 5>
+.<= public int art.Test988$NativeOp.applyAsInt(int) -> <class java.lang.Integer: 5>
+.=> public art.Test988$FibResult(java.lang.String,int,int)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibResult(java.lang.String,int,int) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(5)=5
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> art.Test988$IterOp()
.=> public java.lang.Object()
.<= public java.lang.Object() -> <null: null>
@@ -147,8 +168,8 @@ fibonacci(5)=5
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
art.Test988.iter_fibonacci(Test988.java:255)
art.Test988$IterOp.applyAsInt(Test988.java:250)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:336)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:344)
<additional hidden frames>
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -167,8 +188,8 @@ fibonacci(5)=5
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
art.Test988.iter_fibonacci(Test988.java:255)
art.Test988$IterOp.applyAsInt(Test988.java:250)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:336)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:344)
<additional hidden frames>
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
@@ -250,8 +271,8 @@ fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
art.Test988.fibonacci(Test988.java:277)
art.Test988$RecurOp.applyAsInt(Test988.java:272)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:337)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:345)
<additional hidden frames>
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -270,8 +291,53 @@ fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
art.Test988.fibonacci(Test988.java:277)
art.Test988$RecurOp.applyAsInt(Test988.java:272)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:337)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:345)
+ <additional hidden frames>
+
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> art.Test988$NativeOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$NativeOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$NativeOp.applyAsInt(int)
+..=> static int art.Test988.nativeFibonacci(int)
+...=> public java.lang.Error(java.lang.String)
+....=> public java.lang.Throwable(java.lang.String)
+.....=> public java.lang.Object()
+.....<= public java.lang.Object() -> <null: null>
+.....=> public static final java.util.List java.util.Collections.emptyList()
+.....<= public static final java.util.List java.util.Collections.emptyList() -> <class java.util.Collections$EmptyList: []>
+.....=> public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace()
+......=> private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
+......<= private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
+.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: bad argument
+ art.Test988.nativeFibonacci(Native Method)
+ art.Test988$NativeOp.applyAsInt(Test988.java:287)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:346)
+ <additional hidden frames>
+>
+....<= public java.lang.Throwable(java.lang.String) -> <null: null>
+...<= public java.lang.Error(java.lang.String) -> <null: null>
+..<= static int art.Test988.nativeFibonacci(int) EXCEPTION
+.<= public int art.Test988$NativeOp.applyAsInt(int) EXCEPTION
+.=> public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(-19) -> java.lang.Error: bad argument
+ art.Test988.nativeFibonacci(Native Method)
+ art.Test988$NativeOp.applyAsInt(Test988.java:287)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:346)
<additional hidden frames>
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
diff --git a/test/988-method-trace/src/art/Test988.java b/test/988-method-trace/src/art/Test988.java
index 5720d1d87d..075e075c0f 100644
--- a/test/988-method-trace/src/art/Test988.java
+++ b/test/988-method-trace/src/art/Test988.java
@@ -282,6 +282,13 @@ public class Test988 {
}
}
+ static final class NativeOp implements IntUnaryOperator {
+ public int applyAsInt(int x) {
+ return nativeFibonacci(x);
+ }
+ }
+ static native int nativeFibonacci(int n);
+
static final class TestRunnableInvokeHandler implements InvocationHandler {
public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
return null;
@@ -333,8 +340,10 @@ public class Test988 {
Thread.currentThread());
doFibTest(30, new IterOp());
doFibTest(5, new RecurOp());
+ doFibTest(5, new NativeOp());
doFibTest(-19, new IterOp());
doFibTest(-19, new RecurOp());
+ doFibTest(-19, new NativeOp());
runnable.run();
@@ -358,6 +367,7 @@ public class Test988 {
ArrayList.class.toString();
RecurOp.class.toString();
IterOp.class.toString();
+ NativeOp.class.toString();
StringBuilder.class.toString();
Runnable.class.toString();
TestRunnableInvokeHandler.class.toString();
diff --git a/test/988-method-trace/trace_fib.cc b/test/988-method-trace/trace_fib.cc
new file mode 100644
index 0000000000..682f273ac1
--- /dev/null
+++ b/test/988-method-trace/trace_fib.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <jni.h>
+
+namespace art {
+namespace Test988MethodTrace {
+
+extern "C" JNIEXPORT jint JNICALL Java_art_Test988_nativeFibonacci(JNIEnv* env, jclass, jint n) {
+ if (n < 0) {
+ env->ThrowNew(env->FindClass("java/lang/Error"), "bad argument");
+ return -1;
+ } else if (n == 0) {
+ return 0;
+ }
+ jint x = 1;
+ jint y = 1;
+ for (jint i = 3; i <= n; i++) {
+ jint z = x + y;
+ x = y;
+ y = z;
+ }
+ return y;
+}
+
+} // namespace Test988MethodTrace
+} // namespace art
+
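For clarity, the loop in trace_fib.cc above computes Fibonacci iteratively from fib(1)=fib(2)=1; a plain-Java mirror of the same logic, shown only as an illustration and not part of the change, traces the value the new expected.txt lines rely on:

    // Illustration only: mirrors the jint loop in trace_fib.cc above.
    int n = 5;
    int x = 1;  // fib(i-2) at the top of each iteration
    int y = 1;  // fib(i-1) at the top of each iteration
    for (int i = 3; i <= n; i++) {
        int z = x + y;  // i=3 -> 2, i=4 -> 3, i=5 -> 5
        x = y;
        y = z;
    }
    // y == 5 here, matching the added "fibonacci(5)=5" expectation.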
diff --git a/test/Android.bp b/test/Android.bp
index a3de382059..e2656516ef 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -262,6 +262,7 @@ art_cc_defaults {
"984-obsolete-invoke/obsolete_invoke.cc",
"986-native-method-bind/native_bind.cc",
"987-agent-bind/agent_bind.cc",
+ "988-method-trace/trace_fib.cc",
"989-method-trace-throw/method_trace.cc",
"991-field-trace-2/field_trace.cc",
"992-source-data/source_file.cc",
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index c5c8ef02d7..1838575582 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -17,16 +17,19 @@
package com.android.class2greylist;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
import org.apache.bcel.Const;
import org.apache.bcel.classfile.AnnotationEntry;
import org.apache.bcel.classfile.DescendingVisitor;
+import org.apache.bcel.classfile.ElementValue;
import org.apache.bcel.classfile.ElementValuePair;
import org.apache.bcel.classfile.EmptyVisitor;
import org.apache.bcel.classfile.Field;
import org.apache.bcel.classfile.FieldOrMethod;
import org.apache.bcel.classfile.JavaClass;
import org.apache.bcel.classfile.Method;
+import org.apache.bcel.classfile.SimpleElementValue;
import java.util.Locale;
import java.util.Set;
@@ -35,8 +38,8 @@ import java.util.function.Predicate;
/**
* Visits a JavaClass instance and pulls out all members annotated with a
* specific annotation. The signatures of such members are passed to {@link
- * Status#greylistEntry(String)}. Any errors result in a call to {@link
- * Status#error(String)}.
+ * GreylistConsumer#greylistEntry(String, Integer)}. Any errors result in a
+ * call to {@link Status#error(String, Object...)}.
*
* If the annotation has a property "expectedSignature" the generated signature
* will be verified against the one specified there. If it differs, an error
@@ -45,10 +48,13 @@ import java.util.function.Predicate;
public class AnnotationVisitor extends EmptyVisitor {
private static final String EXPECTED_SIGNATURE = "expectedSignature";
+ private static final String MAX_TARGET_SDK = "maxTargetSdk";
private final JavaClass mClass;
private final String mAnnotationType;
private final Predicate<Member> mMemberFilter;
+ private final Set<Integer> mValidMaxTargetSdkValues;
+ private final GreylistConsumer mConsumer;
private final Status mStatus;
private final DescendingVisitor mDescendingVisitor;
@@ -66,27 +72,43 @@ public class AnnotationVisitor extends EmptyVisitor {
* Indicates if this is a synthetic bridge method.
*/
public final boolean bridge;
+ /**
+ * Max target SDK property of this member, if it is set, else null.
+ *
+ * Note: even though the annotation itself specifies a default value,
+ * that default value is not encoded into instances of the annotation
+ * in class files. So when no value is specified in source, it will
+ * result in null appearing here.
+ */
+ public final Integer maxTargetSdk;
- public Member(String signature, boolean bridge) {
+ public Member(String signature, boolean bridge, Integer maxTargetSdk) {
this.signature = signature;
this.bridge = bridge;
+ this.maxTargetSdk = maxTargetSdk;
}
}
- public AnnotationVisitor(
- JavaClass clazz, String annotation, Set<String> publicApis, Status status) {
+ public AnnotationVisitor(JavaClass clazz, String annotation, Set<String> publicApis,
+ Set<Integer> validMaxTargetSdkValues, GreylistConsumer consumer,
+ Status status) {
this(clazz,
annotation,
member -> !(member.bridge && publicApis.contains(member.signature)),
+ validMaxTargetSdkValues,
+ consumer,
status);
}
@VisibleForTesting
- public AnnotationVisitor(
- JavaClass clazz, String annotation, Predicate<Member> memberFilter, Status status) {
+ public AnnotationVisitor(JavaClass clazz, String annotation, Predicate<Member> memberFilter,
+ Set<Integer> validMaxTargetSdkValues, GreylistConsumer consumer,
+ Status status) {
mClass = clazz;
mAnnotationType = annotation;
mMemberFilter = memberFilter;
+ mValidMaxTargetSdkValues = validMaxTargetSdkValues;
+ mConsumer = consumer;
mStatus = status;
mDescendingVisitor = new DescendingVisitor(clazz, this);
}
@@ -119,33 +141,64 @@ public class AnnotationVisitor extends EmptyVisitor {
for (AnnotationEntry a : member.getAnnotationEntries()) {
if (mAnnotationType.equals(a.getAnnotationType())) {
mStatus.debug("Member has annotation %s", mAnnotationType);
- boolean bridge = (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
+ // For fields, the same access flag means volatile, so only check for methods.
+ boolean bridge = (member instanceof Method)
+ && (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
if (bridge) {
mStatus.debug("Member is a bridge", mAnnotationType);
}
String signature = String.format(Locale.US, signatureFormatString,
getClassDescriptor(definingClass), member.getName(), member.getSignature());
+ Integer maxTargetSdk = null;
for (ElementValuePair property : a.getElementValuePairs()) {
switch (property.getNameString()) {
case EXPECTED_SIGNATURE:
- String expected = property.getValue().stringifyValue();
- // Don't enforce for bridge methods; they're generated so won't match.
- if (!bridge && !signature.equals(expected)) {
- error(definingClass, member,
- "Expected signature does not match generated:\n"
- + "Expected: %s\n"
- + "Generated: %s", expected, signature);
- }
+ verifyExpectedSignature(
+ property, signature, definingClass, member, bridge);
+ break;
+ case MAX_TARGET_SDK:
+ maxTargetSdk = verifyAndGetMaxTargetSdk(
+ property, definingClass, member);
break;
}
}
- if (mMemberFilter.test(new Member(signature, bridge))) {
- mStatus.greylistEntry(signature);
+ if (mMemberFilter.test(new Member(signature, bridge, maxTargetSdk))) {
+ mConsumer.greylistEntry(signature, maxTargetSdk);
}
}
}
}
+ private void verifyExpectedSignature(ElementValuePair property, String signature,
+ JavaClass definingClass, FieldOrMethod member, boolean isBridge) {
+ String expected = property.getValue().stringifyValue();
+ // Don't enforce for bridge methods; they're generated so won't match.
+ if (!isBridge && !signature.equals(expected)) {
+ error(definingClass, member,
+ "Expected signature does not match generated:\n"
+ + "Expected: %s\n"
+ + "Generated: %s", expected, signature);
+ }
+ }
+
+ private Integer verifyAndGetMaxTargetSdk(
+ ElementValuePair property, JavaClass definingClass, FieldOrMethod member) {
+ if (property.getValue().getElementValueType() != ElementValue.PRIMITIVE_INT) {
+ error(definingClass, member, "Expected property %s to be of type int; got %d",
+ property.getNameString(), property.getValue().getElementValueType());
+ }
+ int value = ((SimpleElementValue) property.getValue()).getValueInt();
+ if (!mValidMaxTargetSdkValues.contains(value)) {
+ error(definingClass, member,
+ "Invalid value for %s: got %d, expected one of [%s]",
+ property.getNameString(),
+ value,
+ Joiner.on(",").join(mValidMaxTargetSdkValues));
+ return null;
+ }
+ return value;
+ }
+
private void error(JavaClass clazz, FieldOrMethod member, String message, Object... args) {
StringBuilder error = new StringBuilder();
error.append(clazz.getSourceFileName())
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index abc9421e65..64a0357e13 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -16,6 +16,7 @@
package com.android.class2greylist;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
@@ -30,7 +31,11 @@ import org.apache.commons.cli.ParseException;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
+import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
/**
@@ -41,6 +46,11 @@ public class Class2Greylist {
private static final String ANNOTATION_TYPE = "Landroid/annotation/UnsupportedAppUsage;";
+ private final Status mStatus;
+ private final String mPublicApiListFile;
+ private final String[] mPerSdkOutputFiles;
+ private final String[] mJarFiles;
+
public static void main(String[] args) {
Options options = new Options();
options.addOption(OptionBuilder
@@ -49,6 +59,15 @@ public class Class2Greylist {
.withDescription("Public API list file. Used to de-dupe bridge methods.")
.create("p"));
options.addOption(OptionBuilder
+ .withLongOpt("write-greylist")
+ .hasArgs()
+ .withDescription(
+ "Specify file to write greylist to. Can be specified multiple times. " +
+ "Format is either just a filename, or \"int:filename\". If an integer is " +
+ "given, members with a matching maxTargetSdk are written to the file; if " +
+ "no integer is given, members with no maxTargetSdk are written.")
+ .create("g"));
+ options.addOption(OptionBuilder
.withLongOpt("debug")
.hasArgs(0)
.withDescription("Enable debug")
@@ -72,7 +91,7 @@ public class Class2Greylist {
if (cmd.hasOption('h')) {
help(options);
}
- String publicApiFilename = cmd.getOptionValue('p', null);
+
String[] jarFiles = cmd.getArgs();
if (jarFiles.length == 0) {
@@ -81,38 +100,98 @@ public class Class2Greylist {
}
Status status = new Status(cmd.hasOption('d'));
+ Class2Greylist c2gl = new Class2Greylist(
+ status, cmd.getOptionValue('p', null), cmd.getOptionValues('g'), jarFiles);
+ try {
+ c2gl.main();
+ } catch (IOException e) {
+ status.error(e);
+ }
+
+ if (status.ok()) {
+ System.exit(0);
+ } else {
+ System.exit(1);
+ }
+
+ }
+
+ @VisibleForTesting
+ Class2Greylist(Status status, String publicApiListFile, String[] perSdkLevelOutputFiles,
+ String[] jarFiles) {
+ mStatus = status;
+ mPublicApiListFile = publicApiListFile;
+ mPerSdkOutputFiles = perSdkLevelOutputFiles;
+ mJarFiles = jarFiles;
+ }
+
+ private void main() throws IOException {
+ GreylistConsumer output;
+ Set<Integer> allowedSdkVersions;
+ if (mPerSdkOutputFiles != null) {
+ Map<Integer, String> outputFiles = readGreylistMap(mPerSdkOutputFiles);
+ output = new FileWritingGreylistConsumer(mStatus, outputFiles);
+ allowedSdkVersions = outputFiles.keySet();
+ } else {
+ // TODO: remove this once per-SDK greylist support is integrated into the build.
+ // Right now, mPerSdkOutputFiles is always null because the build never passes
+ // the corresponding command line flags.
+ output = new SystemOutGreylistConsumer();
+ allowedSdkVersions = new HashSet<>(Arrays.asList(null, 26, 28));
+ }
Set<String> publicApis;
- if (publicApiFilename != null) {
- try {
- publicApis = Sets.newHashSet(
- Files.readLines(new File(publicApiFilename), Charset.forName("UTF-8")));
- } catch (IOException e) {
- status.error(e);
- System.exit(1);
- return;
- }
+ if (mPublicApiListFile != null) {
+ publicApis = Sets.newHashSet(
+ Files.readLines(new File(mPublicApiListFile), Charset.forName("UTF-8")));
} else {
publicApis = Collections.emptySet();
}
- for (String jarFile : jarFiles) {
- status.debug("Processing jar file %s", jarFile);
+ for (String jarFile : mJarFiles) {
+ mStatus.debug("Processing jar file %s", jarFile);
try {
- JarReader reader = new JarReader(status, jarFile);
+ JarReader reader = new JarReader(mStatus, jarFile);
reader.stream().forEach(clazz -> new AnnotationVisitor(clazz, ANNOTATION_TYPE,
- publicApis, status).visit());
+ publicApis, allowedSdkVersions, output, mStatus).visit());
reader.close();
} catch (IOException e) {
- status.error(e);
+ mStatus.error(e);
}
}
- if (status.ok()) {
- System.exit(0);
- } else {
- System.exit(1);
- }
+ output.close();
+ }
+ @VisibleForTesting
+ Map<Integer, String> readGreylistMap(String[] argValues) {
+ Map<Integer, String> map = new HashMap<>();
+ for (String sdkFile : argValues) {
+ Integer maxTargetSdk = null;
+ String filename;
+ int colonPos = sdkFile.indexOf(':');
+ if (colonPos != -1) {
+ try {
+ maxTargetSdk = Integer.valueOf(sdkFile.substring(0, colonPos));
+ } catch (NumberFormatException nfe) {
+ mStatus.error("Not a valid integer: %s from argument value '%s'",
+ sdkFile.substring(0, colonPos), sdkFile);
+ }
+ filename = sdkFile.substring(colonPos + 1);
+ if (filename.length() == 0) {
+ mStatus.error("Not a valid file name: %s from argument value '%s'",
+ filename, sdkFile);
+ }
+ } else {
+ maxTargetSdk = null;
+ filename = sdkFile;
+ }
+ if (map.containsKey(maxTargetSdk)) {
+ mStatus.error("Multiple output files for maxTargetSdk %s", maxTargetSdk);
+ } else {
+ map.put(maxTargetSdk, filename);
+ }
+ }
+ return map;
}
private static void help(Options options) {
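To make the "int:filename" format handled by readGreylistMap concrete, here is a hypothetical set of -g argument values and the map they produce (file names invented for illustration):

    // Repeated -g values as passed on the command line:
    String[] argValues = {"greylist.txt", "26:greylist-max-o.txt", "28:greylist-max-p.txt"};
    // readGreylistMap(argValues) yields:
    //   {null=greylist.txt, 26=greylist-max-o.txt, 28=greylist-max-p.txt}
    // Members with no maxTargetSdk are written to greylist.txt; members with
    // maxTargetSdk=26 go to greylist-max-o.txt, and so on. The map's key set
    // also serves as the set of valid maxTargetSdk values given to
    // AnnotationVisitor.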
diff --git a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
new file mode 100644
index 0000000000..86eeeffbb5
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
@@ -0,0 +1,48 @@
+package com.android.class2greylist;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+import java.util.HashMap;
+import java.util.Map;
+
+public class FileWritingGreylistConsumer implements GreylistConsumer {
+
+ private final Status mStatus;
+ private final Map<Integer, PrintStream> mSdkToPrintStreamMap;
+
+ private static Map<Integer, PrintStream> openFiles(
+ Map<Integer, String> filenames) throws FileNotFoundException {
+ Map<Integer, PrintStream> streams = new HashMap<>();
+ for (Map.Entry<Integer, String> entry : filenames.entrySet()) {
+ streams.put(entry.getKey(),
+ new PrintStream(new FileOutputStream(new File(entry.getValue()))));
+ }
+ return streams;
+ }
+
+ public FileWritingGreylistConsumer(Status status, Map<Integer, String> sdkToFilenameMap)
+ throws FileNotFoundException {
+ mStatus = status;
+ mSdkToPrintStreamMap = openFiles(sdkToFilenameMap);
+ }
+
+ @Override
+ public void greylistEntry(String signature, Integer maxTargetSdk) {
+ PrintStream p = mSdkToPrintStreamMap.get(maxTargetSdk);
+ if (p == null) {
+ mStatus.error("No output file for signature %s with maxTargetSdk of %d", signature,
+ maxTargetSdk == null ? "<absent>" : maxTargetSdk.toString());
+ return;
+ }
+ p.println(signature);
+ }
+
+ @Override
+ public void close() {
+ for (PrintStream p : mSdkToPrintStreamMap.values()) {
+ p.close();
+ }
+ }
+}
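A minimal usage sketch for the new consumer, assuming the map convention above (paths invented, exception handling omitted):

    Map<Integer, String> files = new HashMap<>();
    files.put(null, "greylist.txt");         // members with no maxTargetSdk
    files.put(26, "greylist-max-o.txt");     // members with maxTargetSdk=26
    GreylistConsumer consumer = new FileWritingGreylistConsumer(status, files);
    consumer.greylistEntry("La/b/Class;->method()V", null);  // -> greylist.txt
    consumer.greylistEntry("La/b/Class;->field:I", 26);      // -> greylist-max-o.txt
    consumer.close();  // closes every underlying PrintStream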
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
new file mode 100644
index 0000000000..debc21d139
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
@@ -0,0 +1,13 @@
+package com.android.class2greylist;
+
+public interface GreylistConsumer {
+ /**
+ * Handle a new greylist entry.
+ *
+ * @param signature Signature of the member.
+ * @param maxTargetSdk maxTargetSdk value from the annotation, or null if none set.
+ */
+ void greylistEntry(String signature, Integer maxTargetSdk);
+
+ void close();
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/Status.java b/tools/class2greylist/src/com/android/class2greylist/Status.java
index d7078986d9..b5ee9f138f 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Status.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Status.java
@@ -42,16 +42,12 @@ public class Status {
mHasErrors = true;
}
- public void error(String message) {
+ public void error(String message, Object... args) {
System.err.print(ERROR);
- System.err.println(message);
+ System.err.println(String.format(Locale.US, message, args));
mHasErrors = true;
}
- public void greylistEntry(String signature) {
- System.out.println(signature);
- }
-
public boolean ok() {
return !mHasErrors;
}
diff --git a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
new file mode 100644
index 0000000000..8e12759f2b
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
@@ -0,0 +1,12 @@
+package com.android.class2greylist;
+
+public class SystemOutGreylistConsumer implements GreylistConsumer {
+ @Override
+ public void greylistEntry(String signature, Integer maxTargetSdk) {
+ System.out.println(signature);
+ }
+
+ @Override
+ public void close() {
+ }
+}
diff --git a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationVisitorTest.java
index ff9c265a25..994fe89a61 100644
--- a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationVisitorTest.java
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package com.android.javac;
+package com.android.class2greylist;
import static com.google.common.truth.Truth.assertThat;
@@ -25,10 +25,12 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.withSettings;
-import com.android.class2greylist.AnnotationVisitor;
-import com.android.class2greylist.Status;
+import static java.util.Collections.emptySet;
+
+import com.android.javac.Javac;
import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.junit.Before;
@@ -48,28 +50,30 @@ public class AnnotationVisitorTest {
public TestName mTestName = new TestName();
private Javac mJavac;
+ private GreylistConsumer mConsumer;
private Status mStatus;
@Before
public void setup() throws IOException {
System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
mTestName.getMethodName()));
+ mConsumer = mock(GreylistConsumer.class);
mStatus = mock(Status.class, withSettings().verboseLogging());
mJavac = new Javac();
mJavac.addSource("annotation.Anno", Joiner.on('\n').join(
"package annotation;",
- "import static java.lang.annotation.RetentionPolicy.CLASS;",
- "import java.lang.annotation.Retention;",
- "import java.lang.annotation.Target;",
+ "import static java.lang.annotation.RetentionPolicy.CLASS;",
+ "import java.lang.annotation.Retention;",
"@Retention(CLASS)",
"public @interface Anno {",
" String expectedSignature() default \"\";",
+ " int maxTargetSdk() default Integer.MAX_VALUE;",
"}"));
}
private void assertNoErrors() {
verify(mStatus, never()).error(any(Throwable.class));
- verify(mStatus, never()).error(any(String.class));
+ verify(mStatus, never()).error(any(), any());
}
@Test
@@ -83,12 +87,12 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -103,12 +107,12 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;-><init>()V");
}
@@ -123,12 +127,12 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->i:I");
}
@@ -143,12 +147,12 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -163,10 +167,10 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
- verify(mStatus, times(1)).error(any(String.class));
+ verify(mStatus, times(1)).error(any(), any());
}
@Test
@@ -183,11 +187,11 @@ public class AnnotationVisitorTest {
assertThat(mJavac.compile()).isTrue();
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), ANNOTATION, x -> true,
- mStatus).visit();
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class$Inner;->method()V");
}
@@ -200,11 +204,11 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
- verify(mStatus, never()).greylistEntry(any(String.class));
+ verify(mConsumer, never()).greylistEntry(any(String.class), any());
}
@Test
@@ -218,12 +222,12 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
@@ -245,15 +249,15 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus)
- .visit();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -277,15 +281,15 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus)
- .visit();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -314,16 +318,19 @@ public class AnnotationVisitorTest {
assertThat(mJavac.compile()).isTrue();
new AnnotationVisitor(
- mJavac.getCompiledClass("a.b.Interface"), ANNOTATION, x -> true, mStatus).visit();
+ mJavac.getCompiledClass("a.b.Interface"), ANNOTATION, x -> true, emptySet(),
+ mConsumer, mStatus).visit();
new AnnotationVisitor(
- mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus).visit();
+ mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, emptySet(), mConsumer,
+ mStatus).visit();
new AnnotationVisitor(
- mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus).visit();
+ mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, emptySet(), mConsumer,
+ mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Base;->method(Ljava/lang/Object;)V");
@@ -351,14 +358,104 @@ public class AnnotationVisitorTest {
"La/b/Base;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/Object;)V");
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, publicApis,
- mStatus).visit();
+ emptySet(), mConsumer, mStatus).visit();
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, publicApis,
- mStatus).visit();
+ emptySet(), mConsumer, mStatus).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// The bridge method generated for the above, is a public API so should be excluded
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
+
+ @Test
+ public void testVolatileField() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(expectedSignature=\"La/b/Class;->field:I\")",
+ " public volatile int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION,
+ member -> !member.bridge, // exclude bridge methods
+ emptySet(), mConsumer, mStatus).visit();
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class;->field:I");
+ }
+
+ @Test
+ public void testVolatileFieldWrongSignature() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(expectedSignature=\"La/b/Class;->wrong:I\")",
+ " public volatile int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ emptySet(), mConsumer, mStatus).visit();
+ verify(mStatus, times(1)).error(any(), any());
+ }
+
+ @Test
+ public void testMethodMaxTargetSdk() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(maxTargetSdk=1)",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ ImmutableSet.of(1), mConsumer, mStatus).visit();
+ assertNoErrors();
+ ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ assertThat(maxTargetSdk.getValue()).isEqualTo(1);
+ }
+
+ @Test
+ public void testMethodNoMaxTargetSdk() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ ImmutableSet.of(1), mConsumer, mStatus).visit();
+ assertNoErrors();
+ ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ assertThat(maxTargetSdk.getValue()).isEqualTo(null);
+ }
+
+ @Test
+ public void testMethodMaxTargetSdkOutOfRange() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(maxTargetSdk=2)",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true,
+ ImmutableSet.of(1), mConsumer, mStatus).visit();
+ verify(mStatus, times(1)).error(any(), any());
+ }
}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java b/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
new file mode 100644
index 0000000000..979044b909
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
@@ -0,0 +1,73 @@
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.mockito.Mock;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class Class2GreylistTest {
+
+ @Mock
+ Status mStatus;
+ @Rule
+ public TestName mTestName = new TestName();
+
+ @Before
+ public void setup() throws IOException {
+ System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
+ mTestName.getMethodName()));
+ initMocks(this);
+ }
+
+ @Test
+ public void testReadGreylistMap() {
+ Class2Greylist c2gl = new Class2Greylist(mStatus, null, null, null);
+ Map<Integer, String> map = c2gl.readGreylistMap(
+ new String[]{"noApi", "1:apiOne", "3:apiThree"});
+ verifyZeroInteractions(mStatus);
+ assertThat(map).containsExactly(null, "noApi", 1, "apiOne", 3, "apiThree");
+ }
+
+ @Test
+ public void testReadGreylistMapDuplicate() {
+ Class2Greylist c2gl = new Class2Greylist(mStatus, null, null, null);
+ Map<Integer, String> map = c2gl.readGreylistMap(
+ new String[]{"noApi", "1:apiOne", "1:anotherOne"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testReadGreylistMapDuplicateNoApi() {
+ Class2Greylist c2gl = new Class2Greylist(mStatus, null, null, null);
+ Map<Integer, String> map = c2gl.readGreylistMap(
+ new String[]{"noApi", "anotherNoApi", "1:apiOne"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testReadGreylistMapInvalidInt() {
+ Class2Greylist c2gl = new Class2Greylist(mStatus, null, null, null);
+ Map<Integer, String> map = c2gl.readGreylistMap(new String[]{"noApi", "a:apiOne"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testReadGreylistMapNoFilename() {
+ Class2Greylist c2gl = new Class2Greylist(mStatus, null, null, null);
+ Map<Integer, String> map = c2gl.readGreylistMap(new String[]{"noApi", "1:"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+}
+
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index 1c5a5d548b..0bb3f911a2 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -164,13 +164,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
std::map<size_t, TypeLinkage>& types) {
TypeLinkage& current_type = types[current_class_type.index_];
bool skip_next = false;
- size_t last_start = 0u;
for (auto inst = code_item.begin(); ; ++inst) {
- if (!count_types && last_start != buffer_.size()) {
- // Register the instruction blob.
- ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + last_start, buffer_.end())];
- last_start = buffer_.size();
- }
if (inst == code_item.end()) {
break;
}
@@ -334,31 +328,31 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
}
}
- bool result = false;
uint32_t type_idx = current_type.types_.Get(receiver_type.index_);
uint32_t local_idx = types[receiver_type.index_].methods_.Get(method_idx);
+
+ // If true, we always put the return value in r0.
+ static constexpr bool kMoveToDestReg = true;
+
+ std::vector<uint32_t> new_args;
+ if (kMoveToDestReg && arg_count % 2 == 1) {
+ // Use the extra nibble to sneak in part of the method index.
+ new_args.push_back(local_idx >> 4);
+ local_idx ^= local_idx & 0xF0;
+ }
ExtendPrefix(&type_idx, &local_idx);
- ExtendPrefix(&dest_reg, &local_idx);
- if (arg_count == 0) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx});
- } else if (arg_count == 1) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0]});
- } else if (arg_count == 2) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1]});
- } else if (arg_count == 3) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1], args[2]});
- } else if (arg_count == 4) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1], args[2], args[3]});
- } else if (arg_count == 5) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1], args[2], args[3], args[4]});
+ new_args.push_back(type_idx);
+ new_args.push_back(local_idx);
+ if (!kMoveToDestReg) {
+ ExtendPrefix(&dest_reg, &local_idx);
+ new_args.push_back(dest_reg);
}
-
- if (result) {
+ new_args.insert(new_args.end(), args, args + arg_count);
+ if (InstNibbles(opcode, new_args)) {
skip_next = next_move_result;
+ if (kMoveToDestReg && dest_reg != 0u) {
+ CHECK(InstNibbles(Instruction::MOVE, {dest_reg >> 4, dest_reg & 0xF}));
+ }
continue;
}
}
@@ -466,8 +460,11 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
void NewRegisterInstructions::Add(Instruction::Code opcode, const Instruction& inst) {
const uint8_t* start = reinterpret_cast<const uint8_t*>(&inst);
+ const size_t buffer_start = buffer_.size();
buffer_.push_back(opcode);
buffer_.insert(buffer_.end(), start + 1, start + 2 * inst.SizeInCodeUnits());
+ // Register the instruction blob.
+ ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + buffer_start, buffer_.end())];
}
void NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
@@ -500,17 +497,6 @@ void NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
*value2 &= 0XF;
}
-bool NewRegisterInstructions::InstNibblesAndIndex(uint8_t opcode,
- uint16_t idx,
- const std::vector<uint32_t>& args) {
- if (!InstNibbles(opcode, args)) {
- return false;
- }
- buffer_.push_back(static_cast<uint8_t>(idx >> 8));
- buffer_.push_back(static_cast<uint8_t>(idx));
- return true;
-}
-
bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args) {
if (verbose_level_ >= VerboseLevel::kEverything) {
std::cout << " ==> " << Instruction::Name(static_cast<Instruction::Code>(opcode)) << " ";
@@ -526,6 +512,7 @@ bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint
return false;
}
}
+ const size_t buffer_start = buffer_.size();
buffer_.push_back(opcode);
for (size_t i = 0; i < args.size(); i += 2) {
buffer_.push_back(args[i] << 4);
@@ -536,6 +523,8 @@ bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint
while (buffer_.size() % alignment_ != 0) {
buffer_.push_back(0);
}
+ // Register the instruction blob.
+ ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + buffer_start, buffer_.end())];
return true;
}
diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h
index ed40ba7d9b..db009b03b8 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.h
+++ b/tools/dexanalyze/dexanalyze_bytecode.h
@@ -64,7 +64,6 @@ class NewRegisterInstructions : public Experiment {
bool count_types,
std::map<size_t, TypeLinkage>& types);
void Add(Instruction::Code opcode, const Instruction& inst);
- bool InstNibblesAndIndex(uint8_t opcode, uint16_t idx, const std::vector<uint32_t>& args);
bool InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args);
void ExtendPrefix(uint32_t* value1, uint32_t* value2);
bool Enabled(BytecodeExperiment experiment) const {