36 files changed, 1051 insertions, 336 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index fc4dd55d67..123bcaa3bd 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -296,8 +296,8 @@ art_asflags :=
 ifdef ART_IMT_SIZE
   art_cflags += -DIMT_SIZE=$(ART_IMT_SIZE)
 else
-  # Default is 64
-  art_cflags += -DIMT_SIZE=64
+  # Default is 43
+  art_cflags += -DIMT_SIZE=43
 endif
 
 ifeq ($(ART_HEAP_POISONING),true)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index a14265e30d..1afbdfcb59 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -365,6 +365,7 @@ COMPILER_GTEST_HOST_SRC_FILES_arm64 := \
 COMPILER_GTEST_HOST_SRC_FILES_mips := \
   $(COMPILER_GTEST_COMMON_SRC_FILES_mips) \
   compiler/utils/mips/assembler_mips_test.cc \
+  compiler/utils/mips/assembler_mips32r6_test.cc \
 
 COMPILER_GTEST_HOST_SRC_FILES_mips64 := \
   $(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 4fc3b5434b..eca9e2c299 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1901,7 +1901,7 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   __ LoadFromOffset(kLoadWord, temp, temp,
       mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex() % ImTable::kSize, kArmPointerSize));
+      invoke->GetImtIndex(), kArmPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   uint32_t entry_point =
@@ -6783,7 +6783,7 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
                       locations->InAt(0).AsRegister<Register>(),
                       mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
     method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex() % ImTable::kSize, kArmPointerSize));
+        instruction->GetIndex(), kArmPointerSize));
   }
   __ LoadFromOffset(kLoadWord,
                     locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b63a3d4c1a..5d3c8c5590 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3522,7 +3522,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   __ Ldr(temp,
       MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex() % ImTable::kSize, kArm64PointerSize));
+      invoke->GetImtIndex(), kArm64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
@@ -5153,7 +5153,7 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
     __ Ldr(XRegisterFrom(locations->Out()),
            MemOperand(XRegisterFrom(locations->InAt(0)),
                       mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
     method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex() % ImTable::kSize, kArm64PointerSize));
+        instruction->GetIndex(), kArm64PointerSize));
   }
   __ Ldr(XRegisterFrom(locations->Out()),
          MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
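Editor's note: the backend hunks above (and the MIPS/x86 ones below) all delete the same `% ImTable::kSize` hash that each call site used to apply; the diff centralizes it in `ArtMethod::GetImtIndex()` (added further down in `art_method-inl.h`), and `Android.common_build.mk` shrinks the default table from 64 to 43 entries. 43 is prime, which — presumably the motivation — spreads `dex_method_index % IMT_SIZE` across slots more evenly than a power-of-two size. A minimal sketch of the resulting scheme, with names simplified for illustration:

```cpp
#include <cstdint>

// Default IMT size from Android.common_build.mk; overridable via ART_IMT_SIZE.
constexpr uint32_t kImtSize = 43;

// The hash now lives in exactly one helper instead of at every call site.
uint32_t GetImtIndex(uint32_t dex_method_index) {
  return dex_method_index % kImtSize;
}
```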
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c8e927d026..d5bad28dab 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3720,7 +3720,7 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
   __ LoadFromOffset(kLoadWord, temp, temp,
       mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex() % ImTable::kSize, kMipsPointerSize));
+      invoke->GetImtIndex(), kMipsPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
@@ -5169,7 +5169,7 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
                       locations->InAt(0).AsRegister<Register>(),
                       mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
     method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex() % ImTable::kSize, kMipsPointerSize));
+        instruction->GetIndex(), kMipsPointerSize));
   }
   __ LoadFromOffset(kLoadWord,
                     locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8d5dc84df9..539abf1de8 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2954,7 +2954,7 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
   __ LoadFromOffset(kLoadDoubleword, temp, temp,
       mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex() % ImTable::kSize, kMips64PointerSize));
+      invoke->GetImtIndex(), kMips64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9d0092b674..a21c295274 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2043,7 +2043,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
       Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
   // temp = temp->GetImtEntryAt(method_offset);
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex() % ImTable::kSize, kX86PointerSize));
+      invoke->GetImtIndex(), kX86PointerSize));
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
   __ call(Address(temp,
@@ -4068,7 +4068,7 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction
                    mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
     // temp = temp->GetImtEntryAt(method_offset);
     method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex() % ImTable::kSize, kX86PointerSize));
+        instruction->GetIndex(), kX86PointerSize));
   }
   __ movl(locations->Out().AsRegister<Register>(),
           Address(locations->InAt(0).AsRegister<Register>(), method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a8da5f2ea5..135f0c40d0 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2258,7 +2258,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
       Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
   // temp = temp->GetImtEntryAt(method_offset);
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex() % ImTable::kSize, kX86_64PointerSize));
+      invoke->GetImtIndex(), kX86_64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
@@ -3986,7 +3986,7 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct
            Address(locations->InAt(0).AsRegister<CpuRegister>(),
                    mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
     method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex() % ImTable::kSize, kX86_64PointerSize));
+        instruction->GetIndex(), kX86_64PointerSize));
   }
   __ movq(locations->Out().AsRegister<CpuRegister>(),
           Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 27b6896150..d5e80b4759 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -657,7 +657,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
     ArtMethod* new_method = nullptr;
     if (invoke_instruction->IsInvokeInterface()) {
       new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
-          method_index % ImTable::kSize, pointer_size);
+          method_index, pointer_size);
       if (new_method->IsRuntimeMethod()) {
         // Bail out as soon as we see a conflict trampoline in one of the target's
         // interface table.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f2286e46e6..1c67bcc878 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -16,6 +16,7 @@
 
 #include "instruction_builder.h"
 
+#include "art_method-inl.h"
 #include "bytecode_utils.h"
 #include "class_linker.h"
 #include "driver/compiler_options.h"
@@ -890,7 +891,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
                                           return_type,
                                           dex_pc,
                                           method_idx,
-                                          resolved_method->GetDexMethodIndex());
+                                          resolved_method->GetImtIndex());
   }
 
   return HandleInvoke(invoke,
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 29f7672b0a..7d1c2ebe0b 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2031,7 +2031,7 @@ void IntrinsicLocationsBuilderARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
   locations->SetInAt(3, Location::RequiresRegister());
   locations->SetInAt(4, Location::RequiresRegister());
 
-  locations->AddTemp(Location::RequiresRegister());
+  // Temporary registers to store lengths of strings and for calculations.
   locations->AddTemp(Location::RequiresRegister());
   locations->AddTemp(Location::RequiresRegister());
   locations->AddTemp(Location::RequiresRegister());
@@ -2059,28 +2059,55 @@ void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
   Register dstObj = locations->InAt(3).AsRegister<Register>();
   Register dstBegin = locations->InAt(4).AsRegister<Register>();
 
-  Register src_ptr = locations->GetTemp(0).AsRegister<Register>();
-  Register src_ptr_end = locations->GetTemp(1).AsRegister<Register>();
+  Register num_chr = locations->GetTemp(0).AsRegister<Register>();
+  Register src_ptr = locations->GetTemp(1).AsRegister<Register>();
   Register dst_ptr = locations->GetTemp(2).AsRegister<Register>();
-  Register tmp = locations->GetTemp(3).AsRegister<Register>();
 
   // src range to copy.
   __ add(src_ptr, srcObj, ShifterOperand(value_offset));
-  __ add(src_ptr_end, src_ptr, ShifterOperand(srcEnd, LSL, 1));
   __ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
 
   // dst to be copied.
   __ add(dst_ptr, dstObj, ShifterOperand(data_offset));
   __ add(dst_ptr, dst_ptr, ShifterOperand(dstBegin, LSL, 1));
 
+  __ subs(num_chr, srcEnd, ShifterOperand(srcBegin));
+
   // Do the copy.
-  Label loop, done;
+  Label loop, remainder, done;
+
+  // Early out for valid zero-length retrievals.
+  __ b(&done, EQ);
+
+  // Save repairing the value of num_chr on the < 4 character path.
+  __ subs(IP, num_chr, ShifterOperand(4));
+  __ b(&remainder, LT);
+
+  // Keep the result of the earlier subs, we are going to fetch at least 4 characters.
+  __ mov(num_chr, ShifterOperand(IP));
+
+  // Main loop used for longer fetches loads and stores 4x16-bit characters at a time.
+  // (LDRD/STRD fault on unaligned addresses and it's not worth inlining extra code
+  // to rectify these everywhere this intrinsic applies.)
   __ Bind(&loop);
-  __ cmp(src_ptr, ShifterOperand(src_ptr_end));
+  __ ldr(IP, Address(src_ptr, char_size * 2));
+  __ subs(num_chr, num_chr, ShifterOperand(4));
+  __ str(IP, Address(dst_ptr, char_size * 2));
+  __ ldr(IP, Address(src_ptr, char_size * 4, Address::PostIndex));
+  __ str(IP, Address(dst_ptr, char_size * 4, Address::PostIndex));
+  __ b(&loop, GE);
+
+  __ adds(num_chr, num_chr, ShifterOperand(4));
   __ b(&done, EQ);
-  __ ldrh(tmp, Address(src_ptr, char_size, Address::PostIndex));
-  __ strh(tmp, Address(dst_ptr, char_size, Address::PostIndex));
-  __ b(&loop);
+
+  // Main loop for < 4 character case and remainder handling. Loads and stores one
+  // 16-bit Java character at a time.
+  __ Bind(&remainder);
+  __ ldrh(IP, Address(src_ptr, char_size, Address::PostIndex));
+  __ subs(num_chr, num_chr, ShifterOperand(1));
+  __ strh(IP, Address(dst_ptr, char_size, Address::PostIndex));
+  __ b(&remainder, GT);
+
   __ Bind(&done);
 }
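Editor's note: the rewritten `StringGetCharsNoCheck` intrinsic above replaces the old one-halfword-per-iteration loop with a main loop that moves four 16-bit Java chars per trip through two 32-bit word transfers (via the IP scratch register), plus a remainder loop. A scalar C++ sketch of the same strategy, illustrative only — the real code operates on raw pointers derived from String/array data offsets:

```cpp
#include <cstdint>
#include <cstring>

void GetCharsNoCheck(const uint16_t* src, uint16_t* dst, int32_t num_chr) {
  if (num_chr == 0) {
    return;                  // Early out for valid zero-length retrievals.
  }
  while (num_chr >= 4) {
    // 8 bytes per trip, done as plain word copies: unaligned-safe, unlike
    // the doubleword LDRD/STRD forms the assembly comment warns about.
    std::memcpy(dst, src, 4 * sizeof(uint16_t));
    src += 4;
    dst += 4;
    num_chr -= 4;
  }
  while (num_chr-- > 0) {    // Remainder (and < 4 character) path:
    *dst++ = *src++;         // one 16-bit Java char at a time.
  }
}
```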
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 084e9011ba..afe0576906 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -461,7 +461,7 @@ class AssemblerTest : public testing::Test {
   void SetUp() OVERRIDE {
     arena_.reset(new ArenaAllocator(&pool_));
-    assembler_.reset(new (arena_.get()) Ass(arena_.get()));
+    assembler_.reset(CreateAssembler(arena_.get()));
     test_helper_.reset(
         new AssemblerTestInfrastructure(GetArchitectureString(),
                                         GetAssemblerCmdName(),
@@ -481,6 +481,11 @@ class AssemblerTest : public testing::Test {
     arena_.reset();
   }
 
+  // Override this to set up any architecture-specific things, e.g., CPU revision.
+  virtual Ass* CreateAssembler(ArenaAllocator* arena) {
+    return new (arena) Ass(arena);
+  }
+
   // Override this to set up any architecture-specific things, e.g., register vectors.
   virtual void SetUpHelpers() {}
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
new file
index 0000000000..ce92d602d0
--- /dev/null
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -0,0 +1,644 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips.h"
+
+#include <map>
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+#define __ GetAssembler()->
+
+namespace art {
+
+struct MIPSCpuRegisterCompare {
+  bool operator()(const mips::Register& a, const mips::Register& b) const {
+    return a < b;
+  }
+};
+
+class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
+                                                   mips::Register,
+                                                   mips::FRegister,
+                                                   uint32_t> {
+ public:
+  typedef AssemblerTest<mips::MipsAssembler, mips::Register, mips::FRegister, uint32_t> Base;
+
+  AssemblerMIPS32r6Test() :
+    instruction_set_features_(MipsInstructionSetFeatures::FromVariant("mips32r6", nullptr)) {
+  }
+
+ protected:
+  // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+  std::string GetArchitectureString() OVERRIDE {
+    return "mips";
+  }
+
+  std::string GetAssemblerParameters() OVERRIDE {
+    return " --no-warn -32 -march=mips32r6";
+  }
+
+  std::string GetDisassembleParameters() OVERRIDE {
+    return " -D -bbinary -mmips:isa32r6";
+  }
+
+  mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
+    return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+  }
+
+  void SetUpHelpers() OVERRIDE {
+    if (registers_.size() == 0) {
+      registers_.push_back(new mips::Register(mips::ZERO));
+      registers_.push_back(new mips::Register(mips::AT));
+      registers_.push_back(new mips::Register(mips::V0));
+      registers_.push_back(new mips::Register(mips::V1));
+      registers_.push_back(new mips::Register(mips::A0));
+      registers_.push_back(new mips::Register(mips::A1));
+      registers_.push_back(new mips::Register(mips::A2));
+      registers_.push_back(new mips::Register(mips::A3));
+      registers_.push_back(new mips::Register(mips::T0));
+      registers_.push_back(new mips::Register(mips::T1));
+      registers_.push_back(new mips::Register(mips::T2));
+      registers_.push_back(new mips::Register(mips::T3));
+      registers_.push_back(new mips::Register(mips::T4));
+      registers_.push_back(new mips::Register(mips::T5));
+      registers_.push_back(new mips::Register(mips::T6));
+      registers_.push_back(new mips::Register(mips::T7));
+      registers_.push_back(new mips::Register(mips::S0));
+      registers_.push_back(new mips::Register(mips::S1));
+      registers_.push_back(new mips::Register(mips::S2));
+      registers_.push_back(new mips::Register(mips::S3));
+      registers_.push_back(new mips::Register(mips::S4));
+      registers_.push_back(new mips::Register(mips::S5));
+      registers_.push_back(new mips::Register(mips::S6));
+      registers_.push_back(new mips::Register(mips::S7));
+      registers_.push_back(new mips::Register(mips::T8));
+      registers_.push_back(new mips::Register(mips::T9));
+      registers_.push_back(new mips::Register(mips::K0));
+      registers_.push_back(new mips::Register(mips::K1));
+      registers_.push_back(new mips::Register(mips::GP));
+      registers_.push_back(new mips::Register(mips::SP));
+      registers_.push_back(new mips::Register(mips::FP));
+      registers_.push_back(new mips::Register(mips::RA));
+
+      secondary_register_names_.emplace(mips::Register(mips::ZERO), "zero");
+      secondary_register_names_.emplace(mips::Register(mips::AT), "at");
+      secondary_register_names_.emplace(mips::Register(mips::V0), "v0");
+      secondary_register_names_.emplace(mips::Register(mips::V1), "v1");
+      secondary_register_names_.emplace(mips::Register(mips::A0), "a0");
+      secondary_register_names_.emplace(mips::Register(mips::A1), "a1");
+      secondary_register_names_.emplace(mips::Register(mips::A2), "a2");
+      secondary_register_names_.emplace(mips::Register(mips::A3), "a3");
+      secondary_register_names_.emplace(mips::Register(mips::T0), "t0");
+      secondary_register_names_.emplace(mips::Register(mips::T1), "t1");
+      secondary_register_names_.emplace(mips::Register(mips::T2), "t2");
+      secondary_register_names_.emplace(mips::Register(mips::T3), "t3");
+      secondary_register_names_.emplace(mips::Register(mips::T4), "t4");
+      secondary_register_names_.emplace(mips::Register(mips::T5), "t5");
+      secondary_register_names_.emplace(mips::Register(mips::T6), "t6");
+      secondary_register_names_.emplace(mips::Register(mips::T7), "t7");
+      secondary_register_names_.emplace(mips::Register(mips::S0), "s0");
+      secondary_register_names_.emplace(mips::Register(mips::S1), "s1");
+      secondary_register_names_.emplace(mips::Register(mips::S2), "s2");
+      secondary_register_names_.emplace(mips::Register(mips::S3), "s3");
+      secondary_register_names_.emplace(mips::Register(mips::S4), "s4");
+      secondary_register_names_.emplace(mips::Register(mips::S5), "s5");
+      secondary_register_names_.emplace(mips::Register(mips::S6), "s6");
+      secondary_register_names_.emplace(mips::Register(mips::S7), "s7");
+      secondary_register_names_.emplace(mips::Register(mips::T8), "t8");
+      secondary_register_names_.emplace(mips::Register(mips::T9), "t9");
+      secondary_register_names_.emplace(mips::Register(mips::K0), "k0");
+      secondary_register_names_.emplace(mips::Register(mips::K1), "k1");
+      secondary_register_names_.emplace(mips::Register(mips::GP), "gp");
+      secondary_register_names_.emplace(mips::Register(mips::SP), "sp");
+      secondary_register_names_.emplace(mips::Register(mips::FP), "fp");
+      secondary_register_names_.emplace(mips::Register(mips::RA), "ra");
+
+      fp_registers_.push_back(new mips::FRegister(mips::F0));
+      fp_registers_.push_back(new mips::FRegister(mips::F1));
+      fp_registers_.push_back(new mips::FRegister(mips::F2));
+      fp_registers_.push_back(new mips::FRegister(mips::F3));
+      fp_registers_.push_back(new mips::FRegister(mips::F4));
+      fp_registers_.push_back(new mips::FRegister(mips::F5));
+      fp_registers_.push_back(new mips::FRegister(mips::F6));
+      fp_registers_.push_back(new mips::FRegister(mips::F7));
+      fp_registers_.push_back(new mips::FRegister(mips::F8));
+      fp_registers_.push_back(new mips::FRegister(mips::F9));
+      fp_registers_.push_back(new mips::FRegister(mips::F10));
+      fp_registers_.push_back(new mips::FRegister(mips::F11));
+      fp_registers_.push_back(new mips::FRegister(mips::F12));
+      fp_registers_.push_back(new mips::FRegister(mips::F13));
+      fp_registers_.push_back(new mips::FRegister(mips::F14));
+      fp_registers_.push_back(new mips::FRegister(mips::F15));
+      fp_registers_.push_back(new mips::FRegister(mips::F16));
+      fp_registers_.push_back(new mips::FRegister(mips::F17));
+      fp_registers_.push_back(new mips::FRegister(mips::F18));
+      fp_registers_.push_back(new mips::FRegister(mips::F19));
+      fp_registers_.push_back(new mips::FRegister(mips::F20));
+      fp_registers_.push_back(new mips::FRegister(mips::F21));
+      fp_registers_.push_back(new mips::FRegister(mips::F22));
+      fp_registers_.push_back(new mips::FRegister(mips::F23));
+      fp_registers_.push_back(new mips::FRegister(mips::F24));
+      fp_registers_.push_back(new mips::FRegister(mips::F25));
+      fp_registers_.push_back(new mips::FRegister(mips::F26));
+      fp_registers_.push_back(new mips::FRegister(mips::F27));
+      fp_registers_.push_back(new mips::FRegister(mips::F28));
+      fp_registers_.push_back(new mips::FRegister(mips::F29));
+      fp_registers_.push_back(new mips::FRegister(mips::F30));
+      fp_registers_.push_back(new mips::FRegister(mips::F31));
+    }
+  }
+
+  void TearDown() OVERRIDE {
+    AssemblerTest::TearDown();
+    STLDeleteElements(&registers_);
+    STLDeleteElements(&fp_registers_);
+  }
+
+  std::vector<mips::Register*> GetRegisters() OVERRIDE {
+    return registers_;
+  }
+
+  std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+    return fp_registers_;
+  }
+
+  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+    return imm_value;
+  }
+
+  std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+    CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
+    return secondary_register_names_[reg];
+  }
+
+  std::string RepeatInsn(size_t count, const std::string& insn) {
+    std::string result;
+    for (; count != 0u; --count) {
+      result += insn;
+    }
+    return result;
+  }
+
+  void BranchCondTwoRegsHelper(void (mips::MipsAssembler::*f)(mips::Register,
+                                                              mips::Register,
+                                                              mips::MipsLabel*),
+                               std::string instr_name) {
+    mips::MipsLabel label;
+    (Base::GetAssembler()->*f)(mips::A0, mips::A1, &label);
+    constexpr size_t kAdduCount1 = 63;
+    for (size_t i = 0; i != kAdduCount1; ++i) {
+      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+    }
+    __ Bind(&label);
+    constexpr size_t kAdduCount2 = 64;
+    for (size_t i = 0; i != kAdduCount2; ++i) {
+      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+    }
+    (Base::GetAssembler()->*f)(mips::A2, mips::A3, &label);
+
+    std::string expected =
+        ".set noreorder\n" +
+        instr_name + " $a0, $a1, 1f\n"
+        "nop\n" +
+        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+        "1:\n" +
+        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+        instr_name + " $a2, $a3, 1b\n"
+        "nop\n";
+    DriverStr(expected, instr_name);
+  }
+
+ private:
+  std::vector<mips::Register*> registers_;
+  std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
+
+  std::vector<mips::FRegister*> fp_registers_;
+  std::unique_ptr<const MipsInstructionSetFeatures> instruction_set_features_;
+};
+
+
+TEST_F(AssemblerMIPS32r6Test, Toolchain) {
+  EXPECT_TRUE(CheckTools());
+}
+
+TEST_F(AssemblerMIPS32r6Test, MulR6) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::MulR6, "mul ${reg1}, ${reg2}, ${reg3}"), "MulR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MuhR6) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::MuhR6, "muh ${reg1}, ${reg2}, ${reg3}"), "MuhR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MuhuR6) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::MuhuR6, "muhu ${reg1}, ${reg2}, ${reg3}"), "MuhuR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, DivR6) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::DivR6, "div ${reg1}, ${reg2}, ${reg3}"), "DivR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ModR6) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::ModR6, "mod ${reg1}, ${reg2}, ${reg3}"), "ModR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, DivuR6) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::DivuR6, "divu ${reg1}, ${reg2}, ${reg3}"), "DivuR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ModuR6) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::ModuR6, "modu ${reg1}, ${reg2}, ${reg3}"), "ModuR6");
+}
+
+//////////
+// MISC //
+//////////
+
+TEST_F(AssemblerMIPS32r6Test, Aui) {
+  DriverStr(RepeatRRIb(&mips::MipsAssembler::Aui, 16, "aui ${reg1}, ${reg2}, {imm}"), "Aui");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Bitswap) {
+  DriverStr(RepeatRR(&mips::MipsAssembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Seleqz) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
+            "seleqz");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Selnez) {
+  DriverStr(RepeatRRR(&mips::MipsAssembler::Selnez, "selnez ${reg1}, ${reg2}, ${reg3}"),
+            "selnez");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ClzR6) {
+  DriverStr(RepeatRR(&mips::MipsAssembler::ClzR6, "clz ${reg1}, ${reg2}"), "clzR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CloR6) {
+  DriverStr(RepeatRR(&mips::MipsAssembler::CloR6, "clo ${reg1}, ${reg2}"), "cloR6");
+}
+
+////////////////////
+// FLOATING POINT //
+////////////////////
+
+TEST_F(AssemblerMIPS32r6Test, SelS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::SelS, "sel.s ${reg1}, ${reg2}, ${reg3}"), "sel.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SelD) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::SelD, "sel.d ${reg1}, ${reg2}, ${reg3}"), "sel.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ClassS) {
+  DriverStr(RepeatFF(&mips::MipsAssembler::ClassS, "class.s ${reg1}, ${reg2}"), "class.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ClassD) {
+  DriverStr(RepeatFF(&mips::MipsAssembler::ClassD, "class.d ${reg1}, ${reg2}"), "class.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MinS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::MinS, "min.s ${reg1}, ${reg2}, ${reg3}"), "min.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MinD) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::MinD, "min.d ${reg1}, ${reg2}, ${reg3}"), "min.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MaxS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::MaxS, "max.s ${reg1}, ${reg2}, ${reg3}"), "max.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MaxD) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::MaxD, "max.d ${reg1}, ${reg2}, ${reg3}"), "max.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUnS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUnS, "cmp.un.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.un.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpEqS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpEqS, "cmp.eq.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.eq.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUeqS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUeqS, "cmp.ueq.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.ueq.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpLtS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLtS, "cmp.lt.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.lt.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUltS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUltS, "cmp.ult.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.ult.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpLeS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLeS, "cmp.le.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.le.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUleS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUleS, "cmp.ule.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.ule.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpOrS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpOrS, "cmp.or.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.or.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUneS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUneS, "cmp.une.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.une.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpNeS) {
+  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpNeS, "cmp.ne.s ${reg1}, ${reg2}, ${reg3}"),
+            "cmp.ne.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUnD) {
"cmp.un.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.un.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpEqD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpEqD, "cmp.eq.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.eq.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpUeqD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUeqD, "cmp.ueq.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.ueq.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpLtD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLtD, "cmp.lt.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.lt.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpUltD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUltD, "cmp.ult.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.ult.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpLeD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLeD, "cmp.le.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.le.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpUleD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUleD, "cmp.ule.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.ule.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpOrD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpOrD, "cmp.or.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.or.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpUneD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUneD, "cmp.une.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.une.d"); +} + +TEST_F(AssemblerMIPS32r6Test, CmpNeD) { + DriverStr(RepeatFFF(&mips::MipsAssembler::CmpNeD, "cmp.ne.d ${reg1}, ${reg2}, ${reg3}"), + "cmp.ne.d"); +} + +TEST_F(AssemblerMIPS32r6Test, LoadDFromOffset) { + __ LoadDFromOffset(mips::F0, mips::A0, -0x8000); + __ LoadDFromOffset(mips::F0, mips::A0, +0); + __ LoadDFromOffset(mips::F0, mips::A0, +0x7FF8); + __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFB); + __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFC); + __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFF); + __ LoadDFromOffset(mips::F0, mips::A0, -0xFFF0); + __ LoadDFromOffset(mips::F0, mips::A0, -0x8008); + __ LoadDFromOffset(mips::F0, mips::A0, -0x8001); + __ LoadDFromOffset(mips::F0, mips::A0, +0x8000); + __ LoadDFromOffset(mips::F0, mips::A0, +0xFFF0); + __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE8); + __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF8); + __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF1); + __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF1); + __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF8); + __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE8); + __ LoadDFromOffset(mips::F0, mips::A0, -0x17FF0); + __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE9); + __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE9); + __ LoadDFromOffset(mips::F0, mips::A0, +0x17FF0); + __ LoadDFromOffset(mips::F0, mips::A0, +0x12345678); + + const char* expected = + "ldc1 $f0, -0x8000($a0)\n" + "ldc1 $f0, 0($a0)\n" + "ldc1 $f0, 0x7FF8($a0)\n" + "lwc1 $f0, 0x7FFB($a0)\n" + "lw $t8, 0x7FFF($a0)\n" + "mthc1 $t8, $f0\n" + "addiu $at, $a0, 0x7FF8\n" + "lwc1 $f0, 4($at)\n" + "lw $t8, 8($at)\n" + "mthc1 $t8, $f0\n" + "addiu $at, $a0, 0x7FF8\n" + "lwc1 $f0, 7($at)\n" + "lw $t8, 11($at)\n" + "mthc1 $t8, $f0\n" + "addiu $at, $a0, -0x7FF8\n" + "ldc1 $f0, -0x7FF8($at)\n" + "addiu $at, $a0, -0x7FF8\n" + "ldc1 $f0, -0x10($at)\n" + "addiu $at, $a0, -0x7FF8\n" + "lwc1 $f0, -9($at)\n" + "lw $t8, -5($at)\n" + "mthc1 $t8, $f0\n" + "addiu $at, $a0, 0x7FF8\n" + "ldc1 $f0, 8($at)\n" + "addiu $at, $a0, 0x7FF8\n" + "ldc1 $f0, 0x7FF8($at)\n" + "aui $at, $a0, 0xFFFF\n" + "ldc1 $f0, -0x7FE8($at)\n" + "aui $at, $a0, 0xFFFF\n" + "ldc1 $f0, 0x8($at)\n" + "aui $at, $a0, 0xFFFF\n" + "lwc1 $f0, 0xF($at)\n" + "lw $t8, 0x13($at)\n" + "mthc1 $t8, $f0\n" + "aui $at, $a0, 0x1\n" + "lwc1 
+      "lwc1 $f0, -0xF($at)\n"
+      "lw $t8, -0xB($at)\n"
+      "mthc1 $t8, $f0\n"
+      "aui $at, $a0, 0x1\n"
+      "ldc1 $f0, -0x8($at)\n"
+      "aui $at, $a0, 0x1\n"
+      "ldc1 $f0, 0x7FE8($at)\n"
+      "aui $at, $a0, 0xFFFF\n"
+      "ldc1 $f0, -0x7FF0($at)\n"
+      "aui $at, $a0, 0xFFFF\n"
+      "lwc1 $f0, -0x7FE9($at)\n"
+      "lw $t8, -0x7FE5($at)\n"
+      "mthc1 $t8, $f0\n"
+      "aui $at, $a0, 0x1\n"
+      "lwc1 $f0, 0x7FE9($at)\n"
+      "lw $t8, 0x7FED($at)\n"
+      "mthc1 $t8, $f0\n"
+      "aui $at, $a0, 0x1\n"
+      "ldc1 $f0, 0x7FF0($at)\n"
+      "aui $at, $a0, 0x1234\n"
+      "ldc1 $f0, 0x5678($at)\n";
+  DriverStr(expected, "LoadDFromOffset");
+}
+
+TEST_F(AssemblerMIPS32r6Test, StoreDToOffset) {
+  __ StoreDToOffset(mips::F0, mips::A0, -0x8000);
+  __ StoreDToOffset(mips::F0, mips::A0, +0);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x7FF8);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFB);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFC);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFF);
+  __ StoreDToOffset(mips::F0, mips::A0, -0xFFF0);
+  __ StoreDToOffset(mips::F0, mips::A0, -0x8008);
+  __ StoreDToOffset(mips::F0, mips::A0, -0x8001);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x8000);
+  __ StoreDToOffset(mips::F0, mips::A0, +0xFFF0);
+  __ StoreDToOffset(mips::F0, mips::A0, -0x17FE8);
+  __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF8);
+  __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF1);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF1);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF8);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x17FE8);
+  __ StoreDToOffset(mips::F0, mips::A0, -0x17FF0);
+  __ StoreDToOffset(mips::F0, mips::A0, -0x17FE9);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x17FE9);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x17FF0);
+  __ StoreDToOffset(mips::F0, mips::A0, +0x12345678);
+
+  const char* expected =
+      "sdc1 $f0, -0x8000($a0)\n"
+      "sdc1 $f0, 0($a0)\n"
+      "sdc1 $f0, 0x7FF8($a0)\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, 0x7FFB($a0)\n"
+      "sw $t8, 0x7FFF($a0)\n"
+      "addiu $at, $a0, 0x7FF8\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, 4($at)\n"
+      "sw $t8, 8($at)\n"
+      "addiu $at, $a0, 0x7FF8\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, 7($at)\n"
+      "sw $t8, 11($at)\n"
+      "addiu $at, $a0, -0x7FF8\n"
+      "sdc1 $f0, -0x7FF8($at)\n"
+      "addiu $at, $a0, -0x7FF8\n"
+      "sdc1 $f0, -0x10($at)\n"
+      "addiu $at, $a0, -0x7FF8\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, -9($at)\n"
+      "sw $t8, -5($at)\n"
+      "addiu $at, $a0, 0x7FF8\n"
+      "sdc1 $f0, 8($at)\n"
+      "addiu $at, $a0, 0x7FF8\n"
+      "sdc1 $f0, 0x7FF8($at)\n"
+      "aui $at, $a0, 0xFFFF\n"
+      "sdc1 $f0, -0x7FE8($at)\n"
+      "aui $at, $a0, 0xFFFF\n"
+      "sdc1 $f0, 0x8($at)\n"
+      "aui $at, $a0, 0xFFFF\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, 0xF($at)\n"
+      "sw $t8, 0x13($at)\n"
+      "aui $at, $a0, 0x1\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, -0xF($at)\n"
+      "sw $t8, -0xB($at)\n"
+      "aui $at, $a0, 0x1\n"
+      "sdc1 $f0, -0x8($at)\n"
+      "aui $at, $a0, 0x1\n"
+      "sdc1 $f0, 0x7FE8($at)\n"
+      "aui $at, $a0, 0xFFFF\n"
+      "sdc1 $f0, -0x7FF0($at)\n"
+      "aui $at, $a0, 0xFFFF\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, -0x7FE9($at)\n"
+      "sw $t8, -0x7FE5($at)\n"
+      "aui $at, $a0, 0x1\n"
+      "mfhc1 $t8, $f0\n"
+      "swc1 $f0, 0x7FE9($at)\n"
+      "sw $t8, 0x7FED($at)\n"
+      "aui $at, $a0, 0x1\n"
+      "sdc1 $f0, 0x7FF0($at)\n"
+      "aui $at, $a0, 0x1234\n"
+      "sdc1 $f0, 0x5678($at)\n";
+  DriverStr(expected, "StoreDToOffset");
+}
+
+//////////////
+// BRANCHES //
+//////////////
+
+// TODO: MipsAssembler::Auipc
+//       MipsAssembler::Addiupc
+//       MipsAssembler::Bc
+//       MipsAssembler::Jic
+//       MipsAssembler::Jialc
+//       MipsAssembler::Bltc
+//       MipsAssembler::Bltzc
+//       MipsAssembler::Bgtzc
+//       MipsAssembler::Bgec
+//       MipsAssembler::Bgezc
+//       MipsAssembler::Blezc
+//       MipsAssembler::Bltuc
+//       MipsAssembler::Bgeuc
+//       MipsAssembler::Beqc
+//       MipsAssembler::Bnec
+//       MipsAssembler::Beqzc
+//       MipsAssembler::Bnezc
+//       MipsAssembler::Bc1eqz
+//       MipsAssembler::Bc1nez
+//       MipsAssembler::Buncond
+//       MipsAssembler::Bcond
+//       MipsAssembler::Call
+
+// TODO: AssemblerMIPS32r6Test.B
+//       AssemblerMIPS32r6Test.Beq
+//       AssemblerMIPS32r6Test.Bne
+//       AssemblerMIPS32r6Test.Beqz
+//       AssemblerMIPS32r6Test.Bnez
+//       AssemblerMIPS32r6Test.Bltz
+//       AssemblerMIPS32r6Test.Bgez
+//       AssemblerMIPS32r6Test.Blez
+//       AssemblerMIPS32r6Test.Bgtz
+//       AssemblerMIPS32r6Test.Blt
+//       AssemblerMIPS32r6Test.Bge
+//       AssemblerMIPS32r6Test.Bltu
+//       AssemblerMIPS32r6Test.Bgeu
+
+#undef __
+
+}  // namespace art
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 57d51835f0..c722d0c333 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -2228,6 +2228,51 @@ TEST_F(AssemblerMIPSTest, Bc1t) {
   DriverStr(expected, "Bc1t");
 }
 
+///////////////////////
+// Loading Constants //
+///////////////////////
+
+TEST_F(AssemblerMIPSTest, LoadConst32) {
+  // IsUint<16>(value)
+  __ LoadConst32(mips::V0, 0);
+  __ LoadConst32(mips::V0, 65535);
+  // IsInt<16>(value)
+  __ LoadConst32(mips::V0, -1);
+  __ LoadConst32(mips::V0, -32768);
+  // Everything else
+  __ LoadConst32(mips::V0, 65536);
+  __ LoadConst32(mips::V0, 65537);
+  __ LoadConst32(mips::V0, 2147483647);
+  __ LoadConst32(mips::V0, -32769);
+  __ LoadConst32(mips::V0, -65536);
+  __ LoadConst32(mips::V0, -65537);
+  __ LoadConst32(mips::V0, -2147483647);
+  __ LoadConst32(mips::V0, -2147483648);
+
+  const char* expected =
+      // IsUint<16>(value)
+      "ori $v0, $zero, 0\n"         // __ LoadConst32(mips::V0, 0);
+      "ori $v0, $zero, 65535\n"     // __ LoadConst32(mips::V0, 65535);
+      // IsInt<16>(value)
+      "addiu $v0, $zero, -1\n"      // __ LoadConst32(mips::V0, -1);
+      "addiu $v0, $zero, -32768\n"  // __ LoadConst32(mips::V0, -32768);
+      // Everything else
+      "lui $v0, 1\n"                // __ LoadConst32(mips::V0, 65536);
+      "lui $v0, 1\n"                // __ LoadConst32(mips::V0, 65537);
+      "ori $v0, 1\n"                //                 "
+      "lui $v0, 32767\n"            // __ LoadConst32(mips::V0, 2147483647);
+      "ori $v0, 65535\n"            //                 "
+      "lui $v0, 65535\n"            // __ LoadConst32(mips::V0, -32769);
+      "ori $v0, 32767\n"            //                 "
+      "lui $v0, 65535\n"            // __ LoadConst32(mips::V0, -65536);
+      "lui $v0, 65534\n"            // __ LoadConst32(mips::V0, -65537);
+      "ori $v0, 65535\n"            //                 "
+      "lui $v0, 32768\n"            // __ LoadConst32(mips::V0, -2147483647);
+      "ori $v0, 1\n"                //                 "
+      "lui $v0, 32768\n";           // __ LoadConst32(mips::V0, -2147483648);
+  DriverStr(expected, "LoadConst32");
+}
+
 #undef __
 
 }  // namespace art
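Editor's note: the `LoadConst32` expectations above encode a simple decision tree for materializing a 32-bit constant on MIPS. A hedged, standalone sketch of that logic (not ART's API — the real `MipsAssembler::LoadConst32` emits instructions rather than returning strings):

```cpp
#include <cstdint>
#include <string>
#include <vector>

std::vector<std::string> LoadConst32(int32_t value) {
  std::vector<std::string> insns;
  if (value >= 0 && value <= 65535) {            // IsUint<16>: zero-extended imm.
    insns.push_back("ori $v0, $zero, " + std::to_string(value));
  } else if (value >= -32768 && value <= 32767) {  // IsInt<16>: sign-extended imm.
    insns.push_back("addiu $v0, $zero, " + std::to_string(value));
  } else {                                       // Everything else: lui high half,
    uint32_t bits = static_cast<uint32_t>(value);  // then ori in the low half.
    insns.push_back("lui $v0, " + std::to_string(bits >> 16));
    if ((bits & 0xFFFF) != 0) {
      // The ori is skipped when the low half is zero, e.g. 65536 or -65536.
      insns.push_back("ori $v0, " + std::to_string(bits & 0xFFFF));
    }
  }
  return insns;
}
```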
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 26450c41c7..32ae6ffad5 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -120,6 +120,10 @@ inline uint32_t ArtMethod::GetDexMethodIndex() {
   return dex_method_index_;
 }
 
+inline uint32_t ArtMethod::GetImtIndex() {
+  return GetDexMethodIndex() % ImTable::kSize;
+}
+
 inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(size_t pointer_size) {
   return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
                                        pointer_size);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 90b2406a1d..849af977e1 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -419,6 +419,8 @@ class ArtMethod FINAL {
   ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
 
+  ALWAYS_INLINE uint32_t GetImtIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+
   void SetDexMethodIndex(uint32_t new_idx) {
     // Not called within a transaction.
     dex_method_index_ = new_idx;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cb34d8a121..7c003151ea 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -6140,11 +6140,6 @@ void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) {
   }
 }
 
-static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  return interface_method->GetDexMethodIndex() % ImTable::kSize;
-}
-
 ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
                                                       LinearAlloc* linear_alloc,
                                                       size_t image_pointer_size) {
@@ -6196,7 +6191,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
       // or interface methods in the IMT here they will not create extra conflicts since we compare
       // names and signatures in SetIMTRef.
       ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
-      const uint32_t imt_index = GetIMTIndex(interface_method);
+      const uint32_t imt_index = interface_method->GetImtIndex();
 
       // There is only any conflicts if all of the interface methods for an IMT slot don't have
       // the same implementation method, keep track of this to avoid creating a conflict table in
@@ -6250,7 +6245,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
       }
       DCHECK(implementation_method != nullptr);
       ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
-      const uint32_t imt_index = GetIMTIndex(interface_method);
+      const uint32_t imt_index = interface_method->GetImtIndex();
       if (!imt[imt_index]->IsRuntimeMethod() ||
           imt[imt_index] == unimplemented_method ||
           imt[imt_index] == imt_conflict_method) {
@@ -6656,7 +6651,7 @@ bool ClassLinker::LinkInterfaceMethods(
     auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
     MethodNameAndSignatureComparator interface_name_comparator(
         interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
-    uint32_t imt_index = GetIMTIndex(interface_method);
+    uint32_t imt_index = interface_method->GetImtIndex();
     ArtMethod** imt_ptr = &out_imt[imt_index];
     // For each method listed in the interface's method list, find the
    // matching method in our class's method list.  We want to favor the
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 916ca29319..db3f88ff6e 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -19,7 +19,7 @@
 #include "entrypoint_utils.h"
 
-#include "art_method.h"
+#include "art_method-inl.h"
 #include "class_linker-inl.h"
 #include "common_throws.h"
 #include "dex_file.h"
@@ -559,7 +559,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
       }
     }
     case kInterface: {
-      uint32_t imt_index = resolved_method->GetDexMethodIndex() % ImTable::kSize;
+      uint32_t imt_index = resolved_method->GetImtIndex();
      size_t pointer_size = class_linker->GetImagePointerSize();
       ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
           Get(imt_index, pointer_size);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7175d5436b..0a70be1c95 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2174,8 +2174,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
   if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
     // If the dex cache already resolved the interface method, look whether we have
     // a match in the ImtConflictTable.
-    uint32_t imt_index = interface_method->GetDexMethodIndex();
-    ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*));
+    ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), sizeof(void*));
     if (LIKELY(conflict_method->IsRuntimeMethod())) {
       ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
       DCHECK(current_table != nullptr);
@@ -2226,8 +2225,8 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
 
   // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
   // We create a new table with the new pair { interface_method, method }.
-  uint32_t imt_index = interface_method->GetDexMethodIndex();
-  ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*));
+  uint32_t imt_index = interface_method->GetImtIndex();
+  ArtMethod* conflict_method = imt->Get(imt_index, sizeof(void*));
   if (conflict_method->IsRuntimeMethod()) {
     ArtMethod* new_conflict_method =
         Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
            cls.Get(),
@@ -2238,7 +2237,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
     if (new_conflict_method != conflict_method) {
       // Update the IMT if we create a new conflict method. No fence needed here, as the
       // data is consistent.
-      imt->Set(imt_index % ImTable::kSize,
+      imt->Set(imt_index,
               new_conflict_method,
               sizeof(void*));
    }
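Editor's note: the trampoline hunks above show the full shape of interface dispatch through the IMT — look up the slot via `GetImtIndex()`, and if the slot holds a runtime (conflict) method, fall back to its `ImtConflictTable`. A toy model of that two-level lookup, with hypothetical types standing in for `ArtMethod`/`ImTable`:

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Method { bool is_conflict = false; };

// An IMT slot either holds the unique implementation for every interface
// method hashing to it, or a conflict entry owning a per-interface-method map.
struct ImtSlot {
  Method* entry = nullptr;
  std::unordered_map<const Method*, Method*> conflict_table;
};

constexpr uint32_t kImtSize = 43;

Method* Dispatch(std::vector<ImtSlot>& imt, const Method* iface_method,
                 uint32_t dex_method_index) {
  ImtSlot& slot = imt[dex_method_index % kImtSize];  // GetImtIndex().
  if (slot.entry != nullptr && !slot.entry->is_conflict) {
    return slot.entry;                               // Fast path: no collision.
  }
  auto it = slot.conflict_table.find(iface_method);  // Slow path: consult the
  return it != slot.conflict_table.end() ? it->second : nullptr;  // conflict table.
}
```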
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 3f8f6284c0..dd750060b8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -192,7 +192,7 @@ void ConcurrentCopying::InitializePhase() {
 }
 
 // Used to switch the thread roots of a thread from from-space refs to to-space refs.
-class ThreadFlipVisitor : public Closure {
+class ConcurrentCopying::ThreadFlipVisitor : public Closure {
  public:
   ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
       : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
   }
@@ -229,7 +229,7 @@
 };
 
 // Called back from Runtime::FlipThreadRoots() during a pause.
-class FlipCallback : public Closure {
+class ConcurrentCopying::FlipCallback : public Closure {
  public:
   explicit FlipCallback(ConcurrentCopying* concurrent_copying)
       : concurrent_copying_(concurrent_copying) {
   }
@@ -304,10 +304,9 @@ void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
 }
 
 // Used to visit objects in the immune spaces.
-class ConcurrentCopyingImmuneSpaceObjVisitor {
+class ConcurrentCopying::ImmuneSpaceObjVisitor {
  public:
-  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
-      : collector_(cc) {}
+  explicit ImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {}
 
   void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
@@ -388,7 +387,7 @@ void ConcurrentCopying::MarkingPhase() {
     for (auto& space : immune_spaces_.GetSpaces()) {
       DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
-      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
+      ImmuneSpaceObjVisitor visitor(this);
       live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                     reinterpret_cast<uintptr_t>(space->Limit()),
                                     visitor);
@@ -487,7 +486,7 @@ void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
   Runtime::Current()->BroadcastForNewSystemWeaks();
 }
 
-class DisableMarkingCheckpoint : public Closure {
+class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
  public:
   explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
       : concurrent_copying_(concurrent_copying) {
   }
@@ -683,9 +682,9 @@ accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
 
 // The following visitors are used to verify that there's no references to the from-space left after
 // marking.
-class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
  public:
-  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
+  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
 
   void operator()(mirror::Object* ref) const
@@ -712,16 +711,16 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
  public:
-  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
+  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
-    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+    VerifyNoFromSpaceRefsVisitor visitor(collector_);
     visitor(ref);
   }
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
@@ -739,7 +738,7 @@
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       SHARED_REQUIRES(Locks::mutator_lock_) {
-    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+    VerifyNoFromSpaceRefsVisitor visitor(collector_);
     visitor(root->AsMirrorPtr());
   }
 
@@ -747,9 +746,9 @@
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
  public:
-  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
+  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj) const
       SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -761,7 +760,7 @@
     ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
     space::RegionSpace* region_space = collector->RegionSpace();
     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
-    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
+    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
     obj->VisitReferences(visitor, visitor);
     if (kUseBakerReadBarrier) {
       CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
@@ -785,16 +784,15 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
       CHECK(!thread->GetIsGcMarking());
     }
   }
-  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
+  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
   // Roots.
   {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
+    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
     Runtime::Current()->VisitRoots(&ref_visitor);
   }
   // The to-space.
-  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
-                             this);
+  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
   // Non-moving spaces.
   {
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -802,7 +800,7 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
   }
   // The alloc stack.
   {
-    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
+    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
     for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
       mirror::Object* const obj = it->AsMirrorPtr();
@@ -817,9 +815,9 @@
 }
 
 // The following visitors are used to assert the to-space invariant.
-class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
  public:
-  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
+  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
 
   void operator()(mirror::Object* ref) const
@@ -835,16 +833,16 @@
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
  public:
-  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
+  explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
-    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+    AssertToSpaceInvariantRefsVisitor visitor(collector_);
     visitor(ref);
   }
   void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
@@ -861,7 +859,7 @@
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       SHARED_REQUIRES(Locks::mutator_lock_) {
-    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+    AssertToSpaceInvariantRefsVisitor visitor(collector_);
     visitor(root->AsMirrorPtr());
   }
 
@@ -869,9 +867,9 @@
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
  public:
-  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
+  explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj) const
       SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -884,7 +882,7 @@
     space::RegionSpace* region_space = collector->RegionSpace();
     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
     collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
-    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
+    AssertToSpaceInvariantFieldVisitor visitor(collector);
     obj->VisitReferences(visitor, visitor);
   }
 
@@ -892,7 +890,7 @@
   ConcurrentCopying* const collector_;
 };
 
-class RevokeThreadLocalMarkStackCheckpoint : public Closure {
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
  public:
   RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                        bool disable_weak_ref_access)
@@ -1112,7 +1110,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
     region_space_->AddLiveBytes(to_ref, alloc_size);
   }
   if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
-    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
+    AssertToSpaceInvariantObjectVisitor visitor(this);
     visitor(to_ref);
   }
 }
@@ -1484,9 +1482,9 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o
 }
 
 // Used to scan ref fields of an object.
-class ConcurrentCopyingRefFieldsVisitor {
+class ConcurrentCopying::RefFieldsVisitor {
  public:
-  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
+  explicit RefFieldsVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
@@ -1522,7 +1520,7 @@ class ConcurrentCopyingRefFieldsVisitor {
 // Scan ref fields of an object.
 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
   DCHECK(!region_space_->IsInFromSpace(to_ref));
-  ConcurrentCopyingRefFieldsVisitor visitor(this);
+  RefFieldsVisitor visitor(this);
   // Disable the read barrier for a performance reason.
   to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
       visitor, visitor);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index afdc0f1f98..a986a7a1db 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -243,16 +243,21 @@ class ConcurrentCopying : public GarbageCollector {
   accounting::ReadBarrierTable* rb_table_;
   bool force_evacuate_all_;  // True if all regions are evacuated.
 
-  friend class ConcurrentCopyingRefFieldsVisitor;
-  friend class ConcurrentCopyingImmuneSpaceObjVisitor;
-  friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor;
-  friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor;
-  friend class ConcurrentCopyingClearBlackPtrsVisitor;
-  friend class ConcurrentCopyingLostCopyVisitor;
-  friend class ThreadFlipVisitor;
-  friend class FlipCallback;
-  friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;
-  friend class RevokeThreadLocalMarkStackCheckpoint;
+  class AssertToSpaceInvariantFieldVisitor;
+  class AssertToSpaceInvariantObjectVisitor;
+  class AssertToSpaceInvariantRefsVisitor;
+  class ClearBlackPtrsVisitor;
+  class ComputeUnevacFromSpaceLiveRatioVisitor;
+  class DisableMarkingCheckpoint;
+  class FlipCallback;
+  class ImmuneSpaceObjVisitor;
+  class LostCopyVisitor;
+  class RefFieldsVisitor;
+  class RevokeThreadLocalMarkStackCheckpoint;
+  class VerifyNoFromSpaceRefsFieldVisitor;
+  class VerifyNoFromSpaceRefsObjectVisitor;
+  class VerifyNoFromSpaceRefsVisitor;
+  class ThreadFlipVisitor;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
 };
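Editor's note: the `concurrent_copying` changes above replace a pile of `friend class` declarations with private nested classes — the visitors gain access to the collector's internals implicitly and stop polluting the enclosing namespace. A minimal sketch of the pattern with hypothetical `Collector`/`Visitor` names:

```cpp
// collector.h
class Collector {
 public:
  void Run();

 private:
  int state_ = 0;

  class Visitor;  // Declared nested here, defined in the .cc file.
};

// collector.cc
class Collector::Visitor {
 public:
  explicit Visitor(Collector* collector) : collector_(collector) {}
  // Nested classes may touch the enclosing class's private members,
  // so no friend declaration is needed.
  void operator()() const { ++collector_->state_; }

 private:
  Collector* const collector_;
};

void Collector::Run() {
  Visitor visitor(this);
  visitor();
}
```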
"" : " ") + "mark compact"), - space_(nullptr), collector_name_(name_), updating_references_(false) { -} + space_(nullptr), + collector_name_(name_), + updating_references_(false) {} void MarkCompact::RunPhases() { Thread* self = Thread::Current(); @@ -85,30 +86,20 @@ void MarkCompact::ForwardObject(mirror::Object* obj) { ++live_objects_in_space_; } -class CalculateObjectForwardingAddressVisitor { - public: - explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector) - : collector_(collector) {} - void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_, - Locks::heap_bitmap_lock_) { - DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment); - DCHECK(collector_->IsMarked(obj) != nullptr); - collector_->ForwardObject(obj); - } - - private: - MarkCompact* const collector_; -}; void MarkCompact::CalculateObjectForwardingAddresses() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); // The bump pointer in the space where the next forwarding address will be. bump_pointer_ = reinterpret_cast<uint8_t*>(space_->Begin()); // Visit all the marked objects in the bitmap. - CalculateObjectForwardingAddressVisitor visitor(this); objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()), reinterpret_cast<uintptr_t>(space_->End()), - visitor); + [this](mirror::Object* obj) + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment); + DCHECK(IsMarked(obj) != nullptr); + ForwardObject(obj); + }); } void MarkCompact::InitializePhase() { @@ -129,17 +120,6 @@ void MarkCompact::ProcessReferences(Thread* self) { false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); } -class BitmapSetSlowPathVisitor { - public: - void operator()(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) { - // Marking a large object, make sure its aligned as a sanity check. - if (!IsAligned<kPageSize>(obj)) { - Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); - LOG(FATAL) << obj; - } - } -}; - inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) { if (obj == nullptr) { return nullptr; @@ -155,8 +135,15 @@ inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) { } } else { DCHECK(!space_->HasAddress(obj)); - BitmapSetSlowPathVisitor visitor; - if (!mark_bitmap_->Set(obj, visitor)) { + auto slow_path = [this](const mirror::Object* ref) + SHARED_REQUIRES(Locks::mutator_lock_) { + // Marking a large object, make sure its aligned as a sanity check. + if (!IsAligned<kPageSize>(ref)) { + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); + LOG(FATAL) << ref; + } + }; + if (!mark_bitmap_->Set(obj, slow_path)) { // This object was not previously marked. 
MarkStackPush(obj); } @@ -296,10 +283,9 @@ void MarkCompact::VisitRoots( } } -class UpdateRootVisitor : public RootVisitor { +class MarkCompact::UpdateRootVisitor : public RootVisitor { public: - explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) { - } + explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) {} void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::mutator_lock_) @@ -332,10 +318,10 @@ class UpdateRootVisitor : public RootVisitor { MarkCompact* const collector_; }; -class UpdateObjectReferencesVisitor { +class MarkCompact::UpdateObjectReferencesVisitor { public: - explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) { - } + explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {} + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_) REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { collector_->UpdateObjectReferences(obj); @@ -423,10 +409,9 @@ inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Objec } } -class UpdateReferenceVisitor { +class MarkCompact::UpdateReferenceVisitor { public: - explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) { - } + explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {} void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { @@ -501,19 +486,6 @@ bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const { return space != space_ && !immune_spaces_.ContainsSpace(space); } -class MoveObjectVisitor { - public: - explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) { - } - void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_) - REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { - collector_->MoveObject(obj, obj->SizeOf()); - } - - private: - MarkCompact* const collector_; -}; - void MarkCompact::MoveObject(mirror::Object* obj, size_t len) { // Look at the forwarding address stored in the lock word to know where to copy. DCHECK(space_->HasAddress(obj)) << obj; @@ -534,10 +506,13 @@ void MarkCompact::MoveObject(mirror::Object* obj, size_t len) { void MarkCompact::MoveObjects() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); // Move the objects in the before forwarding bitmap. 
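[Editor's note] Several hunks in this change (ConcurrentCopying, MarkCompact, MarkSweep) turn file-local visitors into nested private classes so the headers can drop their friend lists. A short sketch, with hypothetical names, of why that works: a nested class may use the private members of its enclosing class without any friend grant.

    class Collector {
     public:
      void Scan();

     private:
      class UpdateRootVisitor;  // Forward declaration only; defined in the .cc file.
      int roots_updated_ = 0;
    };

    // In the .cc file: the nested visitor touches private state directly.
    class Collector::UpdateRootVisitor {
     public:
      explicit UpdateRootVisitor(Collector* collector) : collector_(collector) {}
      void operator()() const { ++collector_->roots_updated_; }  // OK: nested class.

     private:
      Collector* const collector_;
    };

    void Collector::Scan() {
      UpdateRootVisitor visitor(this);
      visitor();
    }

    int main() {
      Collector collector;
      collector.Scan();
      return 0;
    }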
- MoveObjectVisitor visitor(this); objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()), reinterpret_cast<uintptr_t>(space_->End()), - visitor); + [this](mirror::Object* obj) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { + MoveObject(obj, obj->SizeOf()); + }); CHECK(lock_words_to_restore_.empty()); } @@ -572,10 +547,9 @@ void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); } -class MarkCompactMarkObjectVisitor { +class MarkCompact::MarkObjectVisitor { public: - explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) { - } + explicit MarkObjectVisitor(MarkCompact* collector) : collector_(collector) {} void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { @@ -608,7 +582,7 @@ class MarkCompactMarkObjectVisitor { // Visit all of the references of an object and update. void MarkCompact::ScanObject(mirror::Object* obj) { - MarkCompactMarkObjectVisitor visitor(this); + MarkObjectVisitor visitor(this); obj->VisitReferences(visitor, visitor); } diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h index 48311570b5..16abfb73b8 100644 --- a/runtime/gc/collector/mark_compact.h +++ b/runtime/gc/collector/mark_compact.h @@ -222,13 +222,10 @@ class MarkCompact : public GarbageCollector { bool updating_references_; private: - friend class BitmapSetSlowPathVisitor; - friend class CalculateObjectForwardingAddressVisitor; - friend class MarkCompactMarkObjectVisitor; - friend class MoveObjectVisitor; - friend class UpdateObjectReferencesVisitor; - friend class UpdateReferenceVisitor; - friend class UpdateRootVisitor; + class MarkObjectVisitor; + class UpdateObjectReferencesVisitor; + class UpdateReferenceVisitor; + class UpdateRootVisitor; DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact); }; diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index ac5931fa53..9f54f1cdd4 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -266,7 +266,7 @@ void MarkSweep::MarkingPhase() { PreCleanCards(); } -class ScanObjectVisitor { +class MarkSweep::ScanObjectVisitor { public: explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {} @@ -393,12 +393,14 @@ bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref return IsMarked(ref->AsMirrorPtr()); } -class MarkSweepMarkObjectSlowPath { +class MarkSweep::MarkObjectSlowPath { public: - explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, - mirror::Object* holder = nullptr, - MemberOffset offset = MemberOffset(0)) - : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {} + explicit MarkObjectSlowPath(MarkSweep* mark_sweep, + mirror::Object* holder = nullptr, + MemberOffset offset = MemberOffset(0)) + : mark_sweep_(mark_sweep), + holder_(holder), + offset_(offset) {} void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { if (kProfileLargeObjects) { @@ -480,7 +482,7 @@ inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, if (kCountMarkedObjects) { ++mark_slowpath_count_; } - MarkSweepMarkObjectSlowPath visitor(this, holder, offset); + MarkObjectSlowPath visitor(this, holder, offset); // TODO: We already know that the object is not in the 
current_space_bitmap_ but MarkBitmap::Set // will check again. if (!mark_bitmap_->Set(obj, visitor)) { @@ -515,7 +517,7 @@ inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) { if (LIKELY(object_bitmap->HasAddress(obj))) { return !object_bitmap->AtomicTestAndSet(obj); } - MarkSweepMarkObjectSlowPath visitor(this); + MarkObjectSlowPath visitor(this); return !mark_bitmap_->AtomicTestAndSet(obj, visitor); } @@ -534,7 +536,7 @@ inline void MarkSweep::MarkObject(mirror::Object* obj, } } -class VerifyRootMarkedVisitor : public SingleRootVisitor { +class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor { public: explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { } @@ -563,7 +565,7 @@ void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, } } -class VerifyRootVisitor : public SingleRootVisitor { +class MarkSweep::VerifyRootVisitor : public SingleRootVisitor { public: void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { @@ -610,7 +612,7 @@ void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) { this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving)); } -class DelayReferenceReferentVisitor { +class MarkSweep::DelayReferenceReferentVisitor { public: explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {} @@ -625,7 +627,7 @@ class DelayReferenceReferentVisitor { }; template <bool kUseFinger = false> -class MarkStackTask : public Task { +class MarkSweep::MarkStackTask : public Task { public: MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, @@ -783,7 +785,7 @@ class MarkStackTask : public Task { } }; -class CardScanTask : public MarkStackTask<false> { +class MarkSweep::CardScanTask : public MarkStackTask<false> { public: CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, @@ -948,7 +950,7 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { } } -class RecursiveMarkTask : public MarkStackTask<false> { +class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> { public: RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, @@ -1061,7 +1063,7 @@ void MarkSweep::SweepSystemWeaks(Thread* self) { Runtime::Current()->SweepSystemWeaks(this); } -class VerifySystemWeakVisitor : public IsMarkedVisitor { +class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor { public: explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} @@ -1090,7 +1092,7 @@ void MarkSweep::VerifySystemWeaks() { Runtime::Current()->SweepSystemWeaks(&visitor); } -class CheckpointMarkThreadRoots : public Closure, public RootVisitor { +class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor { public: CheckpointMarkThreadRoots(MarkSweep* mark_sweep, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h index 7168f96940..9747031152 100644 --- a/runtime/gc/collector/mark_sweep.h +++ b/runtime/gc/collector/mark_sweep.h @@ -353,17 +353,17 @@ class MarkSweep : public GarbageCollector { std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_; private: - friend class CardScanTask; - friend class CheckBitmapVisitor; - friend class CheckReferenceVisitor; - friend class CheckpointMarkThreadRoots; - friend class Heap; - friend class FifoMarkStackChunk; - friend class MarkObjectVisitor; - template<bool kUseFinger> friend class MarkStackTask; - 
friend class MarkSweepMarkObjectSlowPath; - friend class VerifyRootMarkedVisitor; - friend class VerifyRootVisitor; + class CardScanTask; + class CheckpointMarkThreadRoots; + class DelayReferenceReferentVisitor; + template<bool kUseFinger> class MarkStackTask; + class MarkObjectSlowPath; + class RecursiveMarkTask; + class ScanObjectParallelVisitor; + class ScanObjectVisitor; + class VerifyRootMarkedVisitor; + class VerifyRootVisitor; + class VerifySystemWeakVisitor; DISALLOW_IMPLICIT_CONSTRUCTORS(MarkSweep); }; diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h index e87b5ff332..78fb2d24ae 100644 --- a/runtime/gc/collector/semi_space-inl.h +++ b/runtime/gc/collector/semi_space-inl.h @@ -26,21 +26,6 @@ namespace art { namespace gc { namespace collector { -class BitmapSetSlowPathVisitor { - public: - explicit BitmapSetSlowPathVisitor(SemiSpace* semi_space) : semi_space_(semi_space) { - } - - void operator()(const mirror::Object* obj) const { - CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_"; - // Marking a large object, make sure its aligned as a sanity check. - CHECK_ALIGNED(obj, kPageSize); - } - - private: - SemiSpace* const semi_space_; -}; - inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const { DCHECK(from_space_->HasAddress(obj)); LockWord lock_word = obj->GetLockWord(false); @@ -76,8 +61,12 @@ inline void SemiSpace::MarkObject( obj_ptr->Assign(forward_address); } else if (!collect_from_space_only_ && !immune_spaces_.IsInImmuneRegion(obj)) { DCHECK(!to_space_->HasAddress(obj)) << "Tried to mark " << obj << " in to-space"; - BitmapSetSlowPathVisitor visitor(this); - if (!mark_bitmap_->Set(obj, visitor)) { + auto slow_path = [this](const mirror::Object* ref) { + CHECK(!to_space_->HasAddress(ref)) << "Marking " << ref << " in to_space_"; + // Marking a large object, make sure it's aligned as a sanity check. + CHECK_ALIGNED(ref, kPageSize); + }; + if (!mark_bitmap_->Set(obj, slow_path)) { // This object was not previously marked. MarkStackPush(obj); } diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index f37daa54e9..7a4c025c30 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -282,22 +282,11 @@ void SemiSpace::MarkingPhase() { } } -class SemiSpaceScanObjectVisitor { - public: - explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} - void operator()(Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { - DCHECK(obj != nullptr); - semi_space_->ScanObject(obj); - } - private: - SemiSpace* const semi_space_; -}; - // Used to verify that there's no references to the from-space. 
-class SemiSpaceVerifyNoFromSpaceReferencesVisitor { +class SemiSpace::VerifyNoFromSpaceReferencesVisitor { public: - explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) : - from_space_(from_space) {} + explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) + : from_space_(from_space) {} void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { @@ -331,23 +320,10 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor { void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) { DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; - SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_); + VerifyNoFromSpaceReferencesVisitor visitor(from_space_); obj->VisitReferences(visitor, VoidFunctor()); } -class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor { - public: - explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} - void operator()(Object* obj) const - SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { - DCHECK(obj != nullptr); - semi_space_->VerifyNoFromSpaceReferences(obj); - } - - private: - SemiSpace* const semi_space_; -}; - void SemiSpace::MarkReachableObjects() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); { @@ -390,10 +366,12 @@ void SemiSpace::MarkReachableObjects() { } else { TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings()); accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); - SemiSpaceScanObjectVisitor visitor(this); live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), reinterpret_cast<uintptr_t>(space->End()), - visitor); + [this](mirror::Object* obj) + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + ScanObject(obj); + }); } if (kIsDebugBuild) { // Verify that there are no from-space references that @@ -401,10 +379,13 @@ void SemiSpace::MarkReachableObjects() { // card table) didn't miss any from-space references in the // space. accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); - SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this); live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), reinterpret_cast<uintptr_t>(space->End()), - visitor); + [this](Object* obj) + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + DCHECK(obj != nullptr); + VerifyNoFromSpaceReferences(obj); + }); } } } @@ -424,10 +405,12 @@ void SemiSpace::MarkReachableObjects() { // classes (primitive array classes) that could move though they // don't contain any other references. accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap(); - SemiSpaceScanObjectVisitor visitor(this); large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()), reinterpret_cast<uintptr_t>(los->End()), - visitor); + [this](mirror::Object* obj) + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + ScanObject(obj); + }); } // Recursively process the mark stack. 
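[Editor's note] semi_space.cc now hands capturing lambdas directly to VisitMarkedRange instead of instantiating SemiSpaceScanObjectVisitor and friends. A self-contained sketch of that call shape, again with stand-in types rather than the real bitmap: the visitor parameter is a template, so any callable fits.

    #include <cstdio>
    #include <vector>

    struct Object { int size; };

    // Stand-in for ContinuousSpaceBitmap::VisitMarkedRange: applies the
    // callable to every marked object in a range.
    template <typename Visitor>
    void VisitMarked(const std::vector<Object*>& marked, const Visitor& visitor) {
      for (Object* obj : marked) {
        visitor(obj);
      }
    }

    class Collector {
     public:
      void ScanAll(const std::vector<Object*>& marked) {
        // The lambda captures `this`, replacing a one-off functor class.
        VisitMarked(marked, [this](Object* obj) { ScanObject(obj); });
      }

      size_t scanned_bytes() const { return scanned_bytes_; }

     private:
      void ScanObject(Object* obj) { scanned_bytes_ += obj->size; }
      size_t scanned_bytes_ = 0;
    };

    int main() {
      Object a{8}, b{16};
      std::vector<Object*> marked = {&a, &b};
      Collector collector;
      collector.ScanAll(marked);
      std::printf("scanned %zu bytes\n", collector.scanned_bytes());
      return 0;
    }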
ProcessMarkStack(); @@ -697,10 +680,9 @@ void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); } -class SemiSpaceMarkObjectVisitor { +class SemiSpace::MarkObjectVisitor { public: - explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) { - } + explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {} void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { @@ -739,7 +721,7 @@ class SemiSpaceMarkObjectVisitor { // Visit all of the references of an object and update. void SemiSpace::ScanObject(Object* obj) { DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; - SemiSpaceMarkObjectVisitor visitor(this); + MarkObjectVisitor visitor(this); obj->VisitReferences(visitor, visitor); } diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h index 0199e1ae56..694e536b7d 100644 --- a/runtime/gc/collector/semi_space.h +++ b/runtime/gc/collector/semi_space.h @@ -272,7 +272,9 @@ class SemiSpace : public GarbageCollector { bool swap_semi_spaces_; private: - friend class BitmapSetSlowPathVisitor; + class BitmapSetSlowPathVisitor; + class MarkObjectVisitor; + class VerifyNoFromSpaceReferencesVisitor; DISALLOW_IMPLICIT_CONSTRUCTORS(SemiSpace); }; diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 1ebe5cc47b..8cadc2e0fc 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -1158,6 +1158,80 @@ static bool RelocateInPlace(ImageHeader& image_header, return true; } +static MemMap* LoadImageFile(const char* image_filename, + const char* image_location, + const ImageHeader& image_header, + uint8_t* address, + int fd, + TimingLogger& logger, + std::string* error_msg) { + TimingLogger::ScopedTiming timing("MapImageFile", &logger); + const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode(); + if (storage_mode == ImageHeader::kStorageModeUncompressed) { + return MemMap::MapFileAtAddress(address, + image_header.GetImageSize(), + PROT_READ | PROT_WRITE, + MAP_PRIVATE, + fd, + 0, + /*low_4gb*/true, + /*reuse*/false, + image_filename, + error_msg); + } + + if (storage_mode != ImageHeader::kStorageModeLZ4 && + storage_mode != ImageHeader::kStorageModeLZ4HC) { + *error_msg = StringPrintf("Invalid storage mode in image header %d", + static_cast<int>(storage_mode)); + return nullptr; + } + + // Reserve output and decompress into it. + std::unique_ptr<MemMap> map(MemMap::MapAnonymous(image_location, + address, + image_header.GetImageSize(), + PROT_READ | PROT_WRITE, + /*low_4gb*/true, + /*reuse*/false, + error_msg)); + if (map != nullptr) { + const size_t stored_size = image_header.GetDataSize(); + const size_t decompress_offset = sizeof(ImageHeader); // Skip the header. + std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size, + PROT_READ, + MAP_PRIVATE, + fd, + /*offset*/0, + /*low_4gb*/false, + image_filename, + error_msg)); + if (temp_map == nullptr) { + DCHECK(!error_msg->empty()); + return nullptr; + } + memcpy(map->Begin(), &image_header, sizeof(ImageHeader)); + const uint64_t start = NanoTime(); + // LZ4HC and LZ4 have same internal format, both use LZ4_decompress. 
+ TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger); + const size_t decompressed_size = LZ4_decompress_safe( + reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader), + reinterpret_cast<char*>(map->Begin()) + decompress_offset, + stored_size, + map->Size() - decompress_offset); + VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start); + if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) { + *error_msg = StringPrintf( + "Decompressed size does not match expected image size %zu vs %zu", + decompressed_size + sizeof(ImageHeader), + image_header.GetImageSize()); + return nullptr; + } + } + + return map.release(); +} + ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_location, bool validate_oat_file, @@ -1235,91 +1309,30 @@ ImageSpace* ImageSpace::Init(const char* image_filename, return nullptr; } - // The preferred address to map the image, null specifies any address. If we manage to map the - // image at the image begin, the amount of fixup work required is minimized. - std::vector<uint8_t*> addresses(1, image_header->GetImageBegin()); - if (image_header->IsPic()) { - // Can also map at a random low_4gb address since we can relocate in-place. - addresses.push_back(nullptr); - } - - // Note: The image header is part of the image due to mmap page alignment required of offset. std::unique_ptr<MemMap> map; - std::string temp_error_msg; - for (uint8_t* address : addresses) { - TimingLogger::ScopedTiming timing("MapImageFile", &logger); - // Only care about the error message for the last address in addresses. We want to avoid the - // overhead of printing the process maps if we can relocate. - std::string* out_error_msg = (address == addresses.back()) ? &temp_error_msg : nullptr; - const ImageHeader::StorageMode storage_mode = image_header->GetStorageMode(); - if (storage_mode == ImageHeader::kStorageModeUncompressed) { - map.reset(MemMap::MapFileAtAddress(address, - image_header->GetImageSize(), - PROT_READ | PROT_WRITE, - MAP_PRIVATE, - file->Fd(), - 0, - /*low_4gb*/true, - /*reuse*/false, - image_filename, - /*out*/out_error_msg)); - } else { - if (storage_mode != ImageHeader::kStorageModeLZ4 && - storage_mode != ImageHeader::kStorageModeLZ4HC) { - *error_msg = StringPrintf("Invalid storage mode in image header %d", - static_cast<int>(storage_mode)); - return nullptr; - } - // Reserve output and decompress into it. - map.reset(MemMap::MapAnonymous(image_location, - address, - image_header->GetImageSize(), - PROT_READ | PROT_WRITE, - /*low_4gb*/true, - /*reuse*/false, - /*out*/out_error_msg)); - if (map != nullptr) { - const size_t stored_size = image_header->GetDataSize(); - const size_t decompress_offset = sizeof(ImageHeader); // Skip the header. - std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size, - PROT_READ, - MAP_PRIVATE, - file->Fd(), - /*offset*/0, - /*low_4gb*/false, - image_filename, - out_error_msg)); - if (temp_map == nullptr) { - DCHECK(!out_error_msg->empty()); - return nullptr; - } - memcpy(map->Begin(), image_header, sizeof(ImageHeader)); - const uint64_t start = NanoTime(); - // LZ4HC and LZ4 have same internal format, both use LZ4_decompress. 
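[Editor's note] The new LoadImageFile helper keeps the existing decompression logic: map the header plus compressed payload, decompress past the header, and fail unless the output size matches the header's image size exactly. A sketch of that size check against the real liblz4 entry points; the buffer handling is simplified and the helper name is hypothetical.

    #include <lz4.h>  // liblz4; LZ4_decompress_safe / LZ4_compress_default are its real API.

    #include <cstdio>
    #include <string>

    // Returns true only if the payload decompresses to exactly expected_size
    // bytes, mirroring the image-size check above. LZ4_decompress_safe returns
    // a negative value for malformed input, which also fails the comparison.
    bool DecompressExact(const char* compressed, int compressed_size,
                         char* out, int expected_size, std::string* error_msg) {
      const int n = LZ4_decompress_safe(compressed, out, compressed_size,
                                        expected_size);
      if (n != expected_size) {
        *error_msg = "Decompressed size does not match expected image size";
        return false;
      }
      return true;
    }

    int main() {
      const char src[] = "hello hello hello hello";
      char compressed[64];
      const int csize = LZ4_compress_default(src, compressed, sizeof(src),
                                             sizeof(compressed));
      char out[sizeof(src)];
      std::string error;
      if (DecompressExact(compressed, csize, out, sizeof(src), &error)) {
        std::printf("round trip ok: %s\n", out);
      }
      return 0;
    }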
- TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger); - const size_t decompressed_size = LZ4_decompress_safe( - reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader), - reinterpret_cast<char*>(map->Begin()) + decompress_offset, - stored_size, - map->Size() - decompress_offset); - VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start); - if (decompressed_size + sizeof(ImageHeader) != image_header->GetImageSize()) { - *error_msg = StringPrintf( - "Decompressed size does not match expected image size %zu vs %zu", - decompressed_size + sizeof(ImageHeader), - image_header->GetImageSize()); - return nullptr; - } - } - } - if (map != nullptr) { - break; - } - } - + // GetImageBegin is the preferred address to map the image. If we manage to map the + // image at the image begin, the amount of fixup work required is minimized. + map.reset(LoadImageFile(image_filename, + image_location, + *image_header, + image_header->GetImageBegin(), + file->Fd(), + logger, + error_msg)); + // If the header specifies PIC mode, we can also map at a random low_4gb address since we can + // relocate in-place. + if (map == nullptr && image_header->IsPic()) { + map.reset(LoadImageFile(image_filename, + image_location, + *image_header, + /* address */ nullptr, + file->Fd(), + logger, + error_msg)); + } + // Were we able to load something and continue? if (map == nullptr) { - DCHECK(!temp_error_msg.empty()); - *error_msg = temp_error_msg; + DCHECK(!error_msg->empty()); return nullptr; } DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader))); diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 3781c6f99d..c047ba20f5 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -342,7 +342,9 @@ MemMap* MemMap::MapAnonymous(const char* name, if (actual == MAP_FAILED) { if (error_msg != nullptr) { - PrintFileToLog("/proc/self/maps", LogSeverity::WARNING); + if (kIsDebugBuild || VLOG_IS_ON(oat)) { + PrintFileToLog("/proc/self/maps", LogSeverity::WARNING); + } *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. 
" "See process maps in the log.", diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 9670accf56..2adf54ab86 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -831,7 +831,8 @@ class MANAGED Class FINAL : public Object { ReadBarrierOption kReadBarrierOption = kWithReadBarrier> bool ShouldHaveImt() SHARED_REQUIRES(Locks::mutator_lock_) { return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>() && - GetIfTable() != nullptr && !IsArrayClass(); + GetIfTable<kVerifyFlags, kReadBarrierOption>() != nullptr && + !IsArrayClass<kVerifyFlags, kReadBarrierOption>(); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, diff --git a/runtime/oat.h b/runtime/oat.h index 286394e55a..52d4c4209e 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '0', '8', '0', '\0' }; + static constexpr uint8_t kOatVersion[] = { '0', '8', '1', '\0' }; static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/test/033-class-init-deadlock/expected.txt b/test/033-class-init-deadlock/expected.txt index 182d0da00d..9e843a06f6 100644 --- a/test/033-class-init-deadlock/expected.txt +++ b/test/033-class-init-deadlock/expected.txt @@ -1,6 +1,4 @@ Deadlock test starting. -A initializing... -B initializing... Deadlock test interrupting threads. Deadlock test main thread bailing. A initialized: false diff --git a/test/033-class-init-deadlock/src/Main.java b/test/033-class-init-deadlock/src/Main.java index 32332307f5..bd4d4ab7b5 100644 --- a/test/033-class-init-deadlock/src/Main.java +++ b/test/033-class-init-deadlock/src/Main.java @@ -14,6 +14,8 @@ * limitations under the License. */ +import java.util.concurrent.CyclicBarrier; + /** * This causes most VMs to lock up. * @@ -23,6 +25,8 @@ public class Main { public static boolean aInitialized = false; public static boolean bInitialized = false; + public static CyclicBarrier barrier = new CyclicBarrier(3); + static public void main(String[] args) { Thread thread1, thread2; @@ -30,10 +34,10 @@ public class Main { thread1 = new Thread() { public void run() { new A(); } }; thread2 = new Thread() { public void run() { new B(); } }; thread1.start(); - // Give thread1 a chance to start before starting thread2. - try { Thread.sleep(1000); } catch (InterruptedException ie) { } thread2.start(); + // Not expecting any exceptions, so print them out if we get them. + try { barrier.await(); } catch (Exception e) { System.out.println(e); } try { Thread.sleep(6000); } catch (InterruptedException ie) { } System.out.println("Deadlock test interrupting threads."); @@ -48,8 +52,8 @@ public class Main { class A { static { - System.out.println("A initializing..."); - try { Thread.sleep(3000); } catch (InterruptedException ie) { } + // Not expecting any exceptions, so print them out if we get them. + try { Main.barrier.await(); } catch (Exception e) { System.out.println(e); } new B(); System.out.println("A initialized"); Main.aInitialized = true; @@ -58,8 +62,8 @@ class A { class B { static { - System.out.println("B initializing..."); - try { Thread.sleep(3000); } catch (InterruptedException ie) { } + // Not expecting any exceptions, so print them out if we get them. 
+ try { Main.barrier.await(); } catch (Exception e) { System.out.println(e); } new A(); System.out.println("B initialized"); Main.bInitialized = true; diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc index 7b2c6cbcd5..c0d93dd8a1 100644 --- a/test/566-polymorphic-inlining/polymorphic_inline.cc +++ b/test/566-polymorphic-inlining/polymorphic_inline.cc @@ -17,6 +17,7 @@ #include "art_method.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" +#include "jit/profiling_info.h" #include "oat_quick_method_header.h" #include "scoped_thread_state_change.h" #include "stack_map.h" @@ -37,8 +38,10 @@ static void do_checks(jclass cls, const char* method_name) { if (code_cache->ContainsPc(header->GetCode())) { break; } else { - // sleep one second to give time to the JIT compiler. - sleep(1); + // Sleep to yield to the compiler thread. + usleep(1000); + // Will either ensure it's compiled or do the compilation itself. + jit->CompileMethod(method, soa.Self(), /* osr */ false); } } @@ -47,7 +50,25 @@ static void do_checks(jclass cls, const char* method_name) { CHECK(info.HasInlineInfo(encoding)); } -extern "C" JNIEXPORT void JNICALL Java_Main_ensureJittedAndPolymorphicInline(JNIEnv*, jclass cls) { +static void allocate_profiling_info(jclass cls, const char* method_name) { + ScopedObjectAccess soa(Thread::Current()); + mirror::Class* klass = soa.Decode<mirror::Class*>(cls); + ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, sizeof(void*)); + ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true); +} + +extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfilingInfo566(JNIEnv*, jclass cls) { + jit::Jit* jit = Runtime::Current()->GetJit(); + if (jit == nullptr) { + return; + } + + allocate_profiling_info(cls, "testInvokeVirtual"); + allocate_profiling_info(cls, "testInvokeInterface"); + allocate_profiling_info(cls, "$noinline$testInlineToSameTarget"); +} + +extern "C" JNIEXPORT void JNICALL Java_Main_ensureJittedAndPolymorphicInline566(JNIEnv*, jclass cls) { jit::Jit* jit = Runtime::Current()->GetJit(); if (jit == nullptr) { return; diff --git a/test/566-polymorphic-inlining/src/Main.java b/test/566-polymorphic-inlining/src/Main.java index a59ce5b344..d39e6ed57b 100644 --- a/test/566-polymorphic-inlining/src/Main.java +++ b/test/566-polymorphic-inlining/src/Main.java @@ -39,6 +39,9 @@ public class Main implements Itf { itfs[1] = mains[1] = new Subclass(); itfs[2] = mains[2] = new OtherSubclass(); + // Create the profiling info eagerly to make sure they are filled. + ensureProfilingInfo566(); + // Make testInvokeVirtual and testInvokeInterface hot to get them jitted. // We pass Main and Subclass to get polymorphic inlining based on calling // the same method. @@ -51,7 +54,7 @@ public class Main implements Itf { $noinline$testInlineToSameTarget(mains[1]); } - ensureJittedAndPolymorphicInline(); + ensureJittedAndPolymorphicInline566(); // At this point, the JIT should have compiled both methods, and inline // sameInvokeVirtual and sameInvokeInterface. @@ -71,12 +74,12 @@ public class Main implements Itf { } public Class sameInvokeVirtual() { - field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo + field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo. 
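[Editor's note] The 566-polymorphic-inlining change swaps a blind one-second sleep for a short yield plus an explicit CompileMethod call, so the test either observes the JIT's output or produces it itself. The general loop shape as a sketch; WaitUntilDone and both callbacks are hypothetical, not ART APIs.

    #include <chrono>
    #include <cstdio>
    #include <functional>
    #include <thread>

    // Poll a condition with a short sleep and a "make progress yourself"
    // fallback, instead of sleeping long and hoping a worker thread finishes.
    void WaitUntilDone(const std::function<bool()>& done,
                       const std::function<void()>& make_progress) {
      while (!done()) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));  // brief yield
        make_progress();  // e.g. jit->CompileMethod(method, self, /* osr */ false)
      }
    }

    int main() {
      int compiled = 0;
      WaitUntilDone([&] { return compiled >= 3; },
                    [&] { ++compiled; std::printf("compiling step %d\n", compiled); });
      return 0;
    }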
return Main.class; } public Class sameInvokeInterface() { - field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo + field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo. return Itf.class; } @@ -95,7 +98,8 @@ public class Main implements Itf { public Object field = new Object(); - public static native void ensureJittedAndPolymorphicInline(); + public static native void ensureJittedAndPolymorphicInline566(); + public static native void ensureProfilingInfo566(); public void increment() { field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo |