62 files changed, 1750 insertions, 421 deletions
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index fb116bb3da..d055b37ea7 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -697,6 +697,9 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t } CompiledMethod* CompilerDriver::CompileArtMethod(Thread* self, ArtMethod* method) { + DCHECK_EQ(method, + method->GetInterfaceMethodIfProxy( + Runtime::Current()->GetClassLinker()->GetImagePointerSize())); const uint32_t method_idx = method->GetDexMethodIndex(); const uint32_t access_flags = method->GetAccessFlags(); const InvokeType invoke_type = method->GetInvokeType(); diff --git a/compiler/image_test.cc b/compiler/image_test.cc index 21d582eec4..fd6cd82f7c 100644 --- a/compiler/image_test.cc +++ b/compiler/image_test.cc @@ -97,8 +97,10 @@ TEST_F(ImageTest, WriteRead) { ASSERT_TRUE(dup_oat.get() != nullptr); { - bool success_image = - writer->Write(image_file.GetFilename(), dup_oat->GetPath(), dup_oat->GetPath()); + bool success_image = writer->Write(kInvalidImageFd, + image_file.GetFilename(), + dup_oat->GetPath(), + dup_oat->GetPath()); ASSERT_TRUE(success_image); bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin()); ASSERT_TRUE(success_fixup); diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 0e5a97ffbd..af2a4f9426 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -122,7 +122,8 @@ bool ImageWriter::PrepareImageAddressSpace() { return true; } -bool ImageWriter::Write(const std::string& image_filename, +bool ImageWriter::Write(int image_fd, + const std::string& image_filename, const std::string& oat_filename, const std::string& oat_location) { CHECK(!image_filename.empty()); @@ -178,10 +179,13 @@ bool ImageWriter::Write(const std::string& image_filename, LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location; return false; } - - std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str())); - ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin()); - if (image_file.get() == nullptr) { + std::unique_ptr<File> image_file; + if (image_fd != kInvalidImageFd) { + image_file.reset(new File(image_fd, image_filename, unix_file::kCheckSafeUsage)); + } else { + image_file.reset(OS::CreateEmptyFile(image_filename.c_str())); + } + if (image_file == nullptr) { LOG(ERROR) << "Failed to open image file " << image_filename; return false; } @@ -192,6 +196,7 @@ bool ImageWriter::Write(const std::string& image_filename, } // Write out the image + fields + methods. + ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_->Begin()); const auto write_count = image_header->GetImageSize(); if (!image_file->WriteFully(image_->Begin(), write_count)) { PLOG(ERROR) << "Failed to write image file " << image_filename; @@ -200,7 +205,8 @@ bool ImageWriter::Write(const std::string& image_filename, } // Write out the image bitmap at the page aligned start of the image end. 
- const ImageSection& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap); + const ImageSection& bitmap_section = image_header->GetImageSection( + ImageHeader::kSectionImageBitmap); CHECK_ALIGNED(bitmap_section.Offset(), kPageSize); if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()), bitmap_section.Size(), bitmap_section.Offset())) { diff --git a/compiler/image_writer.h b/compiler/image_writer.h index e235bc4553..7a2febcea1 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -41,6 +41,8 @@ namespace art { +static constexpr int kInvalidImageFd = -1; + // Write a Space built during compilation for use during execution. class ImageWriter FINAL { public: @@ -89,7 +91,11 @@ class ImageWriter FINAL { uint8_t* GetOatFileBegin() const; - bool Write(const std::string& image_filename, const std::string& oat_filename, + // If image_fd is not kInvalidImageFd, then we use that for the file. Otherwise we open + // image_filename. + bool Write(int image_fd, + const std::string& image_filename, + const std::string& oat_filename, const std::string& oat_location) REQUIRES(!Locks::mutator_lock_); diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index c1b87c9cd0..d520208d32 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -192,7 +192,10 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) { CompiledMethod* compiled_method = nullptr; { TimingLogger::ScopedTiming t2("Compiling", &logger); - compiled_method = compiler_driver_->CompileArtMethod(self, method); + // If we get a request to compile a proxy method, we pass the actual Java method + // of that proxy method, as the compiler does not expect a proxy method. + ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*)); + compiled_method = compiler_driver_->CompileArtMethod(self, method_to_compile); } // Trim maps to reduce memory usage. diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 8d9794bd79..3dc3b7fba0 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -56,6 +56,8 @@ static constexpr SRegister kFpuCalleeSaves[] = // S registers. Therefore there is no need to block it. static constexpr DRegister DTMP = D31; +static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6; + #define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value() @@ -513,17 +515,6 @@ void CodeGeneratorARM::Finalize(CodeAllocator* allocator) { uint32_t new_position = __ GetAdjustedPosition(old_position); stack_map_stream_.SetStackMapNativePcOffset(i, new_position); } - // Adjust native pc offsets of block labels. - for (HBasicBlock* block : *block_order_) { - // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid - // FirstNonEmptyBlock() which could lead to adjusting a label more than once. - DCHECK_LT(block->GetBlockId(), GetGraph()->GetBlocks().size()); - Label* block_label = &block_labels_[block->GetBlockId()]; - DCHECK_EQ(block_label->IsBound(), !block->IsSingleJump()); - if (block_label->IsBound()) { - __ AdjustLabelPosition(block_label); - } - } // Adjust pc offsets for the disassembly information. 
if (disasm_info_ != nullptr) { GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval(); @@ -538,10 +529,6 @@ void CodeGeneratorARM::Finalize(CodeAllocator* allocator) { it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end); } } - // Adjust pc offsets for relative call patches. - for (MethodPatchInfo<Label>& info : relative_call_patches_) { - __ AdjustLabelPosition(&info.label); - } CodeGenerator::Finalize(allocator); } @@ -732,7 +719,8 @@ void CodeGeneratorARM::GenerateFrameExit() { } void CodeGeneratorARM::Bind(HBasicBlock* block) { - __ Bind(GetLabelOf(block)); + Label* label = GetLabelOf(block); + __ BindTrackedLabel(label); } Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const { @@ -5255,7 +5243,7 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, break; case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: relative_call_patches_.emplace_back(invoke->GetTargetMethod()); - __ Bind(&relative_call_patches_.back().label); + __ BindTrackedLabel(&relative_call_patches_.back().label); // Arbitrarily branch to the BL itself, override at link time. __ bl(&relative_call_patches_.back().label); break; @@ -5378,25 +5366,64 @@ void LocationsBuilderARM::VisitPackedSwitch(HPackedSwitch* switch_instr) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); + if (switch_instr->GetNumEntries() >= kPackedSwitchJumpTableThreshold && + codegen_->GetAssembler()->IsThumb()) { + locations->AddTemp(Location::RequiresRegister()); // We need a temp for the table base. + if (switch_instr->GetStartValue() != 0) { + locations->AddTemp(Location::RequiresRegister()); // We need a temp for the bias. + } + } } void InstructionCodeGeneratorARM::VisitPackedSwitch(HPackedSwitch* switch_instr) { int32_t lower_bound = switch_instr->GetStartValue(); - int32_t num_entries = switch_instr->GetNumEntries(); + uint32_t num_entries = switch_instr->GetNumEntries(); LocationSummary* locations = switch_instr->GetLocations(); Register value_reg = locations->InAt(0).AsRegister<Register>(); HBasicBlock* default_block = switch_instr->GetDefaultBlock(); - // Create a series of compare/jumps. - const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors(); - for (int32_t i = 0; i < num_entries; i++) { - GenerateCompareWithImmediate(value_reg, lower_bound + i); - __ b(codegen_->GetLabelOf(successors[i]), EQ); - } + if (num_entries < kPackedSwitchJumpTableThreshold || !codegen_->GetAssembler()->IsThumb()) { + // Create a series of compare/jumps. + const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors(); + for (uint32_t i = 0; i < num_entries; i++) { + GenerateCompareWithImmediate(value_reg, lower_bound + i); + __ b(codegen_->GetLabelOf(successors[i]), EQ); + } + + // And the default for any other value. + if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) { + __ b(codegen_->GetLabelOf(default_block)); + } + } else { + // Create a table lookup. 
+ Register temp_reg = locations->GetTemp(0).AsRegister<Register>(); + + // Materialize a pointer to the switch table + std::vector<Label*> labels(num_entries); + const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors(); + for (uint32_t i = 0; i < num_entries; i++) { + labels[i] = codegen_->GetLabelOf(successors[i]); + } + JumpTable* table = __ CreateJumpTable(std::move(labels), temp_reg); + + // Remove the bias. + Register key_reg; + if (lower_bound != 0) { + key_reg = locations->GetTemp(1).AsRegister<Register>(); + __ AddConstant(key_reg, value_reg, -lower_bound); + } else { + key_reg = value_reg; + } + + // Check whether the value is in the table, jump to default block if not. + __ CmpConstant(key_reg, num_entries - 1); + __ b(codegen_->GetLabelOf(default_block), Condition::HI); + + // Load the displacement from the table. + __ ldr(temp_reg, Address(temp_reg, key_reg, Shift::LSL, 2)); - // And the default for any other value. - if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) { - __ b(codegen_->GetLabelOf(default_block)); + // Dispatch is a direct add to the PC (for Thumb2). + __ EmitJumpTableDispatch(table, temp_reg); } } diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 0aaa6b3f2c..353881e47a 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -494,6 +494,26 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, << " it is in a different dex file and requires access to the dex cache"; return false; } + + if (current->IsNewInstance() && + (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) { + // Allocation entrypoint does not handle inlined frames. + return false; + } + + if (current->IsNewArray() && + (current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) { + // Allocation entrypoint does not handle inlined frames. + return false; + } + + if (current->IsUnresolvedStaticFieldGet() || + current->IsUnresolvedInstanceFieldGet() || + current->IsUnresolvedStaticFieldSet() || + current->IsUnresolvedInstanceFieldSet()) { + // Entrypoint for unresolved fields does not handle inlined frames. + return false; + } } } number_of_inlined_instructions_ += number_of_instructions; diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc index 90f28e511e..6fbb6823d6 100644 --- a/compiler/optimizing/load_store_elimination.cc +++ b/compiler/optimizing/load_store_elimination.cc @@ -59,7 +59,7 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> { (use->IsInstanceFieldSet() && (reference_ == use->InputAt(1))) || (use->IsUnresolvedInstanceFieldSet() && (reference_ == use->InputAt(1))) || (use->IsStaticFieldSet() && (reference_ == use->InputAt(1))) || - (use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(1))) || + (use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(0))) || (use->IsArraySet() && (reference_ == use->InputAt(2)))) { // reference_ is merged to a phi, passed to a callee, or stored to heap. // reference_ isn't the only name that can refer to its value anymore. 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 8b28ff91d4..68fb0acf7f 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -1652,7 +1652,8 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // Update the meta information surrounding blocks: // (1) the graph they are now in, // (2) the reverse post order of that graph, - // (3) the potential loop information they are now in. + // (3) the potential loop information they are now in, + // (4) try block membership. // We don't add the entry block, the exit block, and the first block, which // has been merged with `at`. @@ -1668,41 +1669,47 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at); MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at); - // Do a reverse post order of the blocks in the callee and do (1), (2), - // and (3) to the blocks that apply. - HLoopInformation* info = at->GetLoopInformation(); + HLoopInformation* loop_info = at->GetLoopInformation(); + // Copy TryCatchInformation if `at` is a try block, not if it is a catch block. + TryCatchInformation* try_catch_info = at->IsTryBlock() ? at->GetTryCatchInformation() : nullptr; + + // Do a reverse post order of the blocks in the callee and do (1), (2), (3) + // and (4) to the blocks that apply. for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { HBasicBlock* current = it.Current(); if (current != exit_block_ && current != entry_block_ && current != first) { DCHECK(!current->IsInLoop()); + DCHECK(current->GetTryCatchInformation() == nullptr); DCHECK(current->GetGraph() == this); current->SetGraph(outer_graph); outer_graph->AddBlock(current); outer_graph->reverse_post_order_[++index_of_at] = current; - if (info != nullptr) { - current->SetLoopInformation(info); + if (loop_info != nullptr) { + current->SetLoopInformation(loop_info); for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) { loop_it.Current()->Add(current); } } + current->SetTryCatchInformation(try_catch_info); } } - // Do (1), (2), and (3) to `to`. + // Do (1), (2), (3) and (4) to `to`. to->SetGraph(outer_graph); outer_graph->AddBlock(to); outer_graph->reverse_post_order_[++index_of_at] = to; - if (info != nullptr) { - to->SetLoopInformation(info); + if (loop_info != nullptr) { + to->SetLoopInformation(loop_info); for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) { loop_it.Current()->Add(to); } - if (info->IsBackEdge(*at)) { + if (loop_info->IsBackEdge(*at)) { // Only `to` can become a back edge, as the inlined blocks // are predecessors of `to`. - info->ReplaceBackEdge(at, to); + loop_info->ReplaceBackEdge(at, to); } } + to->SetTryCatchInformation(try_catch_info); } // Update the next instruction id of the outer graph, so that instructions diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 7df586692b..0f2c1cffee 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -4750,6 +4750,9 @@ class HLoadClass : public HExpression<1> { return generate_clinit_check_; } void SetMustGenerateClinitCheck(bool generate_clinit_check) { + // The entrypoint the code generator is going to call does not do + // clinit of the class. 
+ DCHECK(!NeedsAccessCheck()); generate_clinit_check_ = generate_clinit_check; } diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 6632f95ebe..8cb2cfc816 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -492,6 +492,8 @@ static void RunOptimizations(HGraph* graph, RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer); + MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles); + // TODO: Update passes incompatible with try/catch so we have the same // pipeline for all methods. if (graph->HasTryCatch()) { @@ -507,8 +509,6 @@ static void RunOptimizations(HGraph* graph, RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer); } else { - MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles); - HOptimization* optimizations2[] = { // BooleanSimplifier depends on the InstructionSimplifier removing // redundant suspend checks to recognize empty blocks. diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 807bedaa04..68e39568bb 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -16,6 +16,8 @@ #include "assembler_arm.h" +#include <algorithm> + #include "base/bit_utils.h" #include "base/logging.h" #include "entrypoints/quick/quick_entrypoints.h" @@ -922,5 +924,24 @@ uint32_t ArmAssembler::ModifiedImmediate(uint32_t value) { return value | i << 26 | imm3 << 12 | a << 7; } +void ArmAssembler::FinalizeTrackedLabels() { + if (!tracked_labels_.empty()) { + // This array should be sorted, as assembly is generated in linearized order. It isn't + // technically required, but GetAdjustedPosition() used in AdjustLabelPosition() can take + // advantage of it. So ensure that it's actually the case. + DCHECK(std::is_sorted( + tracked_labels_.begin(), + tracked_labels_.end(), + [](const Label* lhs, const Label* rhs) { return lhs->Position() < rhs->Position(); })); + + Label* last_label = nullptr; // Track duplicates, we must not adjust twice. + for (Label* label : tracked_labels_) { + DCHECK_NE(label, last_label); + AdjustLabelPosition(label); + last_label = label; + } + } +} + } // namespace arm } // namespace art diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h index d59bc6be40..4a6e6d7c3f 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -77,6 +77,45 @@ class Literal { DISALLOW_COPY_AND_ASSIGN(Literal); }; +// Jump table: table of labels emitted after the literals. Similar to literals. 
+class JumpTable { + public: + explicit JumpTable(std::vector<Label*>&& labels) + : label_(), anchor_label_(), labels_(std::move(labels)) { + } + + uint32_t GetSize() const { + return static_cast<uint32_t>(labels_.size()) * sizeof(uint32_t); + } + + const std::vector<Label*>& GetData() const { + return labels_; + } + + Label* GetLabel() { + return &label_; + } + + const Label* GetLabel() const { + return &label_; + } + + Label* GetAnchorLabel() { + return &anchor_label_; + } + + const Label* GetAnchorLabel() const { + return &anchor_label_; + } + + private: + Label label_; + Label anchor_label_; + std::vector<Label*> labels_; + + DISALLOW_COPY_AND_ASSIGN(JumpTable); +}; + class ShifterOperand { public: ShifterOperand() : type_(kUnknown), rm_(kNoRegister), rs_(kNoRegister), @@ -685,6 +724,8 @@ class ArmAssembler : public Assembler { AddConstant(rd, rd, value, cond, set_cc); } + virtual void CmpConstant(Register rn, int32_t value, Condition cond = AL) = 0; + // Load and Store. May clobber IP. virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0; void LoadSImmediate(SRegister sd, float value, Condition cond = AL) { @@ -996,11 +1037,43 @@ class ArmAssembler : public Assembler { b(label); } + // Jump table support. This is split into three functions: + // + // * CreateJumpTable creates the internal metadata to track the jump targets, and emits code to + // load the base address of the jump table. + // + // * EmitJumpTableDispatch emits the code to actually jump, assuming that the right table value + // has been loaded into a register already. + // + // * FinalizeTables emits the jump table into the literal pool. This can only be called after the + // labels for the jump targets have been finalized. + + // Create a jump table for the given labels that will be emitted when finalizing. Create a load + // sequence (or placeholder) that stores the base address into the given register. When the table + // is emitted, offsets will be relative to the location EmitJumpTableDispatch was called on (the + // anchor). + virtual JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) = 0; + + // Emit the jump-table jump, assuming that the right value was loaded into displacement_reg. + virtual void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) = 0; + + // Bind a Label that needs to be updated by the assembler in FinalizeCode() if its position + // changes due to branch/literal fixup. + void BindTrackedLabel(Label* label) { + Bind(label); + tracked_labels_.push_back(label); + } + protected: // Returns whether or not the given register is used for passing parameters. static int RegisterCompare(const Register* reg1, const Register* reg2) { return *reg1 - *reg2; } + + void FinalizeTrackedLabels(); + + // Tracked labels. Use a vector, as we need to sort before adjusting. 
+ std::vector<Label*> tracked_labels_; }; // Slowpath entered when Thread::Current()->_exception is non-null diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc index 6e7c828b4a..a7dbacd3a9 100644 --- a/compiler/utils/arm/assembler_arm32.cc +++ b/compiler/utils/arm/assembler_arm32.cc @@ -1385,6 +1385,21 @@ void Arm32Assembler::AddConstant(Register rd, Register rn, int32_t value, } } +void Arm32Assembler::CmpConstant(Register rn, int32_t value, Condition cond) { + ShifterOperand shifter_op; + if (ShifterOperandCanHoldArm32(value, &shifter_op)) { + cmp(rn, shifter_op, cond); + } else if (ShifterOperandCanHoldArm32(~value, &shifter_op)) { + cmn(rn, shifter_op, cond); + } else { + movw(IP, Low16Bits(value), cond); + uint16_t value_high = High16Bits(value); + if (value_high != 0) { + movt(IP, value_high, cond); + } + cmp(rn, ShifterOperand(IP), cond); + } +} void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) { ShifterOperand shifter_op; @@ -1584,6 +1599,23 @@ void Arm32Assembler::CompareAndBranchIfNonZero(Register r, Label* label) { b(label, NE); } +JumpTable* Arm32Assembler::CreateJumpTable(std::vector<Label*>&& labels ATTRIBUTE_UNUSED, + Register base_reg ATTRIBUTE_UNUSED) { + LOG(FATAL) << "CreateJumpTable is not supported on ARM32"; + UNREACHABLE(); +} + +void Arm32Assembler::EmitJumpTableDispatch(JumpTable* jump_table ATTRIBUTE_UNUSED, + Register displacement_reg ATTRIBUTE_UNUSED) { + LOG(FATAL) << "EmitJumpTableDispatch is not supported on ARM32"; + UNREACHABLE(); +} + +void Arm32Assembler::FinalizeCode() { + ArmAssembler::FinalizeCode(); + // Currently the arm32 assembler does not support fixups, and thus no tracking. We must not call + // FinalizeTrackedLabels(), which would lead to an abort. +} } // namespace arm } // namespace art diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h index 4646538716..5233dcbbb0 100644 --- a/compiler/utils/arm/assembler_arm32.h +++ b/compiler/utils/arm/assembler_arm32.h @@ -261,6 +261,8 @@ class Arm32Assembler FINAL : public ArmAssembler { void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE; + void CmpConstant(Register rn, int32_t value, Condition cond = AL) OVERRIDE; + // Load and Store. May clobber IP. void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE; void MarkExceptionHandler(Label* label) OVERRIDE; @@ -308,6 +310,11 @@ class Arm32Assembler FINAL : public ArmAssembler { void MemoryBarrier(ManagedRegister scratch) OVERRIDE; + JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE; + void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE; + + void FinalizeCode() OVERRIDE; + private: void EmitType01(Condition cond, int type, diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index cc87856e82..fb3aa1ea85 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -92,7 +92,7 @@ void Thumb2Assembler::BindLabel(Label* label, uint32_t bound_pc) { label->BindTo(bound_pc); } -void Thumb2Assembler::BindLiterals() { +uint32_t Thumb2Assembler::BindLiterals() { // We don't add the padding here, that's done only after adjusting the Fixup sizes. 
uint32_t code_size = buffer_.Size(); for (Literal& lit : literals_) { @@ -100,6 +100,15 @@ void Thumb2Assembler::BindLiterals() { BindLabel(label, code_size); code_size += lit.GetSize(); } + return code_size; +} + +void Thumb2Assembler::BindJumpTables(uint32_t code_size) { + for (JumpTable& table : jump_tables_) { + Label* label = table.GetLabel(); + BindLabel(label, code_size); + code_size += table.GetSize(); + } } void Thumb2Assembler::AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_size, @@ -144,7 +153,7 @@ uint32_t Thumb2Assembler::AdjustFixups() { AdjustFixupIfNeeded(fixup, ¤t_code_size, &fixups_to_recalculate); } while (!fixups_to_recalculate.empty()); - if ((current_code_size & 2) != 0 && !literals_.empty()) { + if ((current_code_size & 2) != 0 && (!literals_.empty() || !jump_tables_.empty())) { // If we need to add padding before literals, this may just push some out of range, // so recalculate all load literals. This makes up for the fact that we don't mark // load literal as a dependency of all previous Fixups even though it actually is. @@ -173,6 +182,13 @@ uint32_t Thumb2Assembler::AdjustFixups() { label->Reinitialize(); label->BindTo(old_position + literals_adjustment); } + for (JumpTable& table : jump_tables_) { + Label* label = table.GetLabel(); + DCHECK(label->IsBound()); + int old_position = label->Position(); + label->Reinitialize(); + label->BindTo(old_position + literals_adjustment); + } } return current_code_size; @@ -229,6 +245,43 @@ void Thumb2Assembler::EmitLiterals() { } } +void Thumb2Assembler::EmitJumpTables() { + if (!jump_tables_.empty()) { + // Jump tables require 4 byte alignment. (We don't support byte and half-word jump tables.) + uint32_t code_size = buffer_.Size(); + DCHECK_ALIGNED(code_size, 2); + if ((code_size & 2u) != 0u) { + Emit16(0); + } + for (JumpTable& table : jump_tables_) { + // Bulk ensure capacity, as this may be large. + size_t orig_size = buffer_.Size(); + buffer_.ExtendCapacity(orig_size + table.GetSize()); +#ifndef NDEBUG + buffer_.has_ensured_capacity_ = true; +#endif + + DCHECK_EQ(static_cast<size_t>(table.GetLabel()->Position()), buffer_.Size()); + int32_t anchor_position = table.GetAnchorLabel()->Position() + 4; + + for (Label* target : table.GetData()) { + // Ensure that the label was tracked, so that it will have the right position. 
+ DCHECK(std::find(tracked_labels_.begin(), tracked_labels_.end(), target) != + tracked_labels_.end()); + + int32_t offset = target->Position() - anchor_position; + buffer_.Emit<int32_t>(offset); + } + +#ifndef NDEBUG + buffer_.has_ensured_capacity_ = false; +#endif + size_t new_size = buffer_.Size(); + DCHECK_LE(new_size - orig_size, table.GetSize()); + } + } +} + inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) { DCHECK_ALIGNED(offset, 2); int16_t encoding = B15 | B14; @@ -382,12 +435,34 @@ inline int32_t Thumb2Assembler::LdrRtRnImm12Encoding(Register rt, Register rn, i return B31 | B30 | B29 | B28 | B27 | B23 | B22 | B20 | (rn << 16) | (rt << 12) | offset; } +inline int16_t Thumb2Assembler::AdrEncoding16(Register rd, int32_t offset) { + DCHECK(IsUint<10>(offset)); + DCHECK(IsAligned<4>(offset)); + DCHECK(!IsHighRegister(rd)); + return B15 | B13 | (rd << 8) | (offset >> 2); +} + +inline int32_t Thumb2Assembler::AdrEncoding32(Register rd, int32_t offset) { + DCHECK(IsUint<12>(offset)); + // Bit 26: offset[11] + // Bits 14-12: offset[10-8] + // Bits 7-0: offset[7-0] + int32_t immediate_mask = + ((offset & (1 << 11)) << (26 - 11)) | + ((offset & (7 << 8)) << (12 - 8)) | + (offset & 0xFF); + return B31 | B30 | B29 | B28 | B25 | B19 | B18 | B17 | B16 | (rd << 8) | immediate_mask; +} + void Thumb2Assembler::FinalizeCode() { ArmAssembler::FinalizeCode(); - BindLiterals(); + uint32_t size_after_literals = BindLiterals(); + BindJumpTables(size_after_literals); uint32_t adjusted_code_size = AdjustFixups(); EmitFixups(adjusted_code_size); EmitLiterals(); + FinalizeTrackedLabels(); + EmitJumpTables(); } bool Thumb2Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) { @@ -1770,6 +1845,15 @@ inline size_t Thumb2Assembler::Fixup::SizeInBytes(Size size) { case kLiteralFar: return 14u; + case kLiteralAddr1KiB: + return 2u; + case kLiteralAddr4KiB: + return 4u; + case kLiteralAddr64KiB: + return 6u; + case kLiteralAddrFar: + return 10u; + case kLongOrFPLiteral1KiB: return 4u; case kLongOrFPLiteral256KiB: @@ -1831,6 +1915,8 @@ inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) con case kLiteral1KiB: case kLiteral4KiB: case kLongOrFPLiteral1KiB: + case kLiteralAddr1KiB: + case kLiteralAddr4KiB: DCHECK(diff >= 0 || (GetSize() == kLiteral1KiB && diff == -2)); diff += LiteralPoolPaddingSize(current_code_size); // Load literal instructions round down the PC+4 to a multiple of 4, so if the PC @@ -1843,12 +1929,14 @@ inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) con case kLiteral1MiB: case kLiteral64KiB: case kLongOrFPLiteral256KiB: + case kLiteralAddr64KiB: DCHECK_GE(diff, 4); // The target must be at least 4 bytes after the ADD rX, PC. diff -= 4; // One extra 32-bit MOV. diff += LiteralPoolPaddingSize(current_code_size); break; case kLiteralFar: case kLongOrFPLiteralFar: + case kLiteralAddrFar: DCHECK_GE(diff, 8); // The target must be at least 4 bytes after the ADD rX, PC. diff -= 8; // Extra MOVW+MOVT; both 32-bit. diff += LiteralPoolPaddingSize(current_code_size); @@ -1929,6 +2017,29 @@ uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size) // This encoding can reach any target. 
break; + case kLiteralAddr1KiB: + DCHECK(!IsHighRegister(rn_)); + if (IsUint<10>(GetOffset(current_code_size))) { + break; + } + current_code_size += IncreaseSize(kLiteralAddr4KiB); + FALLTHROUGH_INTENDED; + case kLiteralAddr4KiB: + if (IsUint<12>(GetOffset(current_code_size))) { + break; + } + current_code_size += IncreaseSize(kLiteralAddr64KiB); + FALLTHROUGH_INTENDED; + case kLiteralAddr64KiB: + if (IsUint<16>(GetOffset(current_code_size))) { + break; + } + current_code_size += IncreaseSize(kLiteralAddrFar); + FALLTHROUGH_INTENDED; + case kLiteralAddrFar: + // This encoding can reach any target. + break; + case kLongOrFPLiteral1KiB: if (IsUint<10>(GetOffset(current_code_size))) { break; @@ -2055,6 +2166,42 @@ void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) c break; } + case kLiteralAddr1KiB: { + DCHECK(type_ == kLoadLiteralAddr); + int16_t encoding = AdrEncoding16(rn_, GetOffset(code_size)); + buffer->Store<int16_t>(location_, encoding); + break; + } + case kLiteralAddr4KiB: { + DCHECK(type_ == kLoadLiteralAddr); + int32_t encoding = AdrEncoding32(rn_, GetOffset(code_size)); + buffer->Store<int16_t>(location_, encoding >> 16); + buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff)); + break; + } + case kLiteralAddr64KiB: { + DCHECK(type_ == kLoadLiteralAddr); + int32_t mov_encoding = MovwEncoding32(rn_, GetOffset(code_size)); + int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC); + buffer->Store<int16_t>(location_, mov_encoding >> 16); + buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff)); + buffer->Store<int16_t>(location_ + 4u, add_pc_encoding); + break; + } + case kLiteralAddrFar: { + DCHECK(type_ == kLoadLiteralAddr); + int32_t offset = GetOffset(code_size); + int32_t movw_encoding = MovwEncoding32(rn_, offset & 0xffff); + int32_t movt_encoding = MovtEncoding32(rn_, offset & ~0xffff); + int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC); + buffer->Store<int16_t>(location_, movw_encoding >> 16); + buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(movw_encoding & 0xffff)); + buffer->Store<int16_t>(location_ + 4u, movt_encoding >> 16); + buffer->Store<int16_t>(location_ + 6u, static_cast<int16_t>(movt_encoding & 0xffff)); + buffer->Store<int16_t>(location_ + 8u, add_pc_encoding); + break; + } + case kLongOrFPLiteral1KiB: { int32_t encoding = LoadWideOrFpEncoding(PC, GetOffset(code_size)); // DCHECKs type_. buffer->Store<int16_t>(location_, encoding >> 16); @@ -3260,6 +3407,25 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value, } } +void Thumb2Assembler::CmpConstant(Register rn, int32_t value, Condition cond) { + // We prefer to select the shorter code sequence rather than selecting add for + // positive values and sub for negatives ones, which would slightly improve + // the readability of generated code for some constants. 
+ ShifterOperand shifter_op; + if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, &shifter_op)) { + cmp(rn, shifter_op, cond); + } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, ~value, &shifter_op)) { + cmn(rn, shifter_op, cond); + } else { + CHECK(rn != IP); + movw(IP, Low16Bits(value), cond); + uint16_t value_high = High16Bits(value); + if (value_high != 0) { + movt(IP, value_high, cond); + } + cmp(rn, ShifterOperand(IP), cond); + } +} void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) { ShifterOperand shifter_op; @@ -3476,5 +3642,39 @@ void Thumb2Assembler::CompareAndBranchIfNonZero(Register r, Label* label) { b(label, NE); } } + +JumpTable* Thumb2Assembler::CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) { + jump_tables_.emplace_back(std::move(labels)); + JumpTable* table = &jump_tables_.back(); + DCHECK(!table->GetLabel()->IsBound()); + + bool use32bit = IsForced32Bit() || IsHighRegister(base_reg); + uint32_t location = buffer_.Size(); + Fixup::Size size = use32bit ? Fixup::kLiteralAddr4KiB : Fixup::kLiteralAddr1KiB; + FixupId fixup_id = AddFixup(Fixup::LoadLiteralAddress(location, base_reg, size)); + Emit16(static_cast<uint16_t>(table->GetLabel()->position_)); + table->GetLabel()->LinkTo(fixup_id); + if (use32bit) { + Emit16(0); + } + DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size()); + + return table; +} + +void Thumb2Assembler::EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) { + CHECK(!IsForced32Bit()) << "Forced 32-bit dispatch not implemented yet"; + // 32-bit ADD doesn't support PC as an input, so we need a two-instruction sequence: + // SUB ip, ip, #0 + // ADD pc, ip, reg + // TODO: Implement. + + // The anchor's position needs to be fixed up before we can compute offsets - so make it a tracked + // label. + BindTrackedLabel(jump_table->GetAnchorLabel()); + + add(PC, PC, ShifterOperand(displacement_reg)); +} + } // namespace arm } // namespace art diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h index 055b1379ad..38fd244087 100644 --- a/compiler/utils/arm/assembler_thumb2.h +++ b/compiler/utils/arm/assembler_thumb2.h @@ -18,6 +18,7 @@ #define ART_COMPILER_UTILS_ARM_ASSEMBLER_THUMB2_H_ #include <deque> +#include <utility> #include <vector> #include "base/logging.h" @@ -304,6 +305,8 @@ class Thumb2Assembler FINAL : public ArmAssembler { void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE; + void CmpConstant(Register rn, int32_t value, Condition cond = AL) OVERRIDE; + // Load and Store. May clobber IP. void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE; void MarkExceptionHandler(Label* label) OVERRIDE; @@ -358,6 +361,12 @@ class Thumb2Assembler FINAL : public ArmAssembler { force_32bit_ = true; } + // Emit an ADR (or a sequence of instructions) to load the jump table address into base_reg. This + // will generate a fixup. + JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE; + // Emit an ADD PC, X to dispatch a jump-table jump. This will generate a fixup. + void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE; + private: typedef uint16_t FixupId; @@ -399,6 +408,7 @@ class Thumb2Assembler FINAL : public ArmAssembler { kCompareAndBranchXZero, // cbz/cbnz. kLoadLiteralNarrow, // Load narrrow integer literal. kLoadLiteralWide, // Load wide integer literal. 
+ kLoadLiteralAddr, // Load address of literal (used for jump table). kLoadFPLiteralSingle, // Load FP literal single. kLoadFPLiteralDouble, // Load FP literal double. }; @@ -429,6 +439,16 @@ class Thumb2Assembler FINAL : public ArmAssembler { // MOV rX, imm16 + MOVT rX, imm16 + ADD rX, pc + LDR rX, [rX]; any offset; 14 bytes. kLiteralFar, + // Load literal base addr. + // ADR rX, label; X < 8; 8 bit immediate, shifted to 10 bit. 2 bytes. + kLiteralAddr1KiB, + // ADR rX, label; 4KiB offset. 4 bytes. + kLiteralAddr4KiB, + // MOV rX, imm16 + ADD rX, pc; 64KiB offset. 6 bytes. + kLiteralAddr64KiB, + // MOV rX, imm16 + MOVT rX, imm16 + ADD rX, pc; any offset; 10 bytes. + kLiteralAddrFar, + // Load long or FP literal variants. // VLDR s/dX, label; 32-bit insn, up to 1KiB offset; 4 bytes. kLongOrFPLiteral1KiB, @@ -457,7 +477,7 @@ class Thumb2Assembler FINAL : public ArmAssembler { } // Load narrow literal. - static Fixup LoadNarrowLiteral(uint32_t location, Register rt, Size size = kLiteral1KiB) { + static Fixup LoadNarrowLiteral(uint32_t location, Register rt, Size size) { DCHECK(size == kLiteral1KiB || size == kLiteral4KiB || size == kLiteral64KiB || size == kLiteral1MiB || size == kLiteralFar); DCHECK(!IsHighRegister(rt) || (size != kLiteral1KiB && size != kLiteral64KiB)); @@ -493,6 +513,14 @@ class Thumb2Assembler FINAL : public ArmAssembler { AL, kLoadFPLiteralDouble, size, location); } + static Fixup LoadLiteralAddress(uint32_t location, Register rt, Size size) { + DCHECK(size == kLiteralAddr1KiB || size == kLiteralAddr4KiB || size == kLiteralAddr64KiB || + size == kLiteralAddrFar); + DCHECK(!IsHighRegister(rt) || size != kLiteralAddr1KiB); + return Fixup(rt, kNoRegister, kNoSRegister, kNoDRegister, + AL, kLoadLiteralAddr, size, location); + } + Type GetType() const { return type_; } @@ -756,12 +784,14 @@ class Thumb2Assembler FINAL : public ArmAssembler { } void BindLabel(Label* label, uint32_t bound_pc); - void BindLiterals(); + uint32_t BindLiterals(); + void BindJumpTables(uint32_t code_size); void AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_size, std::deque<FixupId>* fixups_to_recalculate); uint32_t AdjustFixups(); void EmitFixups(uint32_t adjusted_code_size); void EmitLiterals(); + void EmitJumpTables(); static int16_t BEncoding16(int32_t offset, Condition cond); static int32_t BEncoding32(int32_t offset, Condition cond); @@ -778,6 +808,8 @@ class Thumb2Assembler FINAL : public ArmAssembler { static int32_t VldrdEncoding32(DRegister dd, Register rn, int32_t offset); static int16_t LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset); static int32_t LdrRtRnImm12Encoding(Register rt, Register rn, int32_t offset); + static int16_t AdrEncoding16(Register rd, int32_t offset); + static int32_t AdrEncoding32(Register rd, int32_t offset); std::vector<Fixup> fixups_; std::unique_ptr<FixupId[]> fixup_dependents_; @@ -786,6 +818,9 @@ class Thumb2Assembler FINAL : public ArmAssembler { // without invalidating pointers and references to existing elements. std::deque<Literal> literals_; + // Jump table list. + std::deque<JumpTable> jump_tables_; + // Data for AdjustedPosition(), see the description there. 
uint32_t last_position_adjustment_; uint32_t last_old_position_; diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc index 9c08ce017e..cb4b20b5ba 100644 --- a/compiler/utils/arm/assembler_thumb2_test.cc +++ b/compiler/utils/arm/assembler_thumb2_test.cc @@ -17,6 +17,7 @@ #include "assembler_thumb2.h" #include "base/stl_util.h" +#include "base/stringprintf.h" #include "utils/assembler_test.h" namespace art { @@ -1011,6 +1012,315 @@ TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1KiBDueToAlignmentOnSecondPass) __ GetAdjustedPosition(label.Position())); } +TEST_F(AssemblerThumb2Test, BindTrackedLabel) { + Label non_tracked, tracked, branch_target; + + // A few dummy loads on entry. + constexpr size_t kLdrR0R0Count = 5; + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + // A branch that will need to be fixed up. + __ cbz(arm::R0, &branch_target); + + // Some more dummy loads. + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + // Now insert tracked and untracked label. + __ Bind(&non_tracked); + __ BindTrackedLabel(&tracked); + + // A lot of dummy loads, to ensure the branch needs resizing. + constexpr size_t kLdrR0R0CountLong = 60; + for (size_t i = 0; i != kLdrR0R0CountLong; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + // Bind the branch target. + __ Bind(&branch_target); + + // One more load. + __ ldr(arm::R0, arm::Address(arm::R0)); + + std::string expected = + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + "cmp r0, #0\n" // cbz r0, 1f + "beq.n 1f\n" + + RepeatInsn(kLdrR0R0Count + kLdrR0R0CountLong, "ldr r0, [r0]\n") + + "1:\n" + "ldr r0, [r0]\n"; + DriverStr(expected, "BindTrackedLabel"); + + // Expectation is that the tracked label should have moved. + EXPECT_LT(non_tracked.Position(), tracked.Position()); +} + +TEST_F(AssemblerThumb2Test, JumpTable) { + // The jump table. Use three labels. + Label label1, label2, label3; + std::vector<Label*> labels({ &label1, &label2, &label3 }); + + // A few dummy loads on entry, interspersed with 2 labels. + constexpr size_t kLdrR0R0Count = 5; + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label1); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label2); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + // Create the jump table, emit the base load. + arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1); + + // Dummy computation, stand-in for the address. We're only testing the jump table here, not how + // it's being used. + __ ldr(arm::R0, arm::Address(arm::R0)); + + // Emit the jump + __ EmitJumpTableDispatch(jump_table, arm::R1); + + // Some more dummy instructions. + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label3); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { // Note: odd so there's no alignment + __ ldr(arm::R0, arm::Address(arm::R0)); // necessary, as gcc as emits nops, + } // whereas we emit 0 != nop. 
+ + static_assert((kLdrR0R0Count + 3) * 2 < 1 * KB, "Too much offset"); + + std::string expected = + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L1:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L2:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + "adr r1, .Ljump_table\n" + "ldr r0, [r0]\n" + ".Lbase:\n" + "add pc, r1\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L3:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".align 2\n" + ".Ljump_table:\n" + ".4byte (.L1 - .Lbase - 4)\n" + ".4byte (.L2 - .Lbase - 4)\n" + ".4byte (.L3 - .Lbase - 4)\n"; + DriverStr(expected, "JumpTable"); +} + +// Test for >1K fixup. +TEST_F(AssemblerThumb2Test, JumpTable4K) { + // The jump table. Use three labels. + Label label1, label2, label3; + std::vector<Label*> labels({ &label1, &label2, &label3 }); + + // A few dummy loads on entry, interspersed with 2 labels. + constexpr size_t kLdrR0R0Count = 5; + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label1); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label2); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + // Create the jump table, emit the base load. + arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1); + + // Dummy computation, stand-in for the address. We're only testing the jump table here, not how + // it's being used. + __ ldr(arm::R0, arm::Address(arm::R0)); + + // Emit the jump + __ EmitJumpTableDispatch(jump_table, arm::R1); + + // Some more dummy instructions. + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label3); + constexpr size_t kLdrR0R0Count2 = 600; // Note: even so there's no alignment + for (size_t i = 0; i != kLdrR0R0Count2; ++i) { // necessary, as gcc as emits nops, + __ ldr(arm::R0, arm::Address(arm::R0)); // whereas we emit 0 != nop. + } + + static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 > 1 * KB, "Not enough offset"); + static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 < 4 * KB, "Too much offset"); + + std::string expected = + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L1:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L2:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + "adr r1, .Ljump_table\n" + "ldr r0, [r0]\n" + ".Lbase:\n" + "add pc, r1\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L3:\n" + + RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") + + ".align 2\n" + ".Ljump_table:\n" + ".4byte (.L1 - .Lbase - 4)\n" + ".4byte (.L2 - .Lbase - 4)\n" + ".4byte (.L3 - .Lbase - 4)\n"; + DriverStr(expected, "JumpTable4K"); +} + +// Test for >4K fixup. +TEST_F(AssemblerThumb2Test, JumpTable64K) { + // The jump table. Use three labels. + Label label1, label2, label3; + std::vector<Label*> labels({ &label1, &label2, &label3 }); + + // A few dummy loads on entry, interspersed with 2 labels. + constexpr size_t kLdrR0R0Count = 5; + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label1); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label2); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + // Create the jump table, emit the base load. 
+ arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1); + + // Dummy computation, stand-in for the address. We're only testing the jump table here, not how + // it's being used. + __ ldr(arm::R0, arm::Address(arm::R0)); + + // Emit the jump + __ EmitJumpTableDispatch(jump_table, arm::R1); + + // Some more dummy instructions. + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label3); + constexpr size_t kLdrR0R0Count2 = 2601; // Note: odd so there's no alignment + for (size_t i = 0; i != kLdrR0R0Count2; ++i) { // necessary, as gcc as emits nops, + __ ldr(arm::R0, arm::Address(arm::R0)); // whereas we emit 0 != nop. + } + + static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 > 4 * KB, "Not enough offset"); + static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 < 64 * KB, "Too much offset"); + + std::string expected = + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L1:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L2:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + // ~ adr r1, .Ljump_table, gcc as can't seem to fix up a large offset itself. + // (Note: have to use constants, as labels aren't accepted. + "movw r1, #(((3 + " + StringPrintf("%zu", kLdrR0R0Count + kLdrR0R0Count2) + + ") * 2 - 4) & 0xFFFF)\n" + "add r1, pc\n" + "ldr r0, [r0]\n" + ".Lbase:\n" + "add pc, r1\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L3:\n" + + RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") + + ".align 2\n" + ".Ljump_table:\n" + ".4byte (.L1 - .Lbase - 4)\n" + ".4byte (.L2 - .Lbase - 4)\n" + ".4byte (.L3 - .Lbase - 4)\n"; + DriverStr(expected, "JumpTable64K"); +} + +// Test for >64K fixup. +TEST_F(AssemblerThumb2Test, JumpTableFar) { + // The jump table. Use three labels. + Label label1, label2, label3; + std::vector<Label*> labels({ &label1, &label2, &label3 }); + + // A few dummy loads on entry, interspersed with 2 labels. + constexpr size_t kLdrR0R0Count = 5; + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label1); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label2); + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + // Create the jump table, emit the base load. + arm::JumpTable* jump_table = __ CreateJumpTable(std::move(labels), arm::R1); + + // Dummy computation, stand-in for the address. We're only testing the jump table here, not how + // it's being used. + __ ldr(arm::R0, arm::Address(arm::R0)); + + // Emit the jump + __ EmitJumpTableDispatch(jump_table, arm::R1); + + // Some more dummy instructions. + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ BindTrackedLabel(&label3); + constexpr size_t kLdrR0R0Count2 = 70001; // Note: odd so there's no alignment + for (size_t i = 0; i != kLdrR0R0Count2; ++i) { // necessary, as gcc as emits nops, + __ ldr(arm::R0, arm::Address(arm::R0)); // whereas we emit 0 != nop. + } + + static_assert((kLdrR0R0Count + kLdrR0R0Count2 + 3) * 2 > 64 * KB, "Not enough offset"); + + std::string expected = + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L1:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L2:\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + // ~ adr r1, .Ljump_table, gcc as can't seem to fix up a large offset itself. + // (Note: have to use constants, as labels aren't accepted. 
+ "movw r1, #(((3 + " + StringPrintf("%zu", kLdrR0R0Count + kLdrR0R0Count2) + + ") * 2 - 4) & 0xFFFF)\n" + "movt r1, #(((3 + " + StringPrintf("%zu", kLdrR0R0Count + kLdrR0R0Count2) + + ") * 2 - 4) >> 16)\n" + ".Lhelp:" + "add r1, pc\n" + "ldr r0, [r0]\n" + ".Lbase:\n" + "add pc, r1\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".L3:\n" + + RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") + + ".align 2\n" + ".Ljump_table:\n" + ".4byte (.L1 - .Lbase - 4)\n" + ".4byte (.L2 - .Lbase - 4)\n" + ".4byte (.L3 - .Lbase - 4)\n"; + DriverStr(expected, "JumpTableFar"); +} + TEST_F(AssemblerThumb2Test, Clz) { __ clz(arm::R0, arm::R1); diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h index d97a2a40b2..dfe6babb25 100644 --- a/compiler/utils/assembler.h +++ b/compiler/utils/assembler.h @@ -227,6 +227,8 @@ class AssemblerBuffer { // Returns the position in the instruction stream. int GetPosition() { return cursor_ - contents_; } + void ExtendCapacity(size_t min_capacity = 0u); + private: // The limit is set to kMinimumGap bytes before the end of the data area. // This leaves enough space for the longest possible instruction and allows @@ -261,8 +263,6 @@ class AssemblerBuffer { return data + capacity - kMinimumGap; } - void ExtendCapacity(size_t min_capacity = 0u); - friend class AssemblerFixup; }; diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 8773169010..2653807369 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -126,11 +126,12 @@ static std::string StrippedCommandLine() { // However, we prefer to drop this when we saw --zip-fd. if (saw_zip_fd) { - // Drop anything --zip-X, --dex-X, --oat-X, --swap-X. + // Drop anything --zip-X, --dex-X, --oat-X, --swap-X, or --app-image-X if (StartsWith(original_argv[i], "--zip-") || StartsWith(original_argv[i], "--dex-") || StartsWith(original_argv[i], "--oat-") || - StartsWith(original_argv[i], "--swap-")) { + StartsWith(original_argv[i], "--swap-") || + StartsWith(original_argv[i], "--app-image-")) { continue; } } @@ -336,6 +337,12 @@ NO_RETURN static void Usage(const char* fmt, ...) 
{ UsageError(" --swap-fd=<file-descriptor>: specifies a file to use for swap (by descriptor)."); UsageError(" Example: --swap-fd=10"); UsageError(""); + UsageError(" --app-image-fd=<file-descriptor>: specify output file descriptor for app image."); + UsageError(" Example: --app-image-fd=10"); + UsageError(""); + UsageError(" --app-image-file=<file-name>: specify a file name for app image."); + UsageError(" Example: --app-image-file=/data/dalvik-cache/system@app@Calculator.apk.art"); + UsageError(""); std::cerr << "See log for usage error information\n"; exit(EXIT_FAILURE); } @@ -484,7 +491,8 @@ class Dex2Oat FINAL { compiled_classes_filename_(nullptr), compiled_methods_zip_filename_(nullptr), compiled_methods_filename_(nullptr), - image_(false), + app_image_(false), + boot_image_(false), is_host_(false), driver_(nullptr), dump_stats_(false), @@ -493,6 +501,7 @@ class Dex2Oat FINAL { dump_slow_timing_(kIsDebugBuild), dump_cfg_append_(false), swap_fd_(-1), + app_image_fd_(kInvalidImageFd), timings_(timings) {} ~Dex2Oat() { @@ -608,13 +617,15 @@ class Dex2Oat FINAL { } } - void ParseSwapFd(const StringPiece& option) { - ParseUintOption(option, "--swap-fd", &swap_fd_, Usage); - } - void ProcessOptions(ParserOptions* parser_options) { - image_ = (!image_filename_.empty()); - if (image_) { + boot_image_ = !image_filename_.empty(); + app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty(); + + if (IsAppImage() && IsBootImage()) { + Usage("Can't have both --image and (--app-image-fd or --app-image-file)"); + } + + if (IsBootImage()) { // We need the boot image to always be debuggable. compiler_options_->debuggable_ = true; } @@ -647,7 +658,7 @@ class Dex2Oat FINAL { android_root_ += android_root_env_var; } - if (!image_ && parser_options->boot_image_filename.empty()) { + if (!boot_image_ && parser_options->boot_image_filename.empty()) { parser_options->boot_image_filename += android_root_; parser_options->boot_image_filename += "/framework/boot.art"; } @@ -656,7 +667,7 @@ class Dex2Oat FINAL { boot_image_option_ += parser_options->boot_image_filename; } - if (image_classes_filename_ != nullptr && !image_) { + if (image_classes_filename_ != nullptr && !IsBootImage()) { Usage("--image-classes should only be used with --image"); } @@ -668,7 +679,7 @@ class Dex2Oat FINAL { Usage("--image-classes-zip should be used with --image-classes"); } - if (compiled_classes_filename_ != nullptr && !image_) { + if (compiled_classes_filename_ != nullptr && !IsBootImage()) { Usage("--compiled-classes should only be used with --image"); } @@ -912,7 +923,11 @@ class Dex2Oat FINAL { } else if (option.starts_with("--swap-file=")) { swap_file_name_ = option.substr(strlen("--swap-file=")).data(); } else if (option.starts_with("--swap-fd=")) { - ParseSwapFd(option); + ParseUintOption(option, "--swap-fd", &swap_fd_, Usage); + } else if (option.starts_with("--app-image-file=")) { + app_image_file_name_ = option.substr(strlen("--app-image-file=")).data(); + } else if (option.starts_with("--app-image-fd=")) { + ParseUintOption(option, "--app-image-fd", &app_image_fd_, Usage); } else if (option.starts_with("--verbose-methods=")) { // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages // conditional on having verbost methods. @@ -974,7 +989,6 @@ class Dex2Oat FINAL { // released immediately. unlink(swap_file_name_.c_str()); } - return true; } @@ -1016,7 +1030,7 @@ class Dex2Oat FINAL { callbacks_.reset(new QuickCompilerCallbacks( verification_results_.get(), &method_inliner_map_, - image_ ? 
+ IsBootImage() ? CompilerCallbacks::CallbackMode::kCompileBootImage : CompilerCallbacks::CallbackMode::kCompileApp)); runtime_options.push_back(std::make_pair("compilercallbacks", callbacks_.get())); @@ -1026,7 +1040,7 @@ class Dex2Oat FINAL { // Only allow no boot image for the runtime if we're compiling one. When we compile an app, // we don't want fallback mode, it will abort as we do not push a boot classpath (it might // have been stripped in preopting, anyways). - if (!image_) { + if (!IsBootImage()) { runtime_options.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr)); } // Disable libsigchain. We don't don't need it during compilation and it prevents us @@ -1065,7 +1079,7 @@ class Dex2Oat FINAL { "': " << error_msg; return false; } - } else if (image_) { + } else if (IsBootImage()) { image_classes_.reset(new std::unordered_set<std::string>); } // If --compiled-classes was specified, calculate the full list of classes to compile in the @@ -1178,7 +1192,7 @@ class Dex2Oat FINAL { // If we use a swap file, ensure we are above the threshold to make it necessary. if (swap_fd_ != -1) { - if (!UseSwap(image_, dex_files_)) { + if (!UseSwap(IsBootImage(), dex_files_)) { close(swap_fd_); swap_fd_ = -1; VLOG(compiler) << "Decided to run without swap."; @@ -1192,7 +1206,7 @@ class Dex2Oat FINAL { * If we're not in interpret-only or verify-none mode, go ahead and compile small applications. * Don't bother to check if we're doing the image. */ - if (!image_ && + if (!IsBootImage() && compiler_options_->IsCompilationEnabled() && compiler_kind_ == Compiler::kQuick) { size_t num_methods = 0; @@ -1246,7 +1260,7 @@ class Dex2Oat FINAL { compiler_kind_, instruction_set_, instruction_set_features_.get(), - image_, + IsBootImage(), image_classes_.release(), compiled_classes_.release(), nullptr, @@ -1341,7 +1355,7 @@ class Dex2Oat FINAL { uint32_t image_file_location_oat_checksum = 0; uintptr_t image_file_location_oat_data_begin = 0; int32_t image_patch_delta = 0; - if (image_) { + if (IsImage()) { PrepareImageWriter(image_base_); } else { TimingLogger::ScopedTiming t3("Loading image checksum", timings_); @@ -1366,7 +1380,7 @@ class Dex2Oat FINAL { key_value_store_.get())); } - if (image_) { + if (IsImage()) { // The OatWriter constructor has already updated offsets in methods and we need to // prepare method offsets in the image address space for direct method patching. TimingLogger::ScopedTiming t2("dex2oat Prepare image address space", timings_); @@ -1391,7 +1405,7 @@ class Dex2Oat FINAL { // If we are compiling an image, invoke the image creation routine. Else just skip. bool HandleImage() { - if (image_) { + if (IsImage()) { TimingLogger::ScopedTiming t("dex2oat ImageWriter", timings_); if (!CreateImageFile()) { return false; @@ -1474,7 +1488,15 @@ class Dex2Oat FINAL { } bool IsImage() const { - return image_; + return IsAppImage() || IsBootImage(); + } + + bool IsAppImage() const { + return app_image_; + } + + bool IsBootImage() const { + return boot_image_; } bool IsHost() const { @@ -1576,7 +1598,10 @@ class Dex2Oat FINAL { bool CreateImageFile() REQUIRES(!Locks::mutator_lock_) { CHECK(image_writer_ != nullptr); - if (!image_writer_->Write(image_filename_, oat_unstripped_, oat_location_)) { + if (!image_writer_->Write(app_image_fd_, + IsBootImage() ? 
image_filename_ : app_image_file_name_, + oat_unstripped_, + oat_location_)) { LOG(ERROR) << "Failed to create image file " << image_filename_; return false; } @@ -1585,8 +1610,8 @@ class Dex2Oat FINAL { // Destroy ImageWriter before doing FixupElf. image_writer_.reset(); - // Do not fix up the ELF file if we are --compile-pic - if (!compiler_options_->GetCompilePic()) { + // Do not fix up the ELF file if we are --compile-pic or compiing the app image + if (!compiler_options_->GetCompilePic() && IsBootImage()) { std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str())); if (oat_file.get() == nullptr) { PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_; @@ -1748,7 +1773,8 @@ class Dex2Oat FINAL { std::unique_ptr<std::unordered_set<std::string>> image_classes_; std::unique_ptr<std::unordered_set<std::string>> compiled_classes_; std::unique_ptr<std::unordered_set<std::string>> compiled_methods_; - bool image_; + bool app_image_; + bool boot_image_; bool is_host_; std::string android_root_; std::vector<const DexFile*> dex_files_; @@ -1767,6 +1793,8 @@ class Dex2Oat FINAL { bool dump_cfg_append_; std::string swap_file_name_; int swap_fd_; + std::string app_image_file_name_; + int app_image_fd_; std::string profile_file_; // Profile file to use TimingLogger* timings_; std::unique_ptr<CumulativeLogger> compiler_phases_timings_; @@ -1895,7 +1923,7 @@ static int dex2oat(int argc, char** argv) { // 3) Compiling with --host // 4) Compiling on the host (not a target build) // Otherwise, print a stripped command line. - if (kIsDebugBuild || dex2oat.IsImage() || dex2oat.IsHost() || !kIsTargetBuild) { + if (kIsDebugBuild || dex2oat.IsBootImage() || dex2oat.IsHost() || !kIsTargetBuild) { LOG(INFO) << CommandLine(); } else { LOG(INFO) << StrippedCommandLine(); diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h index f741732046..cf548ada33 100644 --- a/runtime/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -468,12 +468,6 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) { } } -inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) { - memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src), - Size(image_pointer_size)); - declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass()); -} - } // namespace art #endif // ART_RUNTIME_ART_METHOD_INL_H_ diff --git a/runtime/art_method.cc b/runtime/art_method.cc index c1279bf6b1..f4a5f233ff 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -367,7 +367,7 @@ const uint8_t* ArtMethod::GetQuickenedInfo() { } const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) { - if (IsRuntimeMethod() || IsProxyMethod()) { + if (IsRuntimeMethod()) { return nullptr; } @@ -381,6 +381,12 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) { return nullptr; } + if (existing_entry_point == GetQuickProxyInvokeHandler()) { + DCHECK(IsProxyMethod() && !IsConstructor()); + // The proxy entry point does not have any method header. + return nullptr; + } + // Check whether the current entry point contains this pc. 
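A minimal sketch of the idea behind the proxy check added above, using hypothetical stand-in types rather than the real ART classes: a method whose entry point is the shared proxy invoke handler owns no compiled code of its own, so there is no per-method header to return and the lookup has to bail out before doing any header arithmetic.

#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for illustration; not the real ART symbols.
static const char kProxyTrampoline[1] = {};   // shared entry point of every proxy method
constexpr size_t kAssumedHeaderSize = 32;     // placeholder, not the real header size

struct Method {
  const void* entry_point;
};

// Returns the header that precedes the compiled code, or nullptr for methods
// running through the shared proxy trampoline (which carries no header).
const void* GetMethodHeaderOrNull(const Method& m) {
  if (m.entry_point == kProxyTrampoline) {
    return nullptr;
  }
  return static_cast<const uint8_t*>(m.entry_point) - kAssumedHeaderSize;
}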
if (!class_linker->IsQuickResolutionStub(existing_entry_point) && !class_linker->IsQuickToInterpreterBridge(existing_entry_point)) { @@ -452,4 +458,28 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) { return method_header; } + +void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) { + memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src), + Size(image_pointer_size)); + declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass()); + + // If the entry point of the method we are copying from is from JIT code, we just + // put the entry point of the new method to interpreter. We could set the entry point + // to the JIT code, but this would require taking the JIT code cache lock to notify + // it, which we do not want at this level. + Runtime* runtime = Runtime::Current(); + if (runtime->GetJit() != nullptr) { + if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) { + SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size); + } + } + // Clear the profiling info for the same reasons as the JIT code. + if (!src->IsNative()) { + SetProfilingInfoPtrSize(nullptr, image_pointer_size); + } + // Clear hotness to let the JIT properly decide when to compile this method. + hotness_count_ = 0; +} + } // namespace art diff --git a/runtime/art_method.h b/runtime/art_method.h index 551989d182..ce9f2025ce 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -49,8 +49,8 @@ class ArtMethod FINAL { ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0), method_index_(0) { } - ArtMethod(const ArtMethod& src, size_t image_pointer_size) { - CopyFrom(&src, image_pointer_size); + ArtMethod(ArtMethod* src, size_t image_pointer_size) { + CopyFrom(src, image_pointer_size); } static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, @@ -313,6 +313,10 @@ class ArtMethod FINAL { SetEntryPointFromJniPtrSize(info, sizeof(void*)); } + ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, size_t pointer_size) { + SetEntryPointFromJniPtrSize(info, pointer_size); + } + static MemberOffset ProfilingInfoOffset() { return EntryPointFromJniOffset(sizeof(void*)); } @@ -429,7 +433,7 @@ class ArtMethod FINAL { return pointer_size; } - void CopyFrom(const ArtMethod* src, size_t image_pointer_size) + void CopyFrom(ArtMethod* src, size_t image_pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(size_t pointer_size) diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc index f1f6f9b1c1..1942e1dc1b 100644 --- a/runtime/base/timing_logger.cc +++ b/runtime/base/timing_logger.cc @@ -125,7 +125,7 @@ void CumulativeLogger::DumpHistogram(std::ostream &os) const { histogram->CreateHistogram(&cumulative_data); histogram->PrintConfidenceIntervals(os, 0.99, cumulative_data); } - os << "Done Dumping histograms \n"; + os << "Done Dumping histograms\n"; } TimingLogger::TimingLogger(const char* name, bool precise, bool verbose) diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 5de1cacba8..da70456369 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -5279,7 +5279,7 @@ bool ClassLinker::LinkInterfaceMethods( miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size)); CHECK(miranda_method != nullptr); // Point the interface table at a phantom slot. 
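The placement-new of a miranda method just below goes through the ArtMethod copy constructor and therefore through the rewritten CopyFrom above. A minimal sketch of that copy-then-sanitize pattern, with hypothetical types (the in-cache predicate and the interpreter stub are placeholders): a freshly copied method must not inherit JIT-owned state, since the JIT code cache was never told about the clone.

#include <cstdint>
#include <cstring>

// Hypothetical stand-ins for illustration only.
struct Method {
  const void* entry_point;
  void* profiling_info;
  uint16_t hotness;
};

static const char kInterpreterBridgeStub[1] = {};
static bool InJitCodeCache(const void* /* pc */) { return false; }  // placeholder predicate

void CopyMethod(Method* dst, const Method* src) {
  std::memcpy(dst, src, sizeof(Method));
  // Compiled code and profiling data belong to the source method; the copy
  // starts cold in the interpreter so the JIT can decide about it separately.
  if (InJitCodeCache(dst->entry_point)) {
    dst->entry_point = kInterpreterBridgeStub;
  }
  dst->profiling_info = nullptr;
  dst->hotness = 0;
}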
- new(miranda_method) ArtMethod(*interface_method, image_pointer_size_); + new(miranda_method) ArtMethod(interface_method, image_pointer_size_); miranda_methods.push_back(miranda_method); } method_array->SetElementPtrSize(j, miranda_method, image_pointer_size_); diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h new file mode 100644 index 0000000000..26f5ad3df5 --- /dev/null +++ b/runtime/gc/collector/concurrent_copying-inl.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_ +#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_ + +#include "concurrent_copying.h" + +#include "gc/accounting/space_bitmap-inl.h" +#include "gc/heap.h" +#include "gc/space/region_space.h" +#include "lock_word.h" + +namespace art { +namespace gc { +namespace collector { + +inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) { + if (from_ref == nullptr) { + return nullptr; + } + DCHECK(heap_->collector_type_ == kCollectorTypeCC); + if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) { + // In the lock word forward address state, the read barrier bits + // in the lock word are part of the stored forwarding address and + // invalid. This is usually OK as the from-space copy of objects + // aren't accessed by mutators due to the to-space + // invariant. However, during the dex2oat image writing relocation + // and the zygote compaction, objects can be in the forward + // address state (to store the forward/relocation addresses) and + // they can still be accessed and the invalid read barrier bits + // are consulted. If they look like gray but aren't really, the + // read barriers slow path can trigger when it shouldn't. To guard + // against this, return here if the CC collector isn't running. + return from_ref; + } + DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?"; + space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref); + switch (rtype) { + case space::RegionSpace::RegionType::kRegionTypeToSpace: + // It's already marked. + return from_ref; + case space::RegionSpace::RegionType::kRegionTypeFromSpace: { + mirror::Object* to_ref = GetFwdPtr(from_ref); + if (kUseBakerReadBarrier) { + DCHECK_NE(to_ref, ReadBarrier::GrayPtr()) + << "from_ref=" << from_ref << " to_ref=" << to_ref; + } + if (to_ref == nullptr) { + // It isn't marked yet. Mark it by copying it to the to-space. + to_ref = Copy(from_ref); + } + DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)) + << "from_ref=" << from_ref << " to_ref=" << to_ref; + return to_ref; + } + case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: { + // This may or may not succeed, which is ok. 
+ if (kUseBakerReadBarrier) { + from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); + } + mirror::Object* to_ref = from_ref; + if (region_space_bitmap_->AtomicTestAndSet(from_ref)) { + // Already marked. + } else { + // Newly marked. + if (kUseBakerReadBarrier) { + DCHECK_EQ(to_ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr()); + } + PushOntoMarkStack(to_ref); + } + return to_ref; + } + case space::RegionSpace::RegionType::kRegionTypeNone: + return MarkNonMoving(from_ref); + default: + UNREACHABLE(); + } +} + +inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) { + DCHECK(region_space_->IsInFromSpace(from_ref)); + LockWord lw = from_ref->GetLockWord(false); + if (lw.GetState() == LockWord::kForwardingAddress) { + mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress()); + DCHECK(fwd_ptr != nullptr); + return fwd_ptr; + } else { + return nullptr; + } +} + +} // namespace collector +} // namespace gc +} // namespace art + +#endif // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_ diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 20e775c7aa..4a49712cbc 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -368,30 +368,15 @@ void ConcurrentCopying::MarkingPhase() { } } } - // TODO: Other garbage collectors uses Runtime::VisitConcurrentRoots(), refactor this part - // to also use the same function. { - TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings()); - Runtime::Current()->VisitConstantRoots(this); - } - { - TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings()); - Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots); - } - { - TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings()); - Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots); + TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings()); + Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots); } { // TODO: don't visit the transaction roots if it's not active. TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings()); Runtime::Current()->VisitNonThreadRoots(this); } - { - TimingLogger::ScopedTiming split6("Dbg::VisitRoots", GetTimings()); - Dbg::VisitRoots(this); - } - Runtime::Current()->GetHeap()->VisitAllocationRecords(this); // Immune spaces. for (auto& space : heap_->GetContinuousSpaces()) { @@ -594,8 +579,8 @@ void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites? CHECK(thread_running_gc_ != nullptr); MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); - if (mark_stack_mode == kMarkStackModeThreadLocal) { - if (self == thread_running_gc_) { + if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) { + if (LIKELY(self == thread_running_gc_)) { // If GC-running thread, use the GC mark stack instead of a thread-local mark stack. 
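A simplified sketch of the two push paths annotated here, assuming plain std::vector stacks (the real collector uses bounded atomic stacks plus mark_stack_lock_, both omitted): the thread running the GC pushes straight onto the shared GC mark stack, while mutator threads push onto thread-local stacks so the likely path needs no locking.

#include <vector>

enum class Caller { kGcThread, kMutator };

// Sketch only: capacity checks, stack revocation and the shared fall-back
// mode used once thread-local stacks are disabled are not modelled.
void PushRef(void* ref, Caller who,
             std::vector<void*>& gc_stack,
             std::vector<void*>& local_stack) {
  if (who == Caller::kGcThread) {
    gc_stack.push_back(ref);
  } else {
    local_stack.push_back(ref);
  }
}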
CHECK(self->GetThreadLocalMarkStack() == nullptr); if (UNLIKELY(gc_mark_stack_->IsFull())) { @@ -663,18 +648,6 @@ accounting::ObjectStack* ConcurrentCopying::GetLiveStack() { return heap_->live_stack_.get(); } -inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) { - DCHECK(region_space_->IsInFromSpace(from_ref)); - LockWord lw = from_ref->GetLockWord(false); - if (lw.GetState() == LockWord::kForwardingAddress) { - mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress()); - CHECK(fwd_ptr != nullptr); - return fwd_ptr; - } else { - return nullptr; - } -} - // The following visitors are that used to verify that there's no // references to the from-space left after marking. class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor { @@ -1080,7 +1053,7 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc return count; } -void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { +inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { DCHECK(!region_space_->IsInFromSpace(to_ref)); if (kUseBakerReadBarrier) { DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) @@ -1095,9 +1068,10 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { << " " << to_ref << " " << to_ref->GetReadBarrierPointer() << " is_marked=" << IsMarked(to_ref); } - if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && - to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr && - !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) { +#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER + if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && + to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr && + !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) { // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We // will change it to black or white later in ReferenceQueue::DequeuePendingReference(). CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref; @@ -1106,14 +1080,13 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { // be concurrently marked after the Scan() call above has enqueued the Reference, in which case // the above IsInToSpace() evaluates to true and we change the color from gray to black or white // here in this else block. -#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER if (kUseBakerReadBarrier) { if (region_space_->IsInToSpace(to_ref)) { // If to-space, change from gray to white. bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr()); CHECK(success) << "Must succeed as we won the race."; - CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); + DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); } else { // If non-moving space/unevac from space, change from gray // to black. 
We can't change gray to white because it's not @@ -1125,13 +1098,13 @@ void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::BlackPtr()); CHECK(success) << "Must succeed as we won the race."; - CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); + DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); } } + } #else - DCHECK(!kUseBakerReadBarrier); + DCHECK(!kUseBakerReadBarrier); #endif - } if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) { ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this); visitor(to_ref); @@ -1622,6 +1595,7 @@ class ConcurrentCopyingRefFieldsVisitor { } void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { if (!root->IsNull()) { VisitRoot(root); @@ -1629,6 +1603,7 @@ class ConcurrentCopyingRefFieldsVisitor { } void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { collector_->MarkRoot(root); } @@ -1638,7 +1613,7 @@ class ConcurrentCopyingRefFieldsVisitor { }; // Scan ref fields of an object. -void ConcurrentCopying::Scan(mirror::Object* to_ref) { +inline void ConcurrentCopying::Scan(mirror::Object* to_ref) { DCHECK(!region_space_->IsInFromSpace(to_ref)); ConcurrentCopyingRefFieldsVisitor visitor(this); to_ref->VisitReferences(visitor, visitor); @@ -1648,9 +1623,6 @@ void ConcurrentCopying::Scan(mirror::Object* to_ref) { inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) { mirror::Object* ref = obj->GetFieldObject< mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); - if (ref == nullptr || region_space_->IsInToSpace(ref)) { - return; - } mirror::Object* to_ref = Mark(ref); if (to_ref == ref) { return; @@ -1669,14 +1641,11 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) } // Process some roots. 
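A compact sketch of the colour-transition idiom the relaxed checks above rely on, using a plain std::atomic field in place of the object's read-barrier pointer: only one thread can win the gray-to-white (or gray-to-black) compare-and-swap, so the winner may simply assert the post-condition instead of re-checking it.

#include <atomic>

enum Color : int { kWhite = 0, kGray = 1, kBlack = 2 };

// Returns true when this thread performed the transition; losers observe the
// already-updated colour and skip their own bookkeeping.
bool TransitionColor(std::atomic<int>& rb_state, Color from, Color to) {
  int expected = from;
  return rb_state.compare_exchange_strong(expected, to);
}

In a debug build the winner can then check that the state equals the target colour, which is what the CHECK-to-DCHECK relaxations in this hunk capture.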
-void ConcurrentCopying::VisitRoots( +inline void ConcurrentCopying::VisitRoots( mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) { for (size_t i = 0; i < count; ++i) { mirror::Object** root = roots[i]; mirror::Object* ref = *root; - if (ref == nullptr || region_space_->IsInToSpace(ref)) { - continue; - } mirror::Object* to_ref = Mark(ref); if (to_ref == ref) { continue; @@ -1693,12 +1662,9 @@ void ConcurrentCopying::VisitRoots( } } -void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) { +inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) { DCHECK(!root->IsNull()); mirror::Object* const ref = root->AsMirrorPtr(); - if (region_space_->IsInToSpace(ref)) { - return; - } mirror::Object* to_ref = Mark(ref); if (to_ref != ref) { auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root); @@ -1714,7 +1680,7 @@ void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* ro } } -void ConcurrentCopying::VisitRoots( +inline void ConcurrentCopying::VisitRoots( mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) { for (size_t i = 0; i < count; ++i) { @@ -2013,148 +1979,85 @@ bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) { return alloc_stack->Contains(ref); } -mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) { - if (from_ref == nullptr) { - return nullptr; - } - DCHECK(from_ref != nullptr); - DCHECK(heap_->collector_type_ == kCollectorTypeCC); - if (kUseBakerReadBarrier && !is_active_) { - // In the lock word forward address state, the read barrier bits - // in the lock word are part of the stored forwarding address and - // invalid. This is usually OK as the from-space copy of objects - // aren't accessed by mutators due to the to-space - // invariant. However, during the dex2oat image writing relocation - // and the zygote compaction, objects can be in the forward - // address state (to store the forward/relocation addresses) and - // they can still be accessed and the invalid read barrier bits - // are consulted. If they look like gray but aren't really, the - // read barriers slow path can trigger when it shouldn't. To guard - // against this, return here if the CC collector isn't running. - return from_ref; - } - DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?"; - space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref); - if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) { - // It's already marked. - return from_ref; - } - mirror::Object* to_ref; - if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) { - to_ref = GetFwdPtr(from_ref); - if (kUseBakerReadBarrier) { - DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref; - } - if (to_ref == nullptr) { - // It isn't marked yet. Mark it by copying it to the to-space. - to_ref = Copy(from_ref); +mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) { + // ref is in a non-moving space (from_ref == to_ref). 
+ DCHECK(!region_space_->HasAddress(ref)) << ref; + if (immune_region_.ContainsObject(ref)) { + accounting::ContinuousSpaceBitmap* cc_bitmap = + cc_heap_bitmap_->GetContinuousSpaceBitmap(ref); + DCHECK(cc_bitmap != nullptr) + << "An immune space object must have a bitmap"; + if (kIsDebugBuild) { + DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref)) + << "Immune space object must be already marked"; } - DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)) - << "from_ref=" << from_ref << " to_ref=" << to_ref; - } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) { // This may or may not succeed, which is ok. if (kUseBakerReadBarrier) { - from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); + ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); } - if (region_space_bitmap_->AtomicTestAndSet(from_ref)) { + if (cc_bitmap->AtomicTestAndSet(ref)) { // Already marked. - to_ref = from_ref; } else { // Newly marked. - to_ref = from_ref; if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); + DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr()); } - PushOntoMarkStack(to_ref); + PushOntoMarkStack(ref); } } else { - // from_ref is in a non-moving space. - DCHECK(!region_space_->HasAddress(from_ref)) << from_ref; - if (immune_region_.ContainsObject(from_ref)) { - accounting::ContinuousSpaceBitmap* cc_bitmap = - cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref); - DCHECK(cc_bitmap != nullptr) - << "An immune space object must have a bitmap"; - if (kIsDebugBuild) { - DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref)) - << "Immune space object must be already marked"; - } - // This may or may not succeed, which is ok. + // Use the mark bitmap. + accounting::ContinuousSpaceBitmap* mark_bitmap = + heap_mark_bitmap_->GetContinuousSpaceBitmap(ref); + accounting::LargeObjectBitmap* los_bitmap = + heap_mark_bitmap_->GetLargeObjectBitmap(ref); + CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; + bool is_los = mark_bitmap == nullptr; + if (!is_los && mark_bitmap->Test(ref)) { + // Already marked. if (kUseBakerReadBarrier) { - from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); + DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || + ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); } - if (cc_bitmap->AtomicTestAndSet(from_ref)) { - // Already marked. - to_ref = from_ref; - } else { - // Newly marked. - to_ref = from_ref; - if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); - } - PushOntoMarkStack(to_ref); + } else if (is_los && los_bitmap->Test(ref)) { + // Already marked in LOS. + if (kUseBakerReadBarrier) { + DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || + ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); } } else { - // Use the mark bitmap. - accounting::ContinuousSpaceBitmap* mark_bitmap = - heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref); - accounting::LargeObjectBitmap* los_bitmap = - heap_mark_bitmap_->GetLargeObjectBitmap(from_ref); - CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; - bool is_los = mark_bitmap == nullptr; - if (!is_los && mark_bitmap->Test(from_ref)) { - // Already marked. 
- to_ref = from_ref; - if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || - to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); + // Not marked. + if (IsOnAllocStack(ref)) { + // If it's on the allocation stack, it's considered marked. Keep it white. + // Objects on the allocation stack need not be marked. + if (!is_los) { + DCHECK(!mark_bitmap->Test(ref)); + } else { + DCHECK(!los_bitmap->Test(ref)); } - } else if (is_los && los_bitmap->Test(from_ref)) { - // Already marked in LOS. - to_ref = from_ref; if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || - to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); + DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()); } } else { - // Not marked. - if (IsOnAllocStack(from_ref)) { - // If it's on the allocation stack, it's considered marked. Keep it white. - to_ref = from_ref; - // Objects on the allocation stack need not be marked. - if (!is_los) { - DCHECK(!mark_bitmap->Test(to_ref)); - } else { - DCHECK(!los_bitmap->Test(to_ref)); - } - if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); - } + // Not marked or on the allocation stack. Try to mark it. + // This may or may not succeed, which is ok. + if (kUseBakerReadBarrier) { + ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); + } + if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) { + // Already marked. + } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) { + // Already marked in LOS. } else { - // Not marked or on the allocation stack. Try to mark it. - // This may or may not succeed, which is ok. + // Newly marked. if (kUseBakerReadBarrier) { - from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); - } - if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) { - // Already marked. - to_ref = from_ref; - } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) { - // Already marked in LOS. - to_ref = from_ref; - } else { - // Newly marked. 
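A minimal sketch of the bitmap selection MarkNonMoving performs above, with a hypothetical bitmap interface (the real classes are accounting::ContinuousSpaceBitmap and accounting::LargeObjectBitmap): an address covered by a continuous space is marked in that space's bitmap, anything else is treated as a large object and marked in the LOS bitmap, and the reference is pushed onto the mark stack only when the test-and-set reports it as newly marked.

// Hypothetical interface for illustration only.
struct Bitmap {
  virtual bool AtomicTestAndSet(const void* ref) = 0;  // returns the previous value
  virtual ~Bitmap() {}
};

// Returns true when ref was newly marked and should be pushed onto the mark stack.
bool MarkInBitmaps(const void* ref, Bitmap* continuous_bitmap, Bitmap* los_bitmap) {
  const bool is_los = (continuous_bitmap == nullptr);  // not covered by any continuous space
  Bitmap* bitmap = is_los ? los_bitmap : continuous_bitmap;
  return !bitmap->AtomicTestAndSet(ref);
}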
- to_ref = from_ref; - if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); - } - PushOntoMarkStack(to_ref); + DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr()); } + PushOntoMarkStack(ref); } } } } - return to_ref; + return ref; } void ConcurrentCopying::FinishPhase() { diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h index c32b19ea3a..27726e23c1 100644 --- a/runtime/gc/collector/concurrent_copying.h +++ b/runtime/gc/collector/concurrent_copying.h @@ -93,7 +93,7 @@ class ConcurrentCopying : public GarbageCollector { DCHECK(ref != nullptr); return IsMarked(ref) == ref; } - mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) + ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); bool IsMarking() const { return is_marking_; @@ -183,6 +183,8 @@ class ConcurrentCopying : public GarbageCollector { void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_); void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_); void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_); + mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); space::RegionSpace* region_space_; // The underlying region space. std::unique_ptr<Barrier> gc_barrier_; diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index cfccec87cf..ce972ef976 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -25,6 +25,7 @@ #include "linear_alloc.h" #include "mem_map.h" #include "oat_file-inl.h" +#include "scoped_thread_state_change.h" #include "thread_list.h" namespace art { @@ -407,9 +408,17 @@ void JitCodeCache::GarbageCollectCache(Thread* self) { // Run a checkpoint on all threads to mark the JIT compiled code they are running. { Barrier barrier(0); - MarkCodeClosure closure(this, &barrier); - size_t threads_running_checkpoint = - Runtime::Current()->GetThreadList()->RunCheckpoint(&closure); + size_t threads_running_checkpoint = 0; + { + // Walking the stack requires the mutator lock. + // We only take the lock when running the checkpoint and not waiting so that + // when we go back to suspended, we can execute checkpoints that were requested + // concurrently, and then move to waiting for our own checkpoint to finish. + ScopedObjectAccess soa(self); + MarkCodeClosure closure(this, &barrier); + threads_running_checkpoint = + Runtime::Current()->GetThreadList()->RunCheckpoint(&closure); + } if (threads_running_checkpoint != 0) { barrier.Increment(self, threads_running_checkpoint); } diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc index 8aaa5fa304..7931306ff6 100644 --- a/runtime/jit/jit_instrumentation.cc +++ b/runtime/jit/jit_instrumentation.cc @@ -102,15 +102,13 @@ void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t } else { // We failed allocating. Instead of doing the collection on the Java thread, we push // an allocation to a compiler thread, that will do the collection. 
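A rough sketch of the sampling flow these hunks adjust, with a toy task queue standing in for the JIT thread pool and arbitrary thresholds (the real values are runtime options): every sample bumps a per-method counter, a warm method gets a profiling-info allocation request queued (here unconditionally, where the real code only falls back to the queue when the inline allocation fails), and the hot threshold enqueues exactly one compile request instead of compiling on the Java thread.

#include <cstdint>
#include <functional>
#include <queue>

struct MethodCounter { uint32_t samples = 0; };

// Illustrative thresholds only.
constexpr uint32_t kWarmThreshold = 1;
constexpr uint32_t kHotThreshold = 100;

void AddSample(MethodCounter& m,
               std::queue<std::function<void()>>& compile_queue,
               std::function<void()> allocate_profile,
               std::function<void()> compile) {
  const uint32_t count = ++m.samples;
  if (count == kWarmThreshold) {
    compile_queue.push(allocate_profile);  // set up profiling away from the Java thread
  }
  if (count == kHotThreshold) {
    compile_queue.push(compile);           // a single compile request per method
  }
}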
- thread_pool_->AddTask(self, new JitCompileTask( - method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kAllocateProfile)); + thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile)); thread_pool_->StartWorkers(self); } } if (sample_count == hot_method_threshold_) { - thread_pool_->AddTask(self, new JitCompileTask( - method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kCompile)); + thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile)); thread_pool_->StartWorkers(self); } } diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index 022f31dc53..5c6520fcab 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -99,7 +99,7 @@ class MANAGED LOCKABLE Object { #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER NO_RETURN #endif - bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) + ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_); void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_); diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h index 5b73557941..5337760fb8 100644 --- a/runtime/mirror/object_array-inl.h +++ b/runtime/mirror/object_array-inl.h @@ -270,7 +270,7 @@ inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) { } template<class T> template<typename Visitor> -void ObjectArray<T>::VisitReferences(const Visitor& visitor) { +inline void ObjectArray<T>::VisitReferences(const Visitor& visitor) { const size_t length = static_cast<size_t>(GetLength()); for (size_t i = 0; i < length; ++i) { visitor(this, OffsetOfElement(i), false); diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 8d5418d07d..99080f611c 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -662,23 +662,19 @@ bool OatFileAssistant::RelocateOatFile(const std::string* input_file, bool OatFileAssistant::GenerateOatFile(std::string* error_msg) { CHECK(error_msg != nullptr); - if (OatFileName() == nullptr) { + Runtime* runtime = Runtime::Current(); + if (!runtime->IsDex2OatEnabled()) { *error_msg = "Generation of oat file for dex location " + dex_location_ - + " not attempted because the oat file name could not be determined."; + + " not attempted because dex2oat is disabled."; return false; } - const std::string& oat_file_name = *OatFileName(); - Runtime* runtime = Runtime::Current(); - if (!runtime->IsDex2OatEnabled()) { - *error_msg = "Generation of oat file " + oat_file_name - + " not attempted because dex2oat is disabled"; + if (OatFileName() == nullptr) { + *error_msg = "Generation of oat file for dex location " + dex_location_ + + " not attempted because the oat file name could not be determined."; return false; } - - std::vector<std::string> args; - args.push_back("--dex-file=" + dex_location_); - args.push_back("--oat-file=" + oat_file_name); + const std::string& oat_file_name = *OatFileName(); // dex2oat ignores missing dex files and doesn't report an error. // Check explicitly here so we can detect the error properly. 
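A small sketch of the create-then-hand-off pattern the next hunk introduces in GenerateOatFile, written against plain POSIX calls (paths and mode handling are illustrative): the parent creates the output file and makes it world readable before invoking the compiler, so directory or permission problems surface immediately and dex2oat only ever sees a ready file descriptor via --oat-fd.

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

#include <string>
#include <vector>

// Returns the dex2oat argument vector, or an empty vector when the oat file
// cannot be prepared (for example a missing intermediate directory).
std::vector<std::string> PrepareOatFdArgs(const std::string& dex_location,
                                          const std::string& oat_path) {
  int fd = open(oat_path.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
  if (fd < 0) {
    return {};
  }
  // Make the result world readable even under a restrictive umask.
  if (fchmod(fd, 0644) != 0) {
    close(fd);
    unlink(oat_path.c_str());
    return {};
  }
  // The descriptor is intentionally left open so the child process inherits it.
  return {"--dex-file=" + dex_location,
          "--oat-fd=" + std::to_string(fd),
          "--oat-location=" + oat_path};
}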
@@ -688,9 +684,36 @@ bool OatFileAssistant::GenerateOatFile(std::string* error_msg) { return false; } + std::unique_ptr<File> oat_file; + oat_file.reset(OS::CreateEmptyFile(oat_file_name.c_str())); + if (oat_file.get() == nullptr) { + *error_msg = "Generation of oat file " + oat_file_name + + " not attempted because the oat file could not be created."; + return false; + } + + if (fchmod(oat_file->Fd(), 0644) != 0) { + *error_msg = "Generation of oat file " + oat_file_name + + " not attempted because the oat file could not be made world readable."; + oat_file->Erase(); + return false; + } + + std::vector<std::string> args; + args.push_back("--dex-file=" + dex_location_); + args.push_back("--oat-fd=" + std::to_string(oat_file->Fd())); + args.push_back("--oat-location=" + oat_file_name); + if (!Dex2Oat(args, error_msg)) { // Manually delete the file. This ensures there is no garbage left over if // the process unexpectedly died. + oat_file->Erase(); + TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str())); + return false; + } + + if (oat_file->FlushCloseOrErase() != 0) { + *error_msg = "Unable to close oat file " + oat_file_name; TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str())); return false; } diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index 2c81eddf39..c54d7f8761 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -849,6 +849,38 @@ TEST_F(OatFileAssistantTest, LoadDexNoAlternateOat) { EXPECT_FALSE(ofm.OatFileExists()); } +// Case: We have a DEX file but can't write the oat file. +// Expect: We should fail to make the oat file up to date. +TEST_F(OatFileAssistantTest, LoadDexUnwriteableAlternateOat) { + std::string dex_location = GetScratchDir() + "/LoadDexUnwriteableAlternateOat.jar"; + + // Make the oat location unwritable by inserting some non-existent + // intermediate directories. + std::string oat_location = GetScratchDir() + "/foo/bar/LoadDexUnwriteableAlternateOat.oat"; + + Copy(GetDexSrc1(), dex_location); + + OatFileAssistant oat_file_assistant( + dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true); + std::string error_msg; + ASSERT_FALSE(oat_file_assistant.MakeUpToDate(&error_msg)); + + std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile(); + ASSERT_TRUE(oat_file.get() == nullptr); +} + +// Case: We don't have a DEX file and can't write the oat file. +// Expect: We should fail to generate the oat file without crashing. +TEST_F(OatFileAssistantTest, GenNoDex) { + std::string dex_location = GetScratchDir() + "/GenNoDex.jar"; + std::string oat_location = GetScratchDir() + "/GenNoDex.oat"; + + OatFileAssistant oat_file_assistant( + dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true); + std::string error_msg; + ASSERT_FALSE(oat_file_assistant.GenerateOatFile(&error_msg)); +} + // Turn an absolute path into a path relative to the current working // directory. 
static std::string MakePathRelative(std::string target) { diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h index 4998a6a478..7de6c06f2b 100644 --- a/runtime/read_barrier-inl.h +++ b/runtime/read_barrier-inl.h @@ -19,7 +19,7 @@ #include "read_barrier.h" -#include "gc/collector/concurrent_copying.h" +#include "gc/collector/concurrent_copying-inl.h" #include "gc/heap.h" #include "mirror/object_reference.h" #include "mirror/reference.h" diff --git a/runtime/stack.cc b/runtime/stack.cc index b0727daa15..d7edfade15 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -958,26 +958,18 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const { return runtime->GetRuntimeMethodFrameInfo(method); } - // For Proxy method we add special handling for the direct method case (there is only one - // direct method - constructor). Direct method is cloned from original - // java.lang.reflect.Proxy class together with code and as a result it is executed as usual - // quick compiled method without any stubs. So the frame info should be returned as it is a - // quick method not a stub. However, if instrumentation stubs are installed, the - // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an - // oat code pointer, thus we have to add a special case here. if (method->IsProxyMethod()) { - if (method->IsDirect()) { - CHECK(method->IsConstructor()); - const void* code_pointer = - EntryPointToCodePointer(method->GetEntryPointFromQuickCompiledCode()); - return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_; - } else { - return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); - } + // There is only one direct method of a proxy class: the constructor. A direct method is + // cloned from the original java.lang.reflect.Proxy and is executed as usual quick + // compiled method without any stubs. Therefore the method must have a OatQuickMethodHeader. + DCHECK(!method->IsDirect() && !method->IsConstructor()) + << "Constructors of proxy classes must have a OatQuickMethodHeader"; + return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); } - ClassLinker* class_linker = runtime->GetClassLinker(); + // The only remaining case is if the method is native and uses the generic JNI stub. DCHECK(method->IsNative()); + ClassLinker* class_linker = runtime->GetClassLinker(); const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*)); DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method); // Generic JNI frame. diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index dcf9601b4b..b09b87fb58 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -275,9 +275,6 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) { Locks::mutator_lock_->AssertNotExclusiveHeld(self); Locks::thread_list_lock_->AssertNotHeld(self); Locks::thread_suspend_count_lock_->AssertNotHeld(self); - if (kDebugLocking && gAborting == 0) { - CHECK_NE(self->GetState(), kRunnable); - } std::vector<Thread*> suspended_count_modified_threads; size_t count = 0; diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index e1d4160aac..2db79ab229 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -665,20 +665,22 @@ bool MethodVerifier::Verify() { // Interfaces may always have static initializers for their fields. 
If we are running with // default methods enabled we also allow other public, static, non-final methods to have code. // Otherwise that is the only type of method allowed. - if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) { - if (IsInstanceConstructor()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor"; - return false; - } else if (method_access_flags_ & kAccFinal) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods"; - return false; - } else if (!(method_access_flags_ & kAccPublic)) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members"; + if (!(IsConstructor() && IsStatic())) { + if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) { + if (IsInstanceConstructor()) { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor"; + return false; + } else if (method_access_flags_ & kAccFinal) { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods"; + return false; + } else if (!(method_access_flags_ & kAccPublic)) { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members"; + return false; + } + } else { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract"; return false; } - } else if (!IsConstructor() || !IsStatic()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract"; - return false; } } @@ -3662,8 +3664,15 @@ ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess( << PrettyMethod(res_method); return nullptr; } - // Check that interface methods match interface classes. - if (klass->IsInterface() && method_type != METHOD_INTERFACE) { + // Check that interface methods are static or match interface classes. + // We only allow statics if we don't have default methods enabled. + Runtime* runtime = Runtime::Current(); + const bool default_methods_supported = + runtime == nullptr || + runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods); + if (klass->IsInterface() && + method_type != METHOD_INTERFACE && + (!default_methods_supported || method_type != METHOD_STATIC)) { Fail(VERIFY_ERROR_CLASS_CHANGE) << "non-interface method " << PrettyMethod(res_method) << " is in an interface class " << PrettyClass(klass); return nullptr; diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc index 34fb3f8a01..2dbd7e8126 100644 --- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc @@ -49,7 +49,9 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor { if (m_name.compare("f") == 0) { CHECK_REGS_CONTAIN_REFS(0x03U, true, 8); // v8: this CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x - CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x + if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) { + CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x + } CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) { CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x @@ -66,9 +68,10 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor { CHECK_REGS_CONTAIN_REFS(0x13U, false, 3); // v3: y // Note that v0: ex can be eliminated because it's a dead merge of two different exceptions. 
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex) - CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex) if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) { // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex) + CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); + // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex) CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1); // v5 is removed from the root set because there is a "merge" operation. // See 0015: if-nez v2, 001f. diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run deleted file mode 100755 index 27c501da8d..0000000000 --- a/test/004-ThreadStress/run +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# -# Copyright (C) 2015 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Be less agressive than the default debug option for the jit code cache -# to avoid timeouts. -exec ${RUN} "$@" --runtime-option -Xjitcodecachesize:1M diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java index 22829cddc8..ffeae7d9a2 100644 --- a/test/449-checker-bce/src/Main.java +++ b/test/449-checker-bce/src/Main.java @@ -624,12 +624,13 @@ public class Main { constantIndexing2(new int[3]); } catch (ArrayIndexOutOfBoundsException e) { assertIsManaged(); // This is to ensure that single-frame deoptimization works. - // Will need to be updated if constantIndexing2 is inlined. + // Will need to be updated if constantIndexing2 is inlined. try { // This will cause AIOOBE. constantIndexingForward6(new int[3]); } catch (ArrayIndexOutOfBoundsException e2) { - assertIsManaged(); + // Having deopted, we expect to be running interpreted at this point. + // Does not apply to debuggable, however, since we do not inline. return 99; } } diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java index d8618e30fb..979c4c86c0 100644 --- a/test/538-checker-embed-constants/src/Main.java +++ b/test/538-checker-embed-constants/src/Main.java @@ -260,6 +260,29 @@ public class Main { return arg ^ 0xf00000000000000fL; } + /** + * Test that the `-1` constant is not synthesized in a register and that we + * instead simply switch between `add` and `sub` instructions with the + * constant embedded. + * We need two uses (or more) of the constant because the compiler always + * defers to immediate value handling to VIXL when it has only one use. 
+ */ + + /// CHECK-START-ARM64: long Main.addM1(long) register (after) + /// CHECK: <<Arg:j\d+>> ParameterValue + /// CHECK: <<ConstM1:j\d+>> LongConstant -1 + /// CHECK-NOT: ParallelMove + /// CHECK: Add [<<Arg>>,<<ConstM1>>] + /// CHECK: Sub [<<Arg>>,<<ConstM1>>] + + /// CHECK-START-ARM64: long Main.addM1(long) disassembly (after) + /// CHECK: sub x{{\d+}}, x{{\d+}}, #0x1 + /// CHECK: add x{{\d+}}, x{{\d+}}, #0x1 + + public static long addM1(long arg) { + return (arg + (-1)) | (arg - (-1)); + } + public static void main(String[] args) { int arg = 0x87654321; assertIntEquals(and255(arg), 0x21); @@ -286,5 +309,7 @@ public class Main { assertLongEquals(xorNot15(longArg), 0xedcba987789abcd1L); assertLongEquals(xor0xfffffff00000000f(longArg), 0xedcba9888765432eL); assertLongEquals(xor0xf00000000000000f(longArg), 0xe23456788765432eL); + + assertLongEquals(14, addM1(7)); } } diff --git a/test/539-checker-arm64-encodable-immediates/info.txt b/test/539-checker-arm64-encodable-immediates/info.txt deleted file mode 100644 index efeef33231..0000000000 --- a/test/539-checker-arm64-encodable-immediates/info.txt +++ /dev/null @@ -1,2 +0,0 @@ -Basic tests that check the compiler recognizes when constant values can be -encoded in the immediate field of instructions. diff --git a/test/539-checker-arm64-encodable-immediates/src/Main.java b/test/539-checker-arm64-encodable-immediates/src/Main.java deleted file mode 100644 index 7e3ff9fde8..0000000000 --- a/test/539-checker-arm64-encodable-immediates/src/Main.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -public class Main { - - public static void assertLongEquals(long expected, long result) { - if (expected != result) { - throw new Error("Expected: " + expected + ", found: " + result); - } - } - - /** - * Test that the `-1` constant is not synthesized in a register and that we - * instead simply switch between `add` and `sub` instructions with the - * constant embedded. - * We need two uses (or more) of the constant because the compiler always - * delegates the immediate value handling to VIXL when there is only one use. 
- */ - - /// CHECK-START-ARM64: long Main.addM1(long) register (after) - /// CHECK: <<Arg:j\d+>> ParameterValue - /// CHECK: <<ConstM1:j\d+>> LongConstant -1 - /// CHECK-NOT: ParallelMove - /// CHECK: Add [<<Arg>>,<<ConstM1>>] - /// CHECK: Sub [<<Arg>>,<<ConstM1>>] - - /// CHECK-START-ARM64: long Main.addM1(long) disassembly (after) - /// CHECK: sub x{{\d+}}, x{{\d+}}, #0x1 - /// CHECK: add x{{\d+}}, x{{\d+}}, #0x1 - - public static long addM1(long arg) { - return (arg + (-1)) | (arg - (-1)); - } - - public static void main(String[] args) { - assertLongEquals(14, addM1(7)); - } -} diff --git a/test/539-checker-arm64-encodable-immediates/expected.txt b/test/542-inline-trycatch/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/539-checker-arm64-encodable-immediates/expected.txt +++ b/test/542-inline-trycatch/expected.txt diff --git a/test/542-inline-trycatch/info.txt b/test/542-inline-trycatch/info.txt new file mode 100644 index 0000000000..b3e50d3d61 --- /dev/null +++ b/test/542-inline-trycatch/info.txt @@ -0,0 +1 @@ +Tests inlining in the optimizing compiler under try/catch.
\ No newline at end of file diff --git a/test/542-inline-trycatch/src/Main.java b/test/542-inline-trycatch/src/Main.java new file mode 100644 index 0000000000..5a6e06fa0c --- /dev/null +++ b/test/542-inline-trycatch/src/Main.java @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + // The following tests make sure that we inline methods used inside try and catch + // blocks, provided they meet other inlining criteria. To do that, we rely on + // the compiler recognizing and enforcing the $inline$ and $noinline$ markers. + + // We expect a single block to always be inlined. + + private static int $inline$SingleBlock(String str) throws NumberFormatException { + return Integer.parseInt(str); + } + + // We expect a "simple" method with multiple blocks to always be inlined. + + private static int $inline$MultipleBlocks(String str, boolean is_hex) + throws NumberFormatException { + return is_hex ? Integer.parseInt(str, 16) : Integer.parseInt(str); + } + + // We expect methods with try/catch to not be inlined. Inlined try/catch + // blocks are not supported at the moment. + + private static int $noinline$TryCatch(String str) { + try { + return Integer.parseInt(str); + } catch (NumberFormatException ex) { + return -1; + } + } + + public static void testSingleBlockFromTry() { + int val = 0; + + try { + val = $inline$SingleBlock("42"); + } catch (NumberFormatException ex) { + unreachable(); + } + assertEquals(42, val); + + try { + $inline$SingleBlock("xyz"); + unreachable(); + } catch (NumberFormatException ex) {} + } + + public static void testSingleBlockFromCatch() { + int val = 0; + + try { + throwException(); + } catch (Exception ex) { + val = $inline$SingleBlock("42"); + } + assertEquals(42, val); + } + + public static void testMultipleBlocksFromTry() { + int val = 0; + + try { + val = $inline$MultipleBlocks("42", false); + } catch (NumberFormatException ex) { + unreachable(); + } + assertEquals(42, val); + + try { + val = $inline$MultipleBlocks("20", true); + } catch (NumberFormatException ex) { + unreachable(); + } + assertEquals(32, val); + + try { + $inline$MultipleBlocks("xyz", false); + unreachable(); + } catch (NumberFormatException ex) {} + + try { + $inline$MultipleBlocks("xyz", true); + unreachable(); + } catch (NumberFormatException ex) {} + } + + public static void testMultipleBlocksFromCatch() { + int val = 0; + + try { + throwException(); + } catch (Exception ex) { + val = $inline$MultipleBlocks("42", false); + } + assertEquals(42, val); + + try { + throwException(); + } catch (Exception ex) { + val = $inline$MultipleBlocks("20", true); + } + assertEquals(32, val); + } + + public static void testTryCatchFromTry() { + int val = 0; + + try { + val = $noinline$TryCatch("42"); + } catch (NumberFormatException ex) { + unreachable(); + } + assertEquals(42, val); + + try { + val = $noinline$TryCatch("xyz"); + } catch (NumberFormatException ex) { + unreachable(); + } + 
assertEquals(-1, val); + } + + public static void testTryCatchFromCatch() { + int val = 0; + + try { + throwException(); + } catch (Exception ex) { + val = $noinline$TryCatch("42"); + } + assertEquals(42, val); + + try { + throwException(); + } catch (Exception ex) { + val = $noinline$TryCatch("xyz"); + } + assertEquals(-1, val); + } + + public static void main(String[] args) { + testSingleBlockFromTry(); + testSingleBlockFromCatch(); + testMultipleBlocksFromTry(); + testMultipleBlocksFromCatch(); + testTryCatchFromTry(); + testTryCatchFromCatch(); + } + + private static void assertEquals(int expected, int actual) { + if (expected != actual) { + throw new AssertionError("Wrong result: " + expected + " != " + actual); + } + } + + private static void unreachable() { + throw new Error("Unreachable"); + } + + private static void throwException() throws Exception { + throw new Exception(); + } +} diff --git a/test/542-unresolved-access-check/expected.txt b/test/542-unresolved-access-check/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/542-unresolved-access-check/expected.txt diff --git a/test/542-unresolved-access-check/info.txt b/test/542-unresolved-access-check/info.txt new file mode 100644 index 0000000000..30d45b8ec3 --- /dev/null +++ b/test/542-unresolved-access-check/info.txt @@ -0,0 +1 @@ +Test unresolved/access checks entry points with the JIT. diff --git a/test/542-unresolved-access-check/src/Main.java b/test/542-unresolved-access-check/src/Main.java new file mode 100644 index 0000000000..2bdf47f172 --- /dev/null +++ b/test/542-unresolved-access-check/src/Main.java @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.List; +import p1.InP1; +import p1.PlaceHolder; + + +// Custom class loader to prevent loading while verifying. +class MyClassLoader extends ClassLoader { + MyClassLoader() throws Exception { + super(MyClassLoader.class.getClassLoader()); + + // Some magic to get access to the pathList field of BaseDexClassLoader. + ClassLoader loader = getClass().getClassLoader(); + Class<?> baseDexClassLoader = loader.getClass().getSuperclass(); + Field f = baseDexClassLoader.getDeclaredField("pathList"); + f.setAccessible(true); + Object pathList = f.get(loader); + + // Some magic to get access to the dexField field of pathList. + f = pathList.getClass().getDeclaredField("dexElements"); + f.setAccessible(true); + dexElements = (Object[]) f.get(pathList); + dexFileField = dexElements[0].getClass().getDeclaredField("dexFile"); + dexFileField.setAccessible(true); + } + + Object[] dexElements; + Field dexFileField; + + protected Class<?> loadClass(String className, boolean resolve) throws ClassNotFoundException { + if (className.equals("p1.OtherInP1") && !p1.PlaceHolder.entered) { + // The request comes from the verifier. 
Return null to get the access check entry + // point in the compiled code. + return null; + } + // Mimic what DexPathList.findClass is doing. + try { + for (Object element : dexElements) { + Object dex = dexFileField.get(element); + Method method = dex.getClass().getDeclaredMethod( + "loadClassBinaryName", String.class, ClassLoader.class, List.class); + + if (dex != null) { + Class clazz = (Class)method.invoke(dex, className, this, null); + if (clazz != null) { + return clazz; + } + } + } + } catch (Exception e) { /* Ignore */ } + return getParent().loadClass(className); + } +} + +public class Main { + public static void main(String[] args) throws Exception { + MyClassLoader o = new MyClassLoader(); + Class foo = o.loadClass("LoadedByMyClassLoader"); + Method m = foo.getDeclaredMethod("main"); + m.invoke(null); + } +} + +class LoadedByMyClassLoader { + public static void main() throws Exception { + for (int i = 0; i < 10000; ++i) { + // Warm up the JIT. + doTheCall(i); + } + // Sleep a while to let the JIT compile things. + // TODO(ngeoffray): Remove the sleep. b/25414532 + Thread.sleep(2000); + doTheCall(10001); + } + + public static void doTheCall(int i) { + InP1.$inline$AllocateOtherInP1(i); + InP1.$inline$AllocateArrayOtherInP1(i); + InP1.$inline$UseStaticFieldOtherInP1(i); + InP1.$inline$SetStaticFieldOtherInP1(i); + InP1.$inline$UseInstanceFieldOtherInP1(i); + InP1.$inline$SetInstanceFieldOtherInP1(i); + InP1.$inline$LoadOtherInP1(i); + InP1.$inline$StaticCallOtherInP1(i); + InP1.$inline$InstanceCallOtherInP1(i); + } +} diff --git a/test/542-unresolved-access-check/src/p1/InP1.java b/test/542-unresolved-access-check/src/p1/InP1.java new file mode 100644 index 0000000000..3516c7231b --- /dev/null +++ b/test/542-unresolved-access-check/src/p1/InP1.java @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package p1; + +public class InP1 { + public static Object $inline$AllocateOtherInP1(int i) { + // Let this method execute a while to make sure the JIT sees it hot. + if (i <= 10000) { + return null; + } + // Set the flag that we have entered InP1 code to get OtherInP1 loaded. 
+ PlaceHolder.entered = true; + return new OtherInP1(); + } + + public static Object $inline$AllocateArrayOtherInP1(int i) { + if (i <= 10000) { + return null; + } + return new OtherInP1[10]; + } + + public static Object $inline$UseStaticFieldOtherInP1(int i) { + if (i <= 10000) { + return null; + } + return OtherInP1.staticField; + } + + public static void $inline$SetStaticFieldOtherInP1(int i) { + if (i <= 10000) { + return; + } + OtherInP1.staticField = new Object(); + } + + public static Object $inline$UseInstanceFieldOtherInP1(int i) { + if (i <= 10000) { + return null; + } + return $noinline$AllocateOtherInP1().instanceField; + } + + public static void $inline$SetInstanceFieldOtherInP1(int i) { + if (i <= 10000) { + return; + } + $noinline$AllocateOtherInP1().instanceField = new Object(); + } + + public static OtherInP1 $noinline$AllocateOtherInP1() { + try { + return new OtherInP1(); + } catch (Exception e) { + throw new Error(e); + } + } + + public static Object $inline$LoadOtherInP1(int i) { + if (i <= 10000) { + return null; + } + return OtherInP1.class; + } + + public static Object $inline$StaticCallOtherInP1(int i) { + if (i <= 10000) { + return null; + } + return OtherInP1.doTheStaticCall(); + } + + public static Object $inline$InstanceCallOtherInP1(int i) { + if (i <= 10000) { + return null; + } + return $noinline$AllocateOtherInP1().doTheInstanceCall(); + } +} diff --git a/test/542-unresolved-access-check/src/p1/OtherInP1.java b/test/542-unresolved-access-check/src/p1/OtherInP1.java new file mode 100644 index 0000000000..adc1ce1297 --- /dev/null +++ b/test/542-unresolved-access-check/src/p1/OtherInP1.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package p1; + +class OtherInP1 { + OtherInP1() { + } + static Object staticField = new Object(); + Object instanceField = new Object(); + + static Object doTheStaticCall() { + return null; + } + + Object doTheInstanceCall() { + return null; + } +} diff --git a/test/542-unresolved-access-check/src/p1/PlaceHolder.java b/test/542-unresolved-access-check/src/p1/PlaceHolder.java new file mode 100644 index 0000000000..2bf4bdf15f --- /dev/null +++ b/test/542-unresolved-access-check/src/p1/PlaceHolder.java @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package p1; + +// Specific class for putting the 'entered' marker. 
If we were to put the marker +// in InP1 or in OtherInP1, the code in MyClassLoader using that marker would load +// InP1 or OtherInP1 in the system class loader, and not in MyClassLoader. +public class PlaceHolder { + public static boolean entered = false; +} diff --git a/test/962-iface-static/smali/Displayer.smali b/test/962-iface-static/smali/Displayer.smali index 06bec16432..ed4c013d3b 100644 --- a/test/962-iface-static/smali/Displayer.smali +++ b/test/962-iface-static/smali/Displayer.smali @@ -27,7 +27,7 @@ .class public LDisplayer; .super Ljava/lang/Object; -.method public static <clinit>()V +.method static constructor <clinit>()V .locals 3 sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; const-string v0, "init" diff --git a/test/962-iface-static/smali/iface.smali b/test/962-iface-static/smali/iface.smali index 441aae669e..5b9c03ec46 100644 --- a/test/962-iface-static/smali/iface.smali +++ b/test/962-iface-static/smali/iface.smali @@ -27,7 +27,7 @@ .field public final static f:LDisplayer; -.method public static <clinit>()V +.method static constructor <clinit>()V .locals 3 new-instance v1, LDisplayer; invoke-direct {v1}, LDisplayer;-><init>()V diff --git a/test/964-default-iface-init-generated/util-src/generate_smali.py b/test/964-default-iface-init-generated/util-src/generate_smali.py index be2d3ba563..3c138abf26 100755 --- a/test/964-default-iface-init-generated/util-src/generate_smali.py +++ b/test/964-default-iface-init-generated/util-src/generate_smali.py @@ -334,7 +334,7 @@ class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, # public static final Displayer field = new Displayer("{tree}"); .field public final static field:LDisplayer; -.method public static constructor <clinit>()V +.method static constructor <clinit>()V .locals 3 const-string v2, "{tree}" new-instance v1, LDisplayer; diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt index 5615f8f409..d6f55aae16 100644 --- a/tools/ahat/README.txt +++ b/tools/ahat/README.txt @@ -28,8 +28,6 @@ TODO: - Use consistent order for heap columns. Sometimes I see "app" first, sometimes last (from one heap dump to another) How about, always sort by name? - * For long strings, limit the string length shown in the summary view to - something reasonable. Say 50 chars, then add a "..." at the end. * For HeapTable with single heap shown, the heap name isn't centered? * Consistently document functions. * Should help be part of an AhatHandler, that automatically gets the menu and @@ -70,6 +68,7 @@ Things to Test: showing all the instances. * That InstanceUtils.asString properly takes into account "offset" and "count" fields, if they are present. + * InstanceUtils.getDexCacheLocation Reported Issues: * Request to be able to sort tables by size. diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java index eb9e363d8c..c2d75c4ad7 100644 --- a/tools/ahat/src/InstanceUtils.java +++ b/tools/ahat/src/InstanceUtils.java @@ -60,9 +60,21 @@ class InstanceUtils { } - // Read the string value from an hprof Instance. - // Returns null if the object can't be interpreted as a string. + /** + * Read the string value from an hprof Instance. + * Returns null if the object can't be interpreted as a string. + */ public static String asString(Instance inst) { + return asString(inst, -1); + } + + /** + * Read the string value from an hprof Instance. + * Returns null if the object can't be interpreted as a string. + * The returned string is truncated to maxChars characters. 
+ * If maxChars is negative, the returned string is not truncated. + */ + public static String asString(Instance inst, int maxChars) { if (!isInstanceOfClass(inst, "java.lang.String")) { return null; } @@ -81,13 +93,15 @@ class InstanceUtils { // array, we should use that here. int numChars = chars.getValues().length; int count = getIntField(inst, "count", numChars); - int offset = getIntField(inst, "offset", 0); - int end = offset + count - 1; - if (count == 0) { return ""; } + if (0 <= maxChars && maxChars < count) { + count = maxChars; + } + int offset = getIntField(inst, "offset", 0); + int end = offset + count - 1; if (offset >= 0 && offset < numChars && end >= 0 && end < numChars) { return new String(chars.asCharArray(offset, count)); } @@ -234,12 +248,14 @@ class InstanceUtils { * Assuming inst represents a DexCache object, return the dex location for * that dex cache. Returns null if the given instance doesn't represent a * DexCache object or the location could not be found. + * If maxChars is non-negative, the returned location is truncated to + * maxChars in length. */ - public static String getDexCacheLocation(Instance inst) { + public static String getDexCacheLocation(Instance inst, int maxChars) { if (isInstanceOfClass(inst, "java.lang.DexCache")) { Instance location = getRefField(inst, "location"); if (location != null) { - return asString(location); + return asString(location, maxChars); } } return null; diff --git a/tools/ahat/src/Value.java b/tools/ahat/src/Value.java index 9b483faa32..4eb27b1052 100644 --- a/tools/ahat/src/Value.java +++ b/tools/ahat/src/Value.java @@ -25,6 +25,10 @@ import java.net.URI; */ class Value { + // For string literals, we limit the number of characters we show to + // kMaxChars in case the string is really long. + private static int kMaxChars = 200; + /** * Create a DocString representing a summary of the given instance. */ @@ -43,15 +47,19 @@ class Value { link.append(inst.toString()); // Annotate Strings with their values. - String stringValue = InstanceUtils.asString(inst); + String stringValue = InstanceUtils.asString(inst, kMaxChars); if (stringValue != null) { - link.appendFormat("\"%s\"", stringValue); + link.appendFormat("\"%s", stringValue); + link.append(kMaxChars == stringValue.length() ? "..." : "\""); } // Annotate DexCache with its location. 
- String dexCacheLocation = InstanceUtils.getDexCacheLocation(inst); + String dexCacheLocation = InstanceUtils.getDexCacheLocation(inst, kMaxChars); if (dexCacheLocation != null) { - link.append(" for " + dexCacheLocation); + link.appendFormat(" for %s", dexCacheLocation); + if (kMaxChars == dexCacheLocation.length()) { + link.append("..."); + } } URI objTarget = DocString.formattedUri("object?id=%d", inst.getId()); diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt index 7efb1a770f..421de1715a 100644 --- a/tools/ahat/src/manifest.txt +++ b/tools/ahat/src/manifest.txt @@ -1,4 +1,4 @@ Name: ahat/ Implementation-Title: ahat -Implementation-Version: 0.2 +Implementation-Version: 0.3 Main-Class: com.android.ahat.Main diff --git a/tools/ahat/test/InstanceUtilsTest.java b/tools/ahat/test/InstanceUtilsTest.java index 7613df4994..11f82a2301 100644 --- a/tools/ahat/test/InstanceUtilsTest.java +++ b/tools/ahat/test/InstanceUtilsTest.java @@ -25,21 +25,49 @@ import org.junit.Test; public class InstanceUtilsTest { @Test - public void basicString() throws IOException { + public void asStringBasic() throws IOException { TestDump dump = TestDump.getTestDump(); Instance str = (Instance)dump.getDumpedThing("basicString"); assertEquals("hello, world", InstanceUtils.asString(str)); } @Test - public void nullString() throws IOException { + public void asStringTruncated() throws IOException { + TestDump dump = TestDump.getTestDump(); + Instance str = (Instance)dump.getDumpedThing("basicString"); + assertEquals("hello", InstanceUtils.asString(str, 5)); + } + + @Test + public void asStringExactMax() throws IOException { + TestDump dump = TestDump.getTestDump(); + Instance str = (Instance)dump.getDumpedThing("basicString"); + assertEquals("hello, world", InstanceUtils.asString(str, 12)); + } + + @Test + public void asStringNotTruncated() throws IOException { + TestDump dump = TestDump.getTestDump(); + Instance str = (Instance)dump.getDumpedThing("basicString"); + assertEquals("hello, world", InstanceUtils.asString(str, 50)); + } + + @Test + public void asStringNegativeMax() throws IOException { + TestDump dump = TestDump.getTestDump(); + Instance str = (Instance)dump.getDumpedThing("basicString"); + assertEquals("hello, world", InstanceUtils.asString(str, -3)); + } + + @Test + public void asStringNull() throws IOException { TestDump dump = TestDump.getTestDump(); Instance obj = (Instance)dump.getDumpedThing("nullString"); assertNull(InstanceUtils.asString(obj)); } @Test - public void notString() throws IOException { + public void asStringNotString() throws IOException { TestDump dump = TestDump.getTestDump(); Instance obj = (Instance)dump.getDumpedThing("anObject"); assertNotNull(obj); diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh index 67a79838ee..4b5a5ca76d 100755 --- a/tools/run-libcore-tests.sh +++ b/tools/run-libcore-tests.sh @@ -57,7 +57,6 @@ working_packages=("dalvik.system" "org.apache.harmony.luni" "org.apache.harmony.nio" "org.apache.harmony.regex" - "org.apache.harmony.security" "org.apache.harmony.testframework" "org.apache.harmony.tests.java.io" "org.apache.harmony.tests.java.lang" @@ -68,6 +67,10 @@ working_packages=("dalvik.system" "tests.java.lang.String" "jsr166") +# List of packages we could run, but don't have rights to revert +# changes in case of failures. +# "org.apache.harmony.security" + vogar_args=$@ while true; do if [[ "$1" == "--mode=device" ]]; then |
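Note on test/542-unresolved-access-check (a sketch, not part of the patch): the test hinges on a class loader that refuses to resolve p1.OtherInP1 while verification is running, so the JIT-compiled InP1 helpers are emitted with the unresolved/access-check entry points; once PlaceHolder.entered flips at runtime, the same call sites must still resolve and run correctly. The test's loader returns null from loadClass; the idiomatic variant below throws ClassNotFoundException instead. Class and field names here are illustrative stand-ins, not the test's real ones.

    // Sketch only: a loader that hides a class until a runtime flag flips.
    // "p1.Hidden" and "armed" stand in for p1.OtherInP1 and
    // p1.PlaceHolder.entered in the actual test.
    class GatingClassLoader extends ClassLoader {
        static volatile boolean armed = false;  // set once real execution starts

        GatingClassLoader(ClassLoader parent) {
            super(parent);
        }

        @Override
        protected Class<?> loadClass(String name, boolean resolve)
                throws ClassNotFoundException {
            if (name.equals("p1.Hidden") && !armed) {
                // While the verifier/compiler asks for the class, pretend it
                // does not exist so generated code keeps the slow-path
                // (unresolved / access-check) entry points.
                throw new ClassNotFoundException(name);
            }
            return super.loadClass(name, resolve);
        }
    }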
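The InP1 helpers all share the same warm-up shape: they take an iteration counter and return early for the first 10000 calls, so the JIT compiles them (with the unresolved entry points baked in) before the interesting branch ever executes. A generic, self-contained illustration of that pattern, with hypothetical names and the same crude sleep the test uses while waiting for background compilation:

    public class WarmUpPattern {
        // Hypothetical threshold mirroring the test's "i <= 10000" guards.
        static final int WARM_UP_CALLS = 10000;

        static Object maybeTouchHidden(int i) {
            if (i <= WARM_UP_CALLS) {
                // Early return keeps the branch below cold while the JIT warms up.
                return null;
            }
            // Only reached after compilation; this is where the slow-path
            // stubs would be exercised in the real test.
            return new Object();
        }

        public static void main(String[] args) throws Exception {
            for (int i = 0; i < WARM_UP_CALLS; ++i) {
                maybeTouchHidden(i);
            }
            Thread.sleep(2000);  // crude wait for background JIT, as in the test
            maybeTouchHidden(WARM_UP_CALLS + 1);
        }
    }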
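Note on the InstanceUtils.asString change (a standalone sketch, not ahat's API): clamping count to maxChars before computing offset and end is what keeps the bounds check valid for truncated reads, and a caller such as Value can detect truncation simply by comparing the returned length against the limit. The helper below works over a plain char[] with illustrative names only:

    public class TruncateDemo {
        // Mirrors the clamp-then-slice order: a negative maxChars means "no limit".
        static String asString(char[] chars, int offset, int count, int maxChars) {
            if (count == 0) {
                return "";
            }
            if (0 <= maxChars && maxChars < count) {
                count = maxChars;  // clamp before computing bounds
            }
            int end = offset + count - 1;
            if (offset >= 0 && offset < chars.length && end >= 0 && end < chars.length) {
                return new String(chars, offset, count);
            }
            return null;  // offset/count metadata out of range
        }

        public static void main(String[] args) {
            char[] data = "hello, world".toCharArray();
            String full = asString(data, 0, data.length, -1);
            String cut = asString(data, 0, data.length, 5);
            // A caller can flag truncation by comparing lengths, as Value does
            // before appending "...".
            System.out.println(full);                                    // hello, world
            System.out.println(cut + (cut.length() == 5 ? "..." : ""));  // hello...
        }
    }

One detail worth keeping in mind when reading the Value hunk: because the ellipsis check compares against the limit rather than the pre-truncation length, a string whose length is exactly kMaxChars also picks up the "..." suffix.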