From 7940e44f4517de5e2634a7e07d58d0fb26160513 Mon Sep 17 00:00:00 2001
From: Brian Carlstrom <bdc@google.com>
Date: Fri, 12 Jul 2013 13:46:57 -0700
Subject: Create separate Android.mk for main build targets

The runtime, compiler, dex2oat, and oatdump now are in separate trees
to prevent dependency creep. They can now be individually built without
rebuilding the rest of the art projects. dalvikvm and jdwpspy were
already this way.

Builds in the art directory should behave as before, building
everything including tests.

Change-Id: Ic6b1151e5ed0f823c3dd301afd2b13eb2d8feb81
---
 compiler/dex/quick/codegen_util.cc | 1109 ++++++++++++++++++++++++++++++++++++
 1 file changed, 1109 insertions(+)
 create mode 100644 compiler/dex/quick/codegen_util.cc

(limited to 'compiler/dex/quick/codegen_util.cc')

diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
new file mode 100644
index 0000000000..5c10c4ce2b
--- /dev/null
+++ b/compiler/dex/quick/codegen_util.cc
@@ -0,0 +1,1109 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/compiler_internals.h"
+#include "dex_file-inl.h"
+#include "gc_map.h"
+#include "mir_to_lir-inl.h"
+#include "verifier/dex_gc_map.h"
+#include "verifier/method_verifier.h"
+
+namespace art {
+
+bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src)
+{
+  bool res = false;
+  if (rl_src.is_const) {
+    if (rl_src.wide) {
+      if (rl_src.fp) {
+        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
+      } else {
+        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
+      }
+    } else {
+      if (rl_src.fp) {
+        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
+      } else {
+        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
+      }
+    }
+  }
+  return res;
+}
+
+void Mir2Lir::MarkSafepointPC(LIR* inst)
+{
+  inst->def_mask = ENCODE_ALL;
+  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
+  DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
+}
+
+bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put)
+{
+  return cu_->compiler_driver->ComputeInstanceFieldInfo(
+      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
+}
+
+/* Convert an instruction to a NOP */
+void Mir2Lir::NopLIR(LIR* lir)
+{
+  lir->flags.is_nop = true;
+}
+
+void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type)
+{
+  uint64_t* mask_ptr;
+  uint64_t mask = ENCODE_MEM;
+  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
+  if (is_load) {
+    mask_ptr = &lir->use_mask;
+  } else {
+    mask_ptr = &lir->def_mask;
+  }
+  /* Clear out the memref flags */
+  *mask_ptr &= ~mask;
+  /* ..and then add back the one we need */
+  switch (mem_type) {
+    case kLiteral:
+      DCHECK(is_load);
+      *mask_ptr |= ENCODE_LITERAL;
+      break;
+    case kDalvikReg:
+      *mask_ptr |= ENCODE_DALVIK_REG;
+      break;
+    case kHeapRef:
+      *mask_ptr |= ENCODE_HEAP_REF;
+      break;
+    case kMustNotAlias:
+      /* Currently only loads can be marked as kMustNotAlias */
+      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
+      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
+      break;
+    default:
+      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
+  }
+}
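Aside: SetMemRefType narrows an instruction's use/def resource mask to a single memory class so later optimization passes know which accesses may alias. A minimal standalone sketch of the same bit surgery, assuming illustrative mask constants (the real ENCODE_* values are target-specific and are not defined in this patch):

    #include <cstdint>

    // Hypothetical stand-ins for the target's resource-mask bits.
    constexpr uint64_t kEncodeLiteral   = 1ULL << 60;
    constexpr uint64_t kEncodeDalvikReg = 1ULL << 61;
    constexpr uint64_t kEncodeMem       = kEncodeLiteral | kEncodeDalvikReg;

    // Re-type a mask: drop every memory class, then set exactly one.
    uint64_t RetypeAsLiteral(uint64_t mask) {
      mask &= ~kEncodeMem;     // clear all memref bits
      mask |= kEncodeLiteral;  // mark as a literal-pool access only
      return mask;
    }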
+
+/*
+ * Mark load/store instructions that access Dalvik registers through the stack.
+ */
+void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
+                                      bool is64bit)
+{
+  SetMemRefType(lir, is_load, kDalvikReg);
+
+  /*
+   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
+   * access.
+   */
+  lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
+}
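The alias_info encoding is compact: the Dalvik register id sits in the low bits and one flag bit records a wide (64-bit) access. ENCODE_ALIAS_INFO itself is defined elsewhere in the tree; a plausible sketch of the packing, with the flag position assumed for illustration only:

    #include <cstdint>

    // Assumed layout: top bit flags a 64-bit access, low bits hold the vreg id.
    constexpr uint32_t kWideFlag = 1u << 31;

    inline uint32_t EncodeAliasInfo(int reg_id, bool is64bit) {
      return static_cast<uint32_t>(reg_id) | (is64bit ? kWideFlag : 0u);
    }
    inline bool IsWide(uint32_t info) { return (info & kWideFlag) != 0; }
    inline int  RegId(uint32_t info)  { return static_cast<int>(info & ~kWideFlag); }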
"(nop)" : ""); + } + break; + } + + if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) { + DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->use_mask, "use")); + } + if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) { + DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->def_mask, "def")); + } +} + +void Mir2Lir::DumpPromotionMap() +{ + int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1; + for (int i = 0; i < num_regs; i++) { + PromotionMap v_reg_map = promotion_map_[i]; + std::string buf; + if (v_reg_map.fp_location == kLocPhysReg) { + StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask()); + } + + std::string buf3; + if (i < cu_->num_dalvik_registers) { + StringAppendF(&buf3, "%02d", i); + } else if (i == mir_graph_->GetMethodSReg()) { + buf3 = "Method*"; + } else { + StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers); + } + + LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(), + v_reg_map.core_location == kLocPhysReg ? + "r" : "SP+", v_reg_map.core_location == kLocPhysReg ? + v_reg_map.core_reg : SRegOffset(i), + buf.c_str()); + } +} + +/* Dump a mapping table */ +void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor, + const std::string& name, const std::string& signature, + const std::vector& v) { + if (v.size() > 0) { + std::string line(StringPrintf("\n %s %s%s_%s_table[%zu] = {", table_name, + descriptor.c_str(), name.c_str(), signature.c_str(), v.size())); + std::replace(line.begin(), line.end(), ';', '_'); + LOG(INFO) << line; + for (uint32_t i = 0; i < v.size(); i+=2) { + line = StringPrintf(" {0x%05x, 0x%04x},", v[i], v[i+1]); + LOG(INFO) << line; + } + LOG(INFO) <<" };\n\n"; + } +} + +/* Dump instructions and constant pool contents */ +void Mir2Lir::CodegenDump() +{ + LOG(INFO) << "Dumping LIR insns for " + << PrettyMethod(cu_->method_idx, *cu_->dex_file); + LIR* lir_insn; + int insns_size = cu_->code_item->insns_size_in_code_units_; + + LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs; + LOG(INFO) << "Ins : " << cu_->num_ins; + LOG(INFO) << "Outs : " << cu_->num_outs; + LOG(INFO) << "CoreSpills : " << num_core_spills_; + LOG(INFO) << "FPSpills : " << num_fp_spills_; + LOG(INFO) << "CompilerTemps : " << cu_->num_compiler_temps; + LOG(INFO) << "Frame size : " << frame_size_; + LOG(INFO) << "code size is " << total_size_ << + " bytes, Dalvik size is " << insns_size * 2; + LOG(INFO) << "expansion factor: " + << static_cast(total_size_) / static_cast(insns_size * 2); + DumpPromotionMap(); + for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) { + DumpLIRInsn(lir_insn, 0); + } + for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) { + LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset, + lir_insn->operands[0]); + } + + const DexFile::MethodId& method_id = + cu_->dex_file->GetMethodId(cu_->method_idx); + std::string signature(cu_->dex_file->GetMethodSignature(method_id)); + std::string name(cu_->dex_file->GetMethodName(method_id)); + std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id)); + + // Dump mapping tables + DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_); + DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_); +} + +/* + * Search the existing constants in the literal pool for an exact or close match + * within specified delta (greater or equal to 0). 
+
+/* Dump instructions and constant pool contents */
+void Mir2Lir::CodegenDump()
+{
+  LOG(INFO) << "Dumping LIR insns for "
+            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  LIR* lir_insn;
+  int insns_size = cu_->code_item->insns_size_in_code_units_;
+
+  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
+  LOG(INFO) << "Ins                  : " << cu_->num_ins;
+  LOG(INFO) << "Outs                 : " << cu_->num_outs;
+  LOG(INFO) << "CoreSpills           : " << num_core_spills_;
+  LOG(INFO) << "FPSpills             : " << num_fp_spills_;
+  LOG(INFO) << "CompilerTemps        : " << cu_->num_compiler_temps;
+  LOG(INFO) << "Frame size           : " << frame_size_;
+  LOG(INFO) << "code size is " << total_size_ <<
+    " bytes, Dalvik size is " << insns_size * 2;
+  LOG(INFO) << "expansion factor: "
+            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
+  DumpPromotionMap();
+  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+    DumpLIRInsn(lir_insn, 0);
+  }
+  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
+    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
+                              lir_insn->operands[0]);
+  }
+
+  const DexFile::MethodId& method_id =
+      cu_->dex_file->GetMethodId(cu_->method_idx);
+  std::string signature(cu_->dex_file->GetMethodSignature(method_id));
+  std::string name(cu_->dex_file->GetMethodName(method_id));
+  std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));
+
+  // Dump mapping tables
+  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
+  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
+}
+
+/*
+ * Search the existing constants in the literal pool for an exact or close match
+ * within specified delta (greater or equal to 0).
+ */
+LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta)
+{
+  while (data_target) {
+    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
+      return data_target;
+    data_target = data_target->next;
+  }
+  return NULL;
+}
+
+/* Search the existing constants in the literal pool for an exact wide match */
+LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi)
+{
+  bool lo_match = false;
+  LIR* lo_target = NULL;
+  while (data_target) {
+    if (lo_match && (data_target->operands[0] == val_hi)) {
+      // Record high word in case we need to expand this later.
+      lo_target->operands[1] = val_hi;
+      return lo_target;
+    }
+    lo_match = false;
+    if (data_target->operands[0] == val_lo) {
+      lo_match = true;
+      lo_target = data_target;
+    }
+    data_target = data_target->next;
+  }
+  return NULL;
+}
+
+/*
+ * The following are building blocks to insert constants into the pool or
+ * instruction streams.
+ */
+
+/* Add a 32-bit constant to the constant pool */
+LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value)
+{
+  /* Add the constant to the literal pool */
+  if (constant_list_p) {
+    LIR* new_value = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData));
+    new_value->operands[0] = value;
+    new_value->next = *constant_list_p;
+    *constant_list_p = new_value;
+    return new_value;
+  }
+  return NULL;
+}
+
+/* Add a 64-bit constant to the constant pool or mixed with code */
+LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi)
+{
+  AddWordData(constant_list_p, val_hi);
+  return AddWordData(constant_list_p, val_lo);
+}
+
+static void PushWord(std::vector<uint8_t>& buf, int data) {
+  buf.push_back(data & 0xff);
+  buf.push_back((data >> 8) & 0xff);
+  buf.push_back((data >> 16) & 0xff);
+  buf.push_back((data >> 24) & 0xff);
+}
+
+static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
+  while (buf.size() < offset) {
+    buf.push_back(0);
+  }
+}
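PushWord appends a 32-bit value in little-endian byte order, matching the target's in-memory layout. A quick sanity check of the encoding, with a read-back helper added here only for illustration:

    #include <cstdint>
    #include <vector>

    static void PushWord(std::vector<uint8_t>& buf, int data) {
      buf.push_back(data & 0xff);
      buf.push_back((data >> 8) & 0xff);
      buf.push_back((data >> 16) & 0xff);
      buf.push_back((data >> 24) & 0xff);
    }

    static uint32_t ReadWord(const std::vector<uint8_t>& buf, size_t at) {
      return buf[at] | (buf[at + 1] << 8) | (buf[at + 2] << 16) |
             (static_cast<uint32_t>(buf[at + 3]) << 24);
    }

    // PushWord(buf, 0x12345678) stores bytes 78 56 34 12;
    // ReadWord(buf, 0) then returns 0x12345678.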
+
+/* Write the literal pool to the output stream */
+void Mir2Lir::InstallLiteralPools()
+{
+  AlignBuffer(code_buffer_, data_offset_);
+  LIR* data_lir = literal_list_;
+  while (data_lir != NULL) {
+    PushWord(code_buffer_, data_lir->operands[0]);
+    data_lir = NEXT_LIR(data_lir);
+  }
+  // Push code and method literals, record offsets for the compiler to patch.
+  data_lir = code_literal_list_;
+  while (data_lir != NULL) {
+    uint32_t target = data_lir->operands[0];
+    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
+                                       cu_->method_idx,
+                                       cu_->invoke_type,
+                                       target,
+                                       static_cast<InvokeType>(data_lir->operands[1]),
+                                       code_buffer_.size());
+    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
+    // unique based on target to ensure code deduplication works
+    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+    PushWord(code_buffer_, unique_patch_value);
+    data_lir = NEXT_LIR(data_lir);
+  }
+  data_lir = method_literal_list_;
+  while (data_lir != NULL) {
+    uint32_t target = data_lir->operands[0];
+    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
+                                         cu_->method_idx,
+                                         cu_->invoke_type,
+                                         target,
+                                         static_cast<InvokeType>(data_lir->operands[1]),
+                                         code_buffer_.size());
+    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
+    // unique based on target to ensure code deduplication works
+    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+    PushWord(code_buffer_, unique_patch_value);
+    data_lir = NEXT_LIR(data_lir);
+  }
+}
+
+/* Write the switch tables to the output stream */
+void Mir2Lir::InstallSwitchTables()
+{
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
+  while (true) {
+    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    AlignBuffer(code_buffer_, tab_rec->offset);
+    /*
+     * For Arm, our reference point is the address of the bx
+     * instruction that does the launch, so we have to subtract
+     * the auto pc-advance.  For other targets the reference point
+     * is a label, so we can use the offset as-is.
+     */
+    int bx_offset = INVALID_OFFSET;
+    switch (cu_->instruction_set) {
+      case kThumb2:
+        bx_offset = tab_rec->anchor->offset + 4;
+        break;
+      case kX86:
+        bx_offset = 0;
+        break;
+      case kMips:
+        bx_offset = tab_rec->anchor->offset;
+        break;
+      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
+    }
+    if (cu_->verbose) {
+      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
+    }
+    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+      const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
+      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+        int disp = tab_rec->targets[elems]->offset - bx_offset;
+        if (cu_->verbose) {
+          LOG(INFO) << "  Case[" << elems << "] key: 0x"
+                    << std::hex << keys[elems] << ", disp: 0x"
+                    << std::hex << disp;
+        }
+        PushWord(code_buffer_, keys[elems]);
+        PushWord(code_buffer_,
+                 tab_rec->targets[elems]->offset - bx_offset);
+      }
+    } else {
+      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
+                static_cast<int>(Instruction::kPackedSwitchSignature));
+      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+        int disp = tab_rec->targets[elems]->offset - bx_offset;
+        if (cu_->verbose) {
+          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
+                    << std::hex << disp;
+        }
+        PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
+      }
+    }
+  }
+}
+
+/* Write the fill array data to the output stream */
+void Mir2Lir::InstallFillArrayData()
+{
+  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
+  while (true) {
+    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    AlignBuffer(code_buffer_, tab_rec->offset);
+    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
+      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
+      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
+    }
+  }
+}
+
+static int AssignLiteralOffsetCommon(LIR* lir, int offset)
+{
+  for (; lir != NULL; lir = lir->next) {
+    lir->offset = offset;
+    offset += 4;
+  }
+  return offset;
+}
+
+// Make sure we have a code address for every declared catch entry
+bool Mir2Lir::VerifyCatchEntries()
+{
+  bool success = true;
+  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
+       it != mir_graph_->catches_.end(); ++it) {
+    uint32_t dex_pc = *it;
+    bool found = false;
+    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
+      if (dex_pc == dex2pc_mapping_table_[i+1]) {
+        found = true;
+        break;
+      }
+    }
+    if (!found) {
+      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
+      success = false;
+    }
+  }
+  // Now, try in the other direction
+  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
+    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
+    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
+      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
+      success = false;
+    }
+  }
+  if (!success) {
+    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
+              << dex2pc_mapping_table_.size()/2;
+  }
+  return success;
+}
+
+
+void Mir2Lir::CreateMappingTables()
+{
+  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
+      pc2dex_mapping_table_.push_back(tgt_lir->offset);
+      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
+    }
+    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
+      dex2pc_mapping_table_.push_back(tgt_lir->offset);
+      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
+    }
+  }
+  if (kIsDebugBuild) {
+    DCHECK(VerifyCatchEntries());
+  }
+  combined_mapping_table_.push_back(pc2dex_mapping_table_.size() +
+                                    dex2pc_mapping_table_.size());
+  combined_mapping_table_.push_back(pc2dex_mapping_table_.size());
+  combined_mapping_table_.insert(combined_mapping_table_.end(), pc2dex_mapping_table_.begin(),
+                                 pc2dex_mapping_table_.end());
+  combined_mapping_table_.insert(combined_mapping_table_.end(), dex2pc_mapping_table_.begin(),
+                                 dex2pc_mapping_table_.end());
+}
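CreateMappingTables produces one flat uint32_t vector: a two-word header (total element count of both tables, then the pc2dex element count) followed by the pc2dex pairs and then the dex2pc pairs. Decoding only needs the header; a sketch, using hypothetical names:

    #include <cstdint>
    #include <vector>

    // Split the combined table back into its two halves (layout as built above).
    struct MappingView {
      const uint32_t* pc2dex;  size_t pc2dex_words;
      const uint32_t* dex2pc;  size_t dex2pc_words;
    };

    MappingView Decode(const std::vector<uint32_t>& t) {
      size_t total = t[0];         // elements in both tables combined
      size_t pc2dex_words = t[1];  // elements in the first table
      MappingView v;
      v.pc2dex = &t[2];
      v.pc2dex_words = pc2dex_words;
      v.dex2pc = &t[2 + pc2dex_words];
      v.dex2pc_words = total - pc2dex_words;
      return v;
    }

Each entry is a (native offset, dex pc) pair, so an element count of 4 means two entries.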
+
+class NativePcToReferenceMapBuilder {
+ public:
+  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
+                                size_t entries, uint32_t max_native_offset,
+                                size_t references_width) : entries_(entries),
+                                references_width_(references_width), in_use_(entries),
+                                table_(table) {
+    // Compute width in bytes needed to hold max_native_offset.
+    native_offset_width_ = 0;
+    while (max_native_offset != 0) {
+      native_offset_width_++;
+      max_native_offset >>= 8;
+    }
+    // Resize table and set up header.
+    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
+    CHECK_LT(native_offset_width_, 1U << 3);
+    (*table)[0] = native_offset_width_ & 7;
+    CHECK_LT(references_width_, 1U << 13);
+    (*table)[0] |= (references_width_ << 3) & 0xFF;
+    (*table)[1] = (references_width_ >> 5) & 0xFF;
+    CHECK_LT(entries, 1U << 16);
+    (*table)[2] = entries & 0xFF;
+    (*table)[3] = (entries >> 8) & 0xFF;
+  }
+
+  void AddEntry(uint32_t native_offset, const uint8_t* references) {
+    size_t table_index = TableIndex(native_offset);
+    while (in_use_[table_index]) {
+      table_index = (table_index + 1) % entries_;
+    }
+    in_use_[table_index] = true;
+    SetNativeOffset(table_index, native_offset);
+    DCHECK_EQ(native_offset, GetNativeOffset(table_index));
+    SetReferences(table_index, references);
+  }
+
+ private:
+  size_t TableIndex(uint32_t native_offset) {
+    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
+  }
+
+  uint32_t GetNativeOffset(size_t table_index) {
+    uint32_t native_offset = 0;
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    for (size_t i = 0; i < native_offset_width_; i++) {
+      native_offset |= (*table_)[table_offset + i] << (i * 8);
+    }
+    return native_offset;
+  }
+
+  void SetNativeOffset(size_t table_index, uint32_t native_offset) {
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    for (size_t i = 0; i < native_offset_width_; i++) {
+      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
+    }
+  }
+
+  void SetReferences(size_t table_index, const uint8_t* references) {
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
+  }
+
+  size_t EntryWidth() const {
+    return native_offset_width_ + references_width_;
+  }
+
+  // Number of entries in the table.
+  const size_t entries_;
+  // Number of bytes used to encode the reference bitmap.
+  const size_t references_width_;
+  // Number of bytes used to encode a native offset.
+  size_t native_offset_width_;
+  // Entries that are in use.
+  std::vector<bool> in_use_;
+  // The table we're building.
+  std::vector<uint8_t>* const table_;
+};
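The builder is an open-addressed hash table with linear probing, serialized straight into bytes. Its four header bytes pack three fields: bits 0-2 of byte 0 hold the native-offset width, the next 13 bits hold the reference-bitmap width, and bytes 2-3 hold the entry count. A worked example with a 2-byte offset width, a 4-byte bitmap, and 10 entries:

    // header[0] = ((4 << 3) | 2) & 0xFF = 0x22  (ref width 4 in bits 3+, offset width 2)
    // header[1] = (4 >> 5) & 0xFF       = 0x00  (high bits of the ref width)
    // header[2] = 10 & 0xFF             = 0x0A  (entry count, low byte)
    // header[3] = (10 >> 8) & 0xFF      = 0x00  (entry count, high byte)
    // Each of the 10 entries then occupies 2 + 4 = 6 bytes after the header,
    // so the whole table is 4 + 10 * 6 = 64 bytes.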
+
+void Mir2Lir::CreateNativeGcMap() {
+  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
+  uint32_t max_native_offset = 0;
+  for (size_t i = 0; i < mapping_table.size(); i += 2) {
+    uint32_t native_offset = mapping_table[i + 0];
+    if (native_offset > max_native_offset) {
+      max_native_offset = native_offset;
+    }
+  }
+  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
+  const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
+  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
+  // Compute native offset to references size.
+  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
+                                                      mapping_table.size() / 2, max_native_offset,
+                                                      dex_gc_map.RegWidth());
+
+  for (size_t i = 0; i < mapping_table.size(); i += 2) {
+    uint32_t native_offset = mapping_table[i + 0];
+    uint32_t dex_pc = mapping_table[i + 1];
+    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
+    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
+    native_gc_map_builder.AddEntry(native_offset, references);
+  }
+}
+
+/* Determine the offset of each literal field */
+int Mir2Lir::AssignLiteralOffset(int offset)
+{
+  offset = AssignLiteralOffsetCommon(literal_list_, offset);
+  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
+  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
+  return offset;
+}
+
+int Mir2Lir::AssignSwitchTablesOffset(int offset)
+{
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
+  while (true) {
+    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    tab_rec->offset = offset;
+    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+      offset += tab_rec->table[1] * (sizeof(int) * 2);
+    } else {
+      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
+                static_cast<int>(Instruction::kPackedSwitchSignature));
+      offset += tab_rec->table[1] * sizeof(int);
+    }
+  }
+  return offset;
+}
+
+int Mir2Lir::AssignFillArrayDataOffset(int offset)
+{
+  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
+  while (true) {
+    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    tab_rec->offset = offset;
+    offset += tab_rec->size;
+    // word align
+    offset = (offset + 3) & ~3;
+  }
+  return offset;
+}
+
+// LIR offset assignment.
+int Mir2Lir::AssignInsnOffsets()
+{
+  LIR* lir;
+  int offset = 0;
+
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+    lir->offset = offset;
+    if (lir->opcode >= 0) {
+      if (!lir->flags.is_nop) {
+        offset += lir->flags.size;
+      }
+    } else if (lir->opcode == kPseudoPseudoAlign4) {
+      if (offset & 0x2) {
+        offset += 2;
+        lir->operands[0] = 1;
+      } else {
+        lir->operands[0] = 0;
+      }
+    }
+    /* Pseudo opcodes don't consume space */
+  }
+
+  return offset;
+}
+
+/*
+ * Walk the compilation unit and assign offsets to instructions
+ * and literals and compute the total size of the compiled unit.
+ */
+void Mir2Lir::AssignOffsets()
+{
+  int offset = AssignInsnOffsets();
+
+  /* Const values have to be word aligned */
+  offset = (offset + 3) & ~3;
+
+  /* Set up offsets for literals */
+  data_offset_ = offset;
+
+  offset = AssignLiteralOffset(offset);
+
+  offset = AssignSwitchTablesOffset(offset);
+
+  offset = AssignFillArrayDataOffset(offset);
+
+  total_size_ = offset;
+}
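The recurring `(offset + 3) & ~3` idiom rounds an offset up to the next 4-byte boundary; data must be word aligned before literals are laid out. Two concrete cases:

    // (0x1D + 3) & ~3 == 0x20   -> 29 rounds up to 32
    // (0x20 + 3) & ~3 == 0x20   -> already-aligned values are unchanged
    static_assert(((0x1D + 3) & ~3) == 0x20, "rounds up");
    static_assert(((0x20 + 3) & ~3) == 0x20, "idempotent when aligned");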
+
+/*
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If out-of-range branch distance is
+ * seen rearrange the instructions a bit to correct it.
+ */
+void Mir2Lir::AssembleLIR()
+{
+  AssignOffsets();
+  int assembler_retries = 0;
+  /*
+   * Assemble here. Note that we generate code with optimistic assumptions
+   * and if found not to work, we'll have to redo the sequence and retry.
+   */
+
+  while (true) {
+    AssemblerStatus res = AssembleInstructions(0);
+    if (res == kSuccess) {
+      break;
+    } else {
+      assembler_retries++;
+      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
+        CodegenDump();
+        LOG(FATAL) << "Assembler error - too many retries";
+      }
+      // Redo offsets and try again
+      AssignOffsets();
+      code_buffer_.clear();
+    }
+  }
+
+  // Install literals
+  InstallLiteralPools();
+
+  // Install switch tables
+  InstallSwitchTables();
+
+  // Install fill array data
+  InstallFillArrayData();
+
+  // Create the mapping table and native offset to reference map.
+  CreateMappingTables();
+
+  CreateNativeGcMap();
+}
+
+/*
+ * Insert a kPseudoCaseLabel at the beginning of the Dalvik
+ * offset vaddr.  This label will be used to fix up the case
+ * branch table during the assembly phase.  Be sure to set
+ * all resource flags on this to prevent code motion across
+ * target boundaries.  KeyVal is just there for debugging.
+ */
+LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal)
+{
+  SafeMap<unsigned int, LIR*>::iterator it;
+  it = boundary_map_.find(vaddr);
+  if (it == boundary_map_.end()) {
+    LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
+  }
+  LIR* new_label = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+  new_label->dalvik_offset = vaddr;
+  new_label->opcode = kPseudoCaseLabel;
+  new_label->operands[0] = keyVal;
+  InsertLIRAfter(it->second, new_label);
+  return new_label;
+}
+
+void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec)
+{
+  const uint16_t* table = tab_rec->table;
+  int base_vaddr = tab_rec->vaddr;
+  const int *targets = reinterpret_cast<const int*>(&table[4]);
+  int entries = table[1];
+  int low_key = s4FromSwitchData(&table[2]);
+  for (int i = 0; i < entries; i++) {
+    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
+  }
+}
+
+void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec)
+{
+  const uint16_t* table = tab_rec->table;
+  int base_vaddr = tab_rec->vaddr;
+  int entries = table[1];
+  const int* keys = reinterpret_cast<const int*>(&table[2]);
+  const int* targets = &keys[entries];
+  for (int i = 0; i < entries; i++) {
+    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
+  }
+}
+
+void Mir2Lir::ProcessSwitchTables()
+{
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
+  while (true) {
+    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
+    if (tab_rec == NULL) break;
+    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
+      MarkPackedCaseLabels(tab_rec);
+    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+      MarkSparseCaseLabels(tab_rec);
+    } else {
+      LOG(FATAL) << "Invalid switch table";
+    }
+  }
+}
+
+void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table)
+  /*
+   * Sparse switch data format:
+   *  ushort ident = 0x0200   magic value
+   *  ushort size             number of entries in the table; > 0
+   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
+   *  int targets[size]       branch targets, relative to switch opcode
+   *
+   * Total size is (2+size*4) 16-bit code units.
+   */
+{
+  uint16_t ident = table[0];
+  int entries = table[1];
+  const int* keys = reinterpret_cast<const int*>(&table[2]);
+  const int* targets = &keys[entries];
+  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
+            << ", entries: " << std::dec << entries;
+  for (int i = 0; i < entries; i++) {
+    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
+  }
+}
+
+void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table)
+  /*
+   * Packed switch data format:
+   *  ushort ident = 0x0100   magic value
+   *  ushort size             number of entries in the table
+   *  int first_key           first (and lowest) switch case value
+   *  int targets[size]       branch targets, relative to switch opcode
+   *
+   * Total size is (4+size*2) 16-bit code units.
+   */
+{
+  uint16_t ident = table[0];
+  const int* targets = reinterpret_cast<const int*>(&table[4]);
+  int entries = table[1];
+  int low_key = s4FromSwitchData(&table[2]);
+  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
+            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
+  for (int i = 0; i < entries; i++) {
+    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
+              << targets[i];
+  }
+}
+
+/*
+ * Set up special LIR to mark a Dalvik byte-code instruction start and
+ * record it in the boundary_map.  NOTE: in cases such as kMirOpCheck in
+ * which we split a single Dalvik instruction, only the first MIR op
+ * associated with a Dalvik PC should be entered into the map.
+ */
+LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str)
+{
+  LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+  if (boundary_map_.find(offset) == boundary_map_.end()) {
+    boundary_map_.Put(offset, res);
+  }
+  return res;
+}
+
+bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2)
+{
+  bool is_taken;
+  switch (opcode) {
+    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
+    case Instruction::IF_NE: is_taken = (src1 != src2); break;
+    case Instruction::IF_LT: is_taken = (src1 < src2); break;
+    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
+    case Instruction::IF_GT: is_taken = (src1 > src2); break;
+    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
+    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
+    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
+    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
+    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
+    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
+    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
+    default:
+      LOG(FATAL) << "Unexpected opcode " << opcode;
+      is_taken = false;
+  }
+  return is_taken;
+}
+
+// Convert relation of src1/src2 to src2/src1
+ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
+  ConditionCode res;
+  switch (before) {
+    case kCondEq: res = kCondEq; break;
+    case kCondNe: res = kCondNe; break;
+    case kCondLt: res = kCondGt; break;
+    case kCondGt: res = kCondLt; break;
+    case kCondLe: res = kCondGe; break;
+    case kCondGe: res = kCondLe; break;
+    default:
+      res = static_cast<ConditionCode>(0);
+      LOG(FATAL) << "Unexpected ccode " << before;
+  }
+  return res;
+}
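Tying the sparse format together: a two-entry payload for keys 5 and 17, with targets at +0x10 and +0x20 code units from the switch opcode, occupies 2 + 2*4 = 10 code units. All values below are illustrative:

    // ushort ident    : 0x0200
    // ushort size     : 0x0002
    // int    keys[]   : 5, 17        (sorted low-to-high)
    // int    targets[]: 0x10, 0x20   (relative to the switch opcode)
    const uint16_t payload[10] = {0x0200, 0x0002,
                                  0x0005, 0x0000, 0x0011, 0x0000,   // keys as halfword pairs
                                  0x0010, 0x0000, 0x0020, 0x0000};  // targets as halfword pairs

Reading it back with the casts used above, keys[0] == 5 and targets[1] == 0x20.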
+
+// TODO: move to mir_to_lir.cc
+Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Backend(arena),
+      literal_list_(NULL),
+      method_literal_list_(NULL),
+      code_literal_list_(NULL),
+      cu_(cu),
+      mir_graph_(mir_graph),
+      switch_tables_(arena, 4, kGrowableArraySwitchTables),
+      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
+      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
+      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
+      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
+      data_offset_(0),
+      total_size_(0),
+      block_label_list_(NULL),
+      current_dalvik_offset_(0),
+      reg_pool_(NULL),
+      live_sreg_(0),
+      num_core_spills_(0),
+      num_fp_spills_(0),
+      frame_size_(0),
+      core_spill_mask_(0),
+      fp_spill_mask_(0),
+      first_lir_insn_(NULL),
+      last_lir_insn_(NULL)
+ {
+  promotion_map_ = static_cast<PromotionMap*>
+      (arena_->NewMem((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
+                      sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc));
+}
+
+void Mir2Lir::Materialize() {
+  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming
+
+  /* Allocate Registers using simple local allocation scheme */
+  SimpleRegAlloc();
+
+  // FIXME: re-enable by retrieving from mir_graph
+  SpecialCaseHandler special_case = kNoHandler;
+
+  if (special_case != kNoHandler) {
+    /*
+     * Custom codegen for special cases.  If for any reason the
+     * special codegen doesn't succeed, first_lir_insn_ will be
+     * set to NULL.
+     */
+    SpecialMIR2LIR(special_case);
+  }
+
+  /* Convert MIR to LIR, etc. */
+  if (first_lir_insn_ == NULL) {
+    MethodMIR2LIR();
+  }
+
+  /* Method is not empty */
+  if (first_lir_insn_) {
+    // mark the targets of switch statement case labels
+    ProcessSwitchTables();
+
+    /* Convert LIR into machine code. */
+    AssembleLIR();
+
+    if (cu_->verbose) {
+      CodegenDump();
+    }
+  }
+}
+
+CompiledMethod* Mir2Lir::GetCompiledMethod() {
+  // Combine vmap tables - core regs, then fp regs - into vmap_table
+  std::vector<uint16_t> vmap_table;
+  // Core regs may have been inserted out of order - sort first
+  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
+  for (size_t i = 0 ; i < core_vmap_table_.size(); i++) {
+    // Copy, stripping out the phys register sort key
+    vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
+  }
+  // If we have a frame, push a marker to take place of lr
+  if (frame_size_ > 0) {
+    vmap_table.push_back(INVALID_VREG);
+  } else {
+    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
+    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
+  }
+  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
+  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
+    vmap_table.push_back(fp_vmap_table_[i]);
+  }
+  CompiledMethod* result =
+      new CompiledMethod(cu_->instruction_set, code_buffer_,
+                         frame_size_, core_spill_mask_, fp_spill_mask_,
+                         combined_mapping_table_, vmap_table, native_gc_map_);
+  return result;
+}
+
+int Mir2Lir::ComputeFrameSize() {
+  /* Figure out the frame size */
+  static const uint32_t kAlignMask = kStackAlignment - 1;
+  uint32_t size = (num_core_spills_ + num_fp_spills_ +
+                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
+                   cu_->num_compiler_temps + 1 /* cur_method* */)
+                   * sizeof(uint32_t);
+  /* Align and set */
+  return (size + kAlignMask) & ~(kAlignMask);
+}
+
+/*
+ * Append an LIR instruction to the LIR list maintained by a compilation
+ * unit
+ */
+void Mir2Lir::AppendLIR(LIR* lir)
+{
+  if (first_lir_insn_ == NULL) {
+    DCHECK(last_lir_insn_ == NULL);
+    last_lir_insn_ = first_lir_insn_ = lir;
+    lir->prev = lir->next = NULL;
+  } else {
+    last_lir_insn_->next = lir;
+    lir->prev = last_lir_insn_;
+    lir->next = NULL;
+    last_lir_insn_ = lir;
+  }
+}
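ComputeFrameSize rounds the raw slot count up to the target's stack alignment. For instance, with 2 core spills, 0 FP spills, 5 locals, 3 outs, no compiler temps, and a hypothetical kStackAlignment of 16:

    // slots = 2 (spills) + 1 (filler) + 5 (num_regs) + 3 (num_outs)
    //       + 0 (compiler temps) + 1 (cur_method*) = 12
    // size  = 12 * 4 = 48 bytes; (48 + 15) & ~15 = 48  -> already aligned
    // with one more out: 13 * 4 = 52; (52 + 15) & ~15 = 64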
+
+/*
+ * Insert an LIR instruction before the current instruction, which cannot be the
+ * first instruction.
+ *
+ * prev_lir <-> new_lir <-> current_lir
+ */
+void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir)
+{
+  DCHECK(current_lir->prev != NULL);
+  LIR *prev_lir = current_lir->prev;
+
+  prev_lir->next = new_lir;
+  new_lir->prev = prev_lir;
+  new_lir->next = current_lir;
+  current_lir->prev = new_lir;
+}
+
+/*
+ * Insert an LIR instruction after the current instruction, which cannot be the
+ * first instruction.
+ *
+ * current_lir -> new_lir -> old_next
+ */
+void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir)
+{
+  new_lir->prev = current_lir;
+  new_lir->next = current_lir->next;
+  current_lir->next = new_lir;
+  new_lir->next->prev = new_lir;
+}
+
+
+} // namespace art
--
cgit v1.2.3-59-g8ed1b

From 2ce745c06271d5223d57dbf08117b20d5b60694a Mon Sep 17 00:00:00 2001
From: Brian Carlstrom <bdc@google.com>
Date: Wed, 17 Jul 2013 17:44:30 -0700
Subject: Fix cpplint whitespace/braces issues

Change-Id: Ide80939faf8e8690d8842dde8133902ac725ed1a
---
 Android.mk                                |   2 +-
 compiler/dex/arena_allocator.h            |   2 +-
 compiler/dex/arena_bit_vector.cc          |   6 +-
 compiler/dex/dataflow_iterator.h          |   2 +-
 compiler/dex/frontend.cc                  |   2 +-
 compiler/dex/local_value_numbering.cc     |   3 +-
 compiler/dex/mir_dataflow.cc              |  45 +++-----
 compiler/dex/mir_graph.cc                 |  71 ++++--------
 compiler/dex/mir_graph.h                  |  18 +--
 compiler/dex/mir_optimization.cc          |  69 ++++------
 compiler/dex/portable/mir_to_gbc.cc       | 180 ++++++++++------------------
 compiler/dex/quick/arm/assemble_arm.cc    |   6 +-
 compiler/dex/quick/arm/call_arm.cc        |  57 ++++------
 compiler/dex/quick/arm/fp_arm.cc          |  21 ++--
 compiler/dex/quick/arm/int_arm.cc         | 114 +++++++------------
 compiler/dex/quick/arm/target_arm.cc      | 105 ++++++-----------
 compiler/dex/quick/arm/utility_arm.cc     |  99 ++++++----------
 compiler/dex/quick/codegen_util.cc        | 111 ++++++------------
 compiler/dex/quick/gen_common.cc          | 107 ++++++-----------
 compiler/dex/quick/gen_invoke.cc          |  81 +++++--------
 compiler/dex/quick/gen_loadstore.cc       |  42 +++----
 compiler/dex/quick/local_optimizations.cc |  18 +--
 compiler/dex/quick/mips/assemble_mips.cc  |   9 +-
 compiler/dex/quick/mips/call_mips.cc      |  30 ++---
 compiler/dex/quick/mips/fp_mips.cc        |  24 ++--
 compiler/dex/quick/mips/int_mips.cc       |  99 ++++++----------
 compiler/dex/quick/mips/target_mips.cc    | 105 ++++++-----------
 compiler/dex/quick/mips/utility_mips.cc   |  81 +++++--------
 compiler/dex/quick/mir_to_lir.cc          |  15 +--
 compiler/dex/quick/mir_to_lir.h           |   2 +-
 compiler/dex/quick/ralloc_util.cc         | 183 ++++++++++--------------------
 compiler/dex/quick/x86/call_x86.cc        |  27 ++---
 compiler/dex/quick/x86/fp_x86.cc          |   6 +-
 compiler/dex/quick/x86/int_x86.cc         |  99 ++++++----------
 compiler/dex/quick/x86/target_x86.cc      |  81 +++++--------
 compiler/dex/quick/x86/utility_x86.cc     |  60 ++++------
 compiler/dex/ssa_transformation.cc        |  60 ++++------
 compiler/dex/vreg_analysis.cc             |   9 +-
 compiler/driver/compiler_driver.cc        |  13 +--
 compiler/image_writer.cc                  |   2 +-
 compiler/jni/jni_compiler_test.cc         |   2 +-
 compiler/llvm/runtime_support_builder.cc  |   3 +-
 jdwpspy/Common.h                          |  12 +-
 runtime/base/mutex.cc                     |   2 +-
 runtime/common_throws.cc                  |   4 +-
 runtime/compiled_method.cc                |   6 +-
 runtime/dex_file.cc                       |   2 +-
 runtime/dex_instruction.h                 |   2 +-
 runtime/gc/space/large_object_space.cc    |   5 +-
 runtime/interpreter/interpreter.cc        |   2 +-
 runtime/mirror/abstract_method.cc         |   2 +-
 runtime/mirror/class-inl.h                |   6 +-
 runtime/native/dalvik_system_Zygote.cc    |   2 +-
 runtime/oat/runtime/support_jni.cc        |   2 +-
 runtime/oat/runtime/x86/context_x86.cc    |   2 +-
 runtime/oat_file.cc                       |   3 +-
 runtime/runtime.cc                        |   2 +-
 runtime/thread_pool.cc                    |   4 +-
 58 files
changed, 719 insertions(+), 1410 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/Android.mk b/Android.mk index 5a28723e8e..4e4928c022 100644 --- a/Android.mk +++ b/Android.mk @@ -334,7 +334,7 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - --filter=-,+build/header_guard, \ + --filter=-,+build/header_guard,+whitespace/braces \ $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION)) # "mm cpplint-art-aspirational" to see warnings we would like to fix diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h index 23d6b9f06b..cc81e50c5b 100644 --- a/compiler/dex/arena_allocator.h +++ b/compiler/dex/arena_allocator.h @@ -86,7 +86,7 @@ struct MemStats { void Dump(std::ostream& os) const { arena_.DumpMemStats(os); } - MemStats(const ArenaAllocator &arena) : arena_(arena){}; + MemStats(const ArenaAllocator &arena) : arena_(arena) {}; private: const ArenaAllocator &arena_; }; // MemStats diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc index 1fbf7740ac..724fdf81c7 100644 --- a/compiler/dex/arena_bit_vector.cc +++ b/compiler/dex/arena_bit_vector.cc @@ -114,8 +114,7 @@ void ArenaBitVector::Union(const ArenaBitVector* src) { } // Count the number of bits that are set. -int ArenaBitVector::NumSetBits() -{ +int ArenaBitVector::NumSetBits() { unsigned int count = 0; for (unsigned int word = 0; word < storage_size_; word++) { @@ -129,8 +128,7 @@ int ArenaBitVector::NumSetBits() * since there might be unused bits - setting those to one will confuse the * iterator. */ -void ArenaBitVector::SetInitialBits(unsigned int num_bits) -{ +void ArenaBitVector::SetInitialBits(unsigned int num_bits) { DCHECK_LE(((num_bits + 31) >> 5), storage_size_); unsigned int idx; for (idx = 0; idx < (num_bits >> 5); idx++) { diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index 4c112f9678..19468698f9 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -42,7 +42,7 @@ namespace art { class DataflowIterator { public: - virtual ~DataflowIterator(){} + virtual ~DataflowIterator() {} // Return the next BasicBlock* to visit. 
BasicBlock* Next() { diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 746d475a9b..2d7c973859 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -220,7 +220,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, llvm_compilation_unit)); } else #endif - { + { // NOLINT(whitespace/braces) switch (compiler.GetInstructionSet()) { case kThumb2: cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break; diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc index ec5ab5db38..b783f3ed52 100644 --- a/compiler/dex/local_value_numbering.cc +++ b/compiler/dex/local_value_numbering.cc @@ -19,8 +19,7 @@ namespace art { -uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) -{ +uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) { uint16_t res = NO_VALUE; uint16_t opcode = mir->dalvikInsn.opcode; switch (opcode) { diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc index 6c152d2fb3..9632388e19 100644 --- a/compiler/dex/mir_dataflow.cc +++ b/compiler/dex/mir_dataflow.cc @@ -849,8 +849,7 @@ int MIRGraph::SRegToVReg(int ssa_reg) const { /* Any register that is used before being defined is considered live-in */ void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v, - ArenaBitVector* live_in_v, int dalvik_reg_id) -{ + ArenaBitVector* live_in_v, int dalvik_reg_id) { use_v->SetBit(dalvik_reg_id); if (!def_v->IsBitSet(dalvik_reg_id)) { live_in_v->SetBit(dalvik_reg_id); @@ -858,8 +857,7 @@ void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v, } /* Mark a reg as being defined */ -void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) -{ +void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) { def_v->SetBit(dalvik_reg_id); } @@ -867,8 +865,7 @@ void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) * Find out live-in variables for natural loops. Variables that are live-in in * the main loop body are considered to be defined in the entry block. */ -bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) -{ +bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) { MIR* mir; ArenaBitVector *use_v, *def_v, *live_in_v; @@ -925,8 +922,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) return true; } -int MIRGraph::AddNewSReg(int v_reg) -{ +int MIRGraph::AddNewSReg(int v_reg) { // Compiler temps always have a subscript of 0 int subscript = (v_reg < 0) ? 
0 : ++ssa_last_defs_[v_reg]; int ssa_reg = GetNumSSARegs(); @@ -938,15 +934,13 @@ int MIRGraph::AddNewSReg(int v_reg) } /* Find out the latest SSA register for a given Dalvik register */ -void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) -{ +void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) { DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers)); uses[reg_index] = vreg_to_ssa_map_[dalvik_reg]; } /* Setup a new SSA register for a given Dalvik register */ -void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) -{ +void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) { DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers)); int ssa_reg = AddNewSReg(dalvik_reg); vreg_to_ssa_map_[dalvik_reg] = ssa_reg; @@ -954,8 +948,7 @@ void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) } /* Look up new SSA names for format_35c instructions */ -void MIRGraph::DataFlowSSAFormat35C(MIR* mir) -{ +void MIRGraph::DataFlowSSAFormat35C(MIR* mir) { DecodedInstruction *d_insn = &mir->dalvikInsn; int num_uses = d_insn->vA; int i; @@ -973,8 +966,7 @@ void MIRGraph::DataFlowSSAFormat35C(MIR* mir) } /* Look up new SSA names for format_3rc instructions */ -void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) -{ +void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) { DecodedInstruction *d_insn = &mir->dalvikInsn; int num_uses = d_insn->vA; int i; @@ -992,8 +984,7 @@ void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) } /* Entry function to convert a block into SSA representation */ -bool MIRGraph::DoSSAConversion(BasicBlock* bb) -{ +bool MIRGraph::DoSSAConversion(BasicBlock* bb) { MIR* mir; if (bb->data_flow_info == NULL) return false; @@ -1127,8 +1118,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) } /* Setup the basic data structures for SSA conversion */ -void MIRGraph::CompilerInitializeSSAConversion() -{ +void MIRGraph::CompilerInitializeSSAConversion() { size_t num_dalvik_reg = cu_->num_dalvik_registers; ssa_base_vregs_ = new (arena_) GrowableArray(arena_, num_dalvik_reg + GetDefCount() + 128, @@ -1196,8 +1186,7 @@ void MIRGraph::CompilerInitializeSSAConversion() * and attempting to do would involve more complexity than it's * worth. */ -bool MIRGraph::InvokeUsesMethodStar(MIR* mir) -{ +bool MIRGraph::InvokeUsesMethodStar(MIR* mir) { InvokeType type; Instruction::Code opcode = mir->dalvikInsn.opcode; switch (opcode) { @@ -1246,8 +1235,7 @@ bool MIRGraph::InvokeUsesMethodStar(MIR* mir) * counts explicitly used s_regs. A later phase will add implicit * counts for things such as Method*, null-checked references, etc. */ -bool MIRGraph::CountUses(struct BasicBlock* bb) -{ +bool MIRGraph::CountUses(struct BasicBlock* bb) { if (bb->block_type != kDalvikByteCode) { return false; } @@ -1286,8 +1274,7 @@ bool MIRGraph::CountUses(struct BasicBlock* bb) return false; } -void MIRGraph::MethodUseCount() -{ +void MIRGraph::MethodUseCount() { // Now that we know, resize the lists. 
int num_ssa_regs = GetNumSSARegs(); use_counts_.Resize(num_ssa_regs + 32); @@ -1307,8 +1294,7 @@ void MIRGraph::MethodUseCount() } /* Verify if all the successor is connected with all the claimed predecessors */ -bool MIRGraph::VerifyPredInfo(BasicBlock* bb) -{ +bool MIRGraph::VerifyPredInfo(BasicBlock* bb) { GrowableArray::Iterator iter(bb->predecessors); while (true) { @@ -1343,8 +1329,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) return true; } -void MIRGraph::VerifyDataflow() -{ +void MIRGraph::VerifyDataflow() { /* Verify if all blocks are connected as claimed */ AllNodesIterator iter(this, false /* not iterative */); for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) { diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 11e100dc61..ef9955e585 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -107,8 +107,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena) method_sreg_(0), attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke. checkstats_(NULL), - arena_(arena) - { + arena_(arena) { try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */); } @@ -129,8 +128,7 @@ bool MIRGraph::ContentIsInsn(const uint16_t* code_ptr) { /* * Parse an instruction, return the length of the instruction */ -int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction) -{ +int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction) { // Don't parse instruction data if (!ContentIsInsn(code_ptr)) { return 0; @@ -145,8 +143,7 @@ int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_in /* Split an existing block from the specified code offset into two */ BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset, - BasicBlock* orig_block, BasicBlock** immed_pred_block_p) -{ + BasicBlock* orig_block, BasicBlock** immed_pred_block_p) { MIR* insn = orig_block->first_mir_insn; while (insn) { if (insn->offset == code_offset) break; @@ -224,8 +221,7 @@ BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset, * Utilizes a map for fast lookup of the typical cases. 
*/ BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create, - BasicBlock** immed_pred_block_p) -{ + BasicBlock** immed_pred_block_p) { BasicBlock* bb; unsigned int i; SafeMap::iterator it; @@ -260,8 +256,7 @@ BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool creat } /* Identify code range in try blocks and set up the empty catch blocks */ -void MIRGraph::ProcessTryCatchBlocks() -{ +void MIRGraph::ProcessTryCatchBlocks() { int tries_size = current_code_item_->tries_size_; int offset; @@ -296,8 +291,7 @@ void MIRGraph::ProcessTryCatchBlocks() /* Process instructions with the kBranch flag */ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags, const uint16_t* code_ptr, - const uint16_t* code_end) -{ + const uint16_t* code_end) { int target = cur_offset; switch (insn->dalvikInsn.opcode) { case Instruction::GOTO: @@ -365,8 +359,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur /* Process instructions with the kSwitch flag */ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, - int flags) -{ + int flags) { const uint16_t* switch_data = reinterpret_cast(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB); int size; @@ -443,8 +436,7 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset /* Process instructions with the kThrow flag */ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags, ArenaBitVector* try_block_addr, - const uint16_t* code_ptr, const uint16_t* code_end) -{ + const uint16_t* code_ptr, const uint16_t* code_end) { bool in_try_block = try_block_addr->IsBitSet(cur_offset); /* In try block */ @@ -483,7 +475,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_ eh_block->predecessors->Insert(cur_block); } - if (insn->dalvikInsn.opcode == Instruction::THROW){ + if (insn->dalvikInsn.opcode == Instruction::THROW) { cur_block->explicit_throw = true; if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) { // Force creation of new block following THROW via side-effect @@ -529,8 +521,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_ /* Parse a Dex method and insert it into the MIRGraph at the current insert point. */ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t class_def_idx, - uint32_t method_idx, jobject class_loader, const DexFile& dex_file) -{ + uint32_t method_idx, jobject class_loader, const DexFile& dex_file) { current_code_item_ = code_item; method_stack_.push_back(std::make_pair(current_method_, current_offset_)); current_method_ = m_units_.size(); @@ -705,8 +696,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_ } } -void MIRGraph::ShowOpcodeStats() -{ +void MIRGraph::ShowOpcodeStats() { DCHECK(opcode_count_ != NULL); LOG(INFO) << "Opcode Count"; for (int i = 0; i < kNumPackedOpcodes; i++) { @@ -719,8 +709,7 @@ void MIRGraph::ShowOpcodeStats() // TODO: use a configurable base prefix, and adjust callers to supply pass name. 
/* Dump the CFG into a DOT graph */ -void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) -{ +void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) { FILE* file; std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file)); ReplaceSpecialChars(fname); @@ -849,8 +838,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) } /* Insert an MIR instruction to the end of a basic block */ -void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) -{ +void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) { if (bb->first_mir_insn == NULL) { DCHECK(bb->last_mir_insn == NULL); bb->last_mir_insn = bb->first_mir_insn = mir; @@ -864,8 +852,7 @@ void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) } /* Insert an MIR instruction to the head of a basic block */ -void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) -{ +void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) { if (bb->first_mir_insn == NULL) { DCHECK(bb->last_mir_insn == NULL); bb->last_mir_insn = bb->first_mir_insn = mir; @@ -879,8 +866,7 @@ void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) } /* Insert a MIR instruction after the specified MIR */ -void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) -{ +void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) { new_mir->prev = current_mir; new_mir->next = current_mir->next; current_mir->next = new_mir; @@ -894,8 +880,7 @@ void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) } } -char* MIRGraph::GetDalvikDisassembly(const MIR* mir) -{ +char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { DecodedInstruction insn = mir->dalvikInsn; std::string str; int flags = 0; @@ -1024,8 +1009,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) } /* Turn method name into a legal Linux file name */ -void MIRGraph::ReplaceSpecialChars(std::string& str) -{ +void MIRGraph::ReplaceSpecialChars(std::string& str) { static const struct { const char before; const char after; } match[] = {{'/','-'}, {';','#'}, {' ','#'}, {'$','+'}, {'(','@'}, {')','@'}, {'<','='}, {'>','='}}; @@ -1034,8 +1018,7 @@ void MIRGraph::ReplaceSpecialChars(std::string& str) } } -std::string MIRGraph::GetSSAName(int ssa_reg) -{ +std::string MIRGraph::GetSSAName(int ssa_reg) { // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to // the arena. We should be smarter and just place straight into the arena, or compute the // value more lazily. @@ -1043,8 +1026,7 @@ std::string MIRGraph::GetSSAName(int ssa_reg) } // Similar to GetSSAName, but if ssa name represents an immediate show that as well. -std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) -{ +std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) { if (reg_location_ == NULL) { // Pre-SSA - just use the standard name return GetSSAName(ssa_reg); @@ -1062,8 +1044,7 @@ std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) } } -void MIRGraph::GetBlockName(BasicBlock* bb, char* name) -{ +void MIRGraph::GetBlockName(BasicBlock* bb, char* name) { switch (bb->block_type) { case kEntryBlock: snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id); @@ -1084,16 +1065,14 @@ void MIRGraph::GetBlockName(BasicBlock* bb, char* name) } } -const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) -{ +const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) { // FIXME: use current code unit for inline support. 
const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx); return cu_->dex_file->GetShorty(method_id.proto_idx_); } /* Debug Utility - dump a compilation unit */ -void MIRGraph::DumpMIRGraph() -{ +void MIRGraph::DumpMIRGraph() { BasicBlock* bb; const char* block_type_names[] = { "Entry Block", @@ -1135,8 +1114,7 @@ void MIRGraph::DumpMIRGraph() * MOVE_RESULT and incorporate it into the invoke. */ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, - bool is_range) -{ + bool is_range) { CallInfo* info = static_cast(arena_->NewMem(sizeof(CallInfo), true, ArenaAllocator::kAllocMisc)); MIR* move_result_mir = FindMoveResult(bb, mir); @@ -1163,8 +1141,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, } // Allocate a new basic block. -BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) -{ +BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) { BasicBlock* bb = static_cast(arena_->NewMem(sizeof(BasicBlock), true, ArenaAllocator::kAllocBB)); bb->block_type = block_type; diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index a40fa97ad5..f86e13016d 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -452,43 +452,37 @@ class MIRGraph { return ssa_subscripts_->Get(ssa_reg); } - RegLocation GetRawSrc(MIR* mir, int num) - { + RegLocation GetRawSrc(MIR* mir, int num) { DCHECK(num < mir->ssa_rep->num_uses); RegLocation res = reg_location_[mir->ssa_rep->uses[num]]; return res; } - RegLocation GetRawDest(MIR* mir) - { + RegLocation GetRawDest(MIR* mir) { DCHECK_GT(mir->ssa_rep->num_defs, 0); RegLocation res = reg_location_[mir->ssa_rep->defs[0]]; return res; } - RegLocation GetDest(MIR* mir) - { + RegLocation GetDest(MIR* mir) { RegLocation res = GetRawDest(mir); DCHECK(!res.wide); return res; } - RegLocation GetSrc(MIR* mir, int num) - { + RegLocation GetSrc(MIR* mir, int num) { RegLocation res = GetRawSrc(mir, num); DCHECK(!res.wide); return res; } - RegLocation GetDestWide(MIR* mir) - { + RegLocation GetDestWide(MIR* mir) { RegLocation res = GetRawDest(mir); DCHECK(res.wide); return res; } - RegLocation GetSrcWide(MIR* mir, int low) - { + RegLocation GetSrcWide(MIR* mir, int low) { RegLocation res = GetRawSrc(mir, low); DCHECK(res.wide); return res; diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 6b8f3f0915..306dbc7b6b 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -20,27 +20,23 @@ namespace art { -static unsigned int Predecessors(BasicBlock* bb) -{ +static unsigned int Predecessors(BasicBlock* bb) { return bb->predecessors->Size(); } /* Setup a constant value for opcodes thare have the DF_SETS_CONST attribute */ -void MIRGraph::SetConstant(int32_t ssa_reg, int value) -{ +void MIRGraph::SetConstant(int32_t ssa_reg, int value) { is_constant_v_->SetBit(ssa_reg); constant_values_[ssa_reg] = value; } -void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) -{ +void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) { is_constant_v_->SetBit(ssa_reg); constant_values_[ssa_reg] = Low32Bits(value); constant_values_[ssa_reg + 1] = High32Bits(value); } -void MIRGraph::DoConstantPropogation(BasicBlock* bb) -{ +void MIRGraph::DoConstantPropogation(BasicBlock* bb) { MIR* mir; for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { @@ -96,8 +92,7 @@ void MIRGraph::DoConstantPropogation(BasicBlock* bb) /* TODO: implement code to handle arithmetic operations */ } -void 
-void MIRGraph::PropagateConstants()
-{
+void MIRGraph::PropagateConstants() {
   is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
   constant_values_ = static_cast<int*>(arena_->NewMem(sizeof(int) * GetNumSSARegs(), true,
                                                       ArenaAllocator::kAllocDFInfo));
@@ -108,8 +103,7 @@ void MIRGraph::PropagateConstants()
 }
 
 /* Advance to next strictly dominated MIR node in an extended basic block */
-static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir)
-{
+static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
   BasicBlock* bb = *p_bb;
   if (mir != NULL) {
     mir = mir->next;
@@ -133,8 +127,7 @@ static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir)
 * opcodes or incoming arcs.  However, if the result of the invoke is not
 * used, a move-result may not be present.
 */
-MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir)
-{
+MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
   BasicBlock* tbb = bb;
   mir = AdvanceMIR(&tbb, mir);
   while (mir != NULL) {
@@ -154,8 +147,7 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir)
   return mir;
 }
 
-static BasicBlock* NextDominatedBlock(BasicBlock* bb)
-{
+static BasicBlock* NextDominatedBlock(BasicBlock* bb) {
   if (bb->block_type == kDead) {
     return NULL;
   }
@@ -169,8 +161,7 @@ static BasicBlock* NextDominatedBlock(BasicBlock* bb)
   return bb;
 }
 
-static MIR* FindPhi(BasicBlock* bb, int ssa_name)
-{
+static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
       for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -183,8 +174,7 @@ static MIR* FindPhi(BasicBlock* bb, int ssa_name)
   return NULL;
 }
 
-static SelectInstructionKind SelectKind(MIR* mir)
-{
+static SelectInstructionKind SelectKind(MIR* mir) {
   switch (mir->dalvikInsn.opcode) {
     case Instruction::MOVE:
     case Instruction::MOVE_OBJECT:
@@ -206,15 +196,13 @@ static SelectInstructionKind SelectKind(MIR* mir)
   return kSelectNone;
 }
 
-int MIRGraph::GetSSAUseCount(int s_reg)
-{
+int MIRGraph::GetSSAUseCount(int s_reg) {
   return raw_use_counts_.Get(s_reg);
 }
 
 
 /* Do some MIR-level extended basic block optimizations */
-bool MIRGraph::BasicBlockOpt(BasicBlock* bb)
-{
+bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
   if (bb->block_type == kDead) {
     return true;
   }
@@ -474,8 +462,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb)
   return true;
 }
 
-void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb)
-{
+void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb) {
   if (bb->data_flow_info != NULL) {
     bb->data_flow_info->ending_null_check_v =
         new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapNullCheck);
@@ -483,8 +470,7 @@ void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb)
 }
 
 /* Collect stats on number of checks removed */
-void MIRGraph::CountChecks(struct BasicBlock* bb)
-{
+void MIRGraph::CountChecks(struct BasicBlock* bb) {
   if (bb->data_flow_info != NULL) {
     for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
       if (mir->ssa_rep == NULL) {
@@ -508,8 +494,7 @@ void MIRGraph::CountChecks(struct BasicBlock* bb)
 }
 
 /* Try to make common case the fallthrough path */
-static bool LayoutBlocks(struct BasicBlock* bb)
-{
+static bool LayoutBlocks(struct BasicBlock* bb) {
   // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback
   if (!bb->explicit_throw) {
     return false;
   }
@@ -556,8 +541,7 @@ static bool LayoutBlocks(struct BasicBlock* bb)
 }
 
 /* Combine any basic blocks terminated by instructions that we now know can't throw */
-bool MIRGraph::CombineBlocks(struct BasicBlock* bb)
-{
+bool MIRGraph::CombineBlocks(struct BasicBlock* bb) {
   // Loop here to allow combining a sequence of blocks
   while (true) {
     // Check termination conditions
@@ -625,8 +609,7 @@ bool MIRGraph::CombineBlocks(struct BasicBlock* bb)
 }
 
 /* Eliminate unnecessary null checks for a basic block. */
-bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb)
-{
+bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) {
   if (bb->data_flow_info == NULL) return false;
 
   /*
@@ -770,8 +753,7 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb)
   return changed;
 }
 
-void MIRGraph::NullCheckElimination()
-{
+void MIRGraph::NullCheckElimination() {
   if (!(cu_->disable_opt & (1 << kNullCheckElimination))) {
     DCHECK(temp_ssa_register_v_ != NULL);
     AllNodesIterator iter(this, false /* not iterative */);
@@ -789,8 +771,7 @@ void MIRGraph::NullCheckElimination()
   }
 }
 
-void MIRGraph::BasicBlockCombine()
-{
+void MIRGraph::BasicBlockCombine() {
   PreOrderDfsIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     CombineBlocks(bb);
@@ -800,8 +781,7 @@ void MIRGraph::BasicBlockCombine()
   }
 }
 
-void MIRGraph::CodeLayout()
-{
+void MIRGraph::CodeLayout() {
   if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
     VerifyDataflow();
   }
@@ -814,8 +794,7 @@ void MIRGraph::CodeLayout()
   }
 }
 
-void MIRGraph::DumpCheckStats()
-{
+void MIRGraph::DumpCheckStats() {
   Checkstats* stats =
       static_cast<Checkstats*>(arena_->NewMem(sizeof(Checkstats), true,
                                               ArenaAllocator::kAllocDFInfo));
@@ -840,8 +819,7 @@ void MIRGraph::DumpCheckStats()
   }
 }
 
-bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb)
-{
+bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
   if (bb->visited) return false;
   if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
       || (bb->block_type == kExitBlock))) {
@@ -871,8 +849,7 @@ bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb)
 }
 
 
-void MIRGraph::BasicBlockOptimization()
-{
+void MIRGraph::BasicBlockOptimization() {
   if (!(cu_->disable_opt & (1 << kBBOpt))) {
     DCHECK_EQ(cu_->num_compiler_temps, 0);
     ClearAllVisitedFlags();
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 2be1ef435b..4317d1e354 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -42,18 +42,15 @@ const char kCatchBlock = 'C';
 
 namespace art {
 
-::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id)
-{
+::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id) {
   return id_to_block_map_.Get(id);
 }
 
-::llvm::Value* MirConverter::GetLLVMValue(int s_reg)
-{
+::llvm::Value* MirConverter::GetLLVMValue(int s_reg) {
   return llvm_values_.Get(s_reg);
 }
 
-void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg)
-{
+void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg) {
   // Set vreg for debugging
   art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::SetVReg;
   ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -64,8 +61,7 @@ void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg)
 }
 
 // Replace the placeholder value with the real definition
-void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg)
-{
+void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) {
   ::llvm::Value* placeholder = GetLLVMValue(s_reg);
   if (placeholder == NULL) {
     // This can happen on instruction rewrite on verification failure
@@ -81,14 +77,12 @@ void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg)
 
 }
 
-void MirConverter::DefineValue(::llvm::Value* val, int s_reg)
-{
+void MirConverter::DefineValue(::llvm::Value* val, int s_reg) {
   DefineValueOnly(val, s_reg);
   SetVregOnValue(val, s_reg);
 }
 
-::llvm::Type* MirConverter::LlvmTypeFromLocRec(RegLocation loc)
-{
+::llvm::Type* MirConverter::LlvmTypeFromLocRec(RegLocation loc) {
   ::llvm::Type* res = NULL;
   if (loc.wide) {
     if (loc.fp)
@@ -108,8 +102,7 @@ void MirConverter::DefineValue(::llvm::Value* val, int s_reg)
   return res;
 }
 
-void MirConverter::InitIR()
-{
+void MirConverter::InitIR() {
   if (llvm_info_ == NULL) {
     CompilerTls* tls = cu_->compiler_driver->GetTls();
     CHECK(tls != NULL);
@@ -125,16 +118,14 @@ void MirConverter::InitIR()
   irb_ = llvm_info_->GetIRBuilder();
 }
 
-::llvm::BasicBlock* MirConverter::FindCaseTarget(uint32_t vaddr)
-{
+::llvm::BasicBlock* MirConverter::FindCaseTarget(uint32_t vaddr) {
   BasicBlock* bb = mir_graph_->FindBlock(vaddr);
   DCHECK(bb != NULL);
   return GetLLVMBlock(bb->id);
 }
 
 void MirConverter::ConvertPackedSwitch(BasicBlock* bb,
-                                int32_t table_offset, RegLocation rl_src)
-{
+                                int32_t table_offset, RegLocation rl_src) {
   const Instruction::PackedSwitchPayload* payload =
       reinterpret_cast<const Instruction::PackedSwitchPayload*>(
       cu_->insns + current_dalvik_offset_ + table_offset);
@@ -158,8 +149,7 @@ void MirConverter::ConvertPackedSwitch(BasicBlock* bb,
 }
 
 void MirConverter::ConvertSparseSwitch(BasicBlock* bb,
-                                int32_t table_offset, RegLocation rl_src)
-{
+                                int32_t table_offset, RegLocation rl_src) {
   const Instruction::SparseSwitchPayload* payload =
       reinterpret_cast<const Instruction::SparseSwitchPayload*>(
       cu_->insns + current_dalvik_offset_ + table_offset);
@@ -186,8 +176,7 @@ void MirConverter::ConvertSparseSwitch(BasicBlock* bb,
 }
 
 void MirConverter::ConvertSget(int32_t field_index,
-    art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
-{
+    art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) {
   ::llvm::Constant* field_idx = irb_->getInt32(field_index);
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::Value* res = irb_->CreateCall(intr, field_idx);
@@ -195,8 +184,7 @@ void MirConverter::ConvertSget(int32_t field_index,
 }
 
 void MirConverter::ConvertSput(int32_t field_index,
-    art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src)
-{
+    art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src) {
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
   args.push_back(irb_->getInt32(field_index));
   args.push_back(GetLLVMValue(rl_src.orig_sreg));
@@ -204,8 +192,7 @@ void MirConverter::ConvertSput(int32_t field_index,
   irb_->CreateCall(intr, args);
 }
 
-void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array)
-{
+void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::HLFillArrayData;
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
@@ -216,8 +203,7 @@ void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array)
 }
 
 ::llvm::Value* MirConverter::EmitConst(::llvm::ArrayRef< ::llvm::Value*> src,
-                               RegLocation loc)
-{
+                               RegLocation loc) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   if (loc.wide) {
     if (loc.fp) {
@@ -238,16 +224,14 @@ void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array)
   return irb_->CreateCall(intr, src);
 }
 
-void MirConverter::EmitPopShadowFrame()
-{
+void MirConverter::EmitPopShadowFrame() {
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::PopShadowFrame);
   irb_->CreateCall(intr);
 }
 
 ::llvm::Value* MirConverter::EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src,
-                              RegLocation loc)
-{
+                              RegLocation loc) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   if (loc.wide) {
     if (loc.fp) {
@@ -268,16 +252,14 @@ void MirConverter::EmitPopShadowFrame()
   return irb_->CreateCall(intr, src);
 }
 
-void MirConverter::ConvertMoveException(RegLocation rl_dest)
-{
+void MirConverter::ConvertMoveException(RegLocation rl_dest) {
   ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::GetException);
   ::llvm::Value* res = irb_->CreateCall(func);
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertThrow(RegLocation rl_src)
-{
+void MirConverter::ConvertThrow(RegLocation rl_src) {
   ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
   ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::HLThrowException);
@@ -286,8 +268,7 @@ void MirConverter::ConvertThrow(RegLocation rl_src)
 
 void MirConverter::ConvertMonitorEnterExit(int opt_flags,
     art::llvm::IntrinsicHelper::IntrinsicId id,
-    RegLocation rl_src)
-{
+    RegLocation rl_src) {
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
   args.push_back(irb_->getInt32(opt_flags));
   args.push_back(GetLLVMValue(rl_src.orig_sreg));
@@ -296,8 +277,7 @@ void MirConverter::ConvertMonitorEnterExit(int opt_flags,
 }
 
 void MirConverter::ConvertArrayLength(int opt_flags,
-                              RegLocation rl_dest, RegLocation rl_src)
-{
+                              RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
   args.push_back(irb_->getInt32(opt_flags));
   args.push_back(GetLLVMValue(rl_src.orig_sreg));
@@ -307,8 +287,7 @@ void MirConverter::ConvertArrayLength(int opt_flags,
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::EmitSuspendCheck()
-{
+void MirConverter::EmitSuspendCheck() {
   art::llvm::IntrinsicHelper::IntrinsicId id =
       art::llvm::IntrinsicHelper::CheckSuspend;
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -316,8 +295,7 @@ void MirConverter::EmitSuspendCheck()
 }
 
 ::llvm::Value* MirConverter::ConvertCompare(ConditionCode cc,
-    ::llvm::Value* src1, ::llvm::Value* src2)
-{
+    ::llvm::Value* src1, ::llvm::Value* src2) {
   ::llvm::Value* res = NULL;
   DCHECK_EQ(src1->getType(), src2->getType());
   switch(cc) {
@@ -333,8 +311,7 @@ void MirConverter::EmitSuspendCheck()
 }
 
 void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir,
-    ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2)
-{
+    ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2) {
   if (bb->taken->start_offset <= mir->offset) {
     EmitSuspendCheck();
   }
@@ -349,8 +326,7 @@ void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir,
 }
 
 void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
-    MIR* mir, ConditionCode cc, RegLocation rl_src1)
-{
+    MIR* mir, ConditionCode cc, RegLocation rl_src1) {
   if (bb->taken->start_offset <= mir->offset) {
     EmitSuspendCheck();
   }
@@ -369,8 +345,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
 }
 
 ::llvm::Value* MirConverter::GenDivModOp(bool is_div, bool is_long,
-    ::llvm::Value* src1, ::llvm::Value* src2)
-{
+    ::llvm::Value* src1, ::llvm::Value* src2) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   if (is_long) {
     if (is_div) {
@@ -393,8 +368,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
 }
 
 ::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long,
-    ::llvm::Value* src1, ::llvm::Value* src2)
-{
+    ::llvm::Value* src1, ::llvm::Value* src2) {
   ::llvm::Value* res = NULL;
   switch(op) {
     case kOpAdd: res = irb_->CreateAdd(src1, src2); break;
@@ -416,8 +390,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
 }
 
 void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest,
-    RegLocation rl_src1, RegLocation rl_src2)
-{
+    RegLocation rl_src1, RegLocation rl_src2) {
   ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
   ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
   ::llvm::Value* res = NULL;
@@ -434,8 +407,7 @@ void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest,
 }
 
 void MirConverter::ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
-                        RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
-{
+                        RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2>args;
   args.push_back(GetLLVMValue(rl_src1.orig_sreg));
@@ -445,8 +417,7 @@ void MirConverter::ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
 }
 
 void MirConverter::ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
-                           RegLocation rl_dest, RegLocation rl_src, int shift_amount)
-{
+                           RegLocation rl_dest, RegLocation rl_src, int shift_amount) {
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2>args;
   args.push_back(GetLLVMValue(rl_src.orig_sreg));
@@ -456,8 +427,7 @@ void MirConverter::ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
 }
 
 void MirConverter::ConvertArithOp(OpKind op, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2)
-{
+                          RegLocation rl_src1, RegLocation rl_src2) {
   ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
   ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
   DCHECK_EQ(src1->getType(), src2->getType());
@@ -466,8 +436,7 @@ void MirConverter::ConvertArithOp(OpKind op, RegLocation rl_dest,
 }
 
 void MirConverter::ConvertArithOpLit(OpKind op, RegLocation rl_dest,
-                             RegLocation rl_src1, int32_t imm)
-{
+                             RegLocation rl_src1, int32_t imm) {
   ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
   ::llvm::Value* src2 = irb_->getInt32(imm);
   ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2);
@@ -480,8 +449,7 @@ void MirConverter::ConvertArithOpLit(OpKind op, RegLocation rl_dest,
 * The requirements are similar.
 */
 void MirConverter::ConvertInvoke(BasicBlock* bb, MIR* mir,
-                         InvokeType invoke_type, bool is_range, bool is_filled_new_array)
-{
+                         InvokeType invoke_type, bool is_range, bool is_filled_new_array) {
   CallInfo* info = mir_graph_->NewMemCallInfo(bb, mir, invoke_type, is_range);
   ::llvm::SmallVector< ::llvm::Value*, 10> args;
   // Insert the invoke_type
@@ -529,16 +497,14 @@ void MirConverter::ConvertInvoke(BasicBlock* bb, MIR* mir,
 }
 
 void MirConverter::ConvertConstObject(uint32_t idx,
-    art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
-{
+    art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) {
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::Value* index = irb_->getInt32(idx);
   ::llvm::Value* res = irb_->CreateCall(intr, index);
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src)
-{
+void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::HLCheckCast;
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -548,8 +514,7 @@ void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src)
   irb_->CreateCall(intr, args);
 }
 
-void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest)
-{
+void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::NewInstance;
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -559,8 +524,7 @@ void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest)
 }
 
 void MirConverter::ConvertNewArray(uint32_t type_idx,
-                           RegLocation rl_dest, RegLocation rl_src)
-{
+                           RegLocation rl_dest, RegLocation rl_src) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::NewArray;
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -573,8 +537,7 @@ void MirConverter::ConvertNewArray(uint32_t type_idx,
 
 void MirConverter::ConvertAget(int opt_flags,
     art::llvm::IntrinsicHelper::IntrinsicId id,
-    RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index)
-{
+    RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index) {
   ::llvm::SmallVector< ::llvm::Value*, 3> args;
   args.push_back(irb_->getInt32(opt_flags));
   args.push_back(GetLLVMValue(rl_array.orig_sreg));
@@ -586,8 +549,7 @@ void MirConverter::ConvertAget(int opt_flags,
 
 void MirConverter::ConvertAput(int opt_flags,
     art::llvm::IntrinsicHelper::IntrinsicId id,
-    RegLocation rl_src, RegLocation rl_array, RegLocation rl_index)
-{
+    RegLocation rl_src, RegLocation rl_array, RegLocation rl_index) {
   ::llvm::SmallVector< ::llvm::Value*, 4> args;
   args.push_back(irb_->getInt32(opt_flags));
   args.push_back(GetLLVMValue(rl_src.orig_sreg));
@@ -599,8 +561,7 @@ void MirConverter::ConvertAput(int opt_flags,
 
 void MirConverter::ConvertIget(int opt_flags,
     art::llvm::IntrinsicHelper::IntrinsicId id,
-    RegLocation rl_dest, RegLocation rl_obj, int field_index)
-{
+    RegLocation rl_dest, RegLocation rl_obj, int field_index) {
   ::llvm::SmallVector< ::llvm::Value*, 3> args;
   args.push_back(irb_->getInt32(opt_flags));
   args.push_back(GetLLVMValue(rl_obj.orig_sreg));
@@ -612,8 +573,7 @@ void MirConverter::ConvertIget(int opt_flags,
 
 void MirConverter::ConvertIput(int opt_flags,
     art::llvm::IntrinsicHelper::IntrinsicId id,
-    RegLocation rl_src, RegLocation rl_obj, int field_index)
-{
+    RegLocation rl_src, RegLocation rl_obj, int field_index) {
   ::llvm::SmallVector< ::llvm::Value*, 4> args;
   args.push_back(irb_->getInt32(opt_flags));
   args.push_back(GetLLVMValue(rl_src.orig_sreg));
@@ -624,8 +584,7 @@ void MirConverter::ConvertIput(int opt_flags,
 }
 
 void MirConverter::ConvertInstanceOf(uint32_t type_idx,
-                             RegLocation rl_dest, RegLocation rl_src)
-{
+                             RegLocation rl_dest, RegLocation rl_src) {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::InstanceOf;
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -636,29 +595,25 @@ void MirConverter::ConvertInstanceOf(uint32_t type_idx,
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src)
-{
+void MirConverter::ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::Value* res = irb_->CreateSExt(GetLLVMValue(rl_src.orig_sreg),
                                         irb_->getInt64Ty());
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src)
-{
+void MirConverter::ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
   ::llvm::Value* res = irb_->CreateTrunc(src, irb_->getInt32Ty());
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src)
-{
+void MirConverter::ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
   ::llvm::Value* res = irb_->CreateFPExt(src, irb_->getDoubleTy());
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src)
-{
+void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
   ::llvm::Value* res = irb_->CreateFPTrunc(src, irb_->getFloatTy());
   DefineValue(res, rl_dest.orig_sreg);
@@ -666,8 +621,7 @@ void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src)
 
 void MirConverter::ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id,
                                  RegLocation rl_dest, RegLocation rl_src1,
-                                 RegLocation rl_src2)
-{
+                                 RegLocation rl_src2) {
   DCHECK_EQ(rl_src1.fp, rl_src2.fp);
   DCHECK_EQ(rl_src1.wide, rl_src2.wide);
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -679,23 +633,20 @@ void MirConverter::ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId
 }
 
 void MirConverter::ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src,
-                               art::llvm::IntrinsicHelper::IntrinsicId id)
-{
+                               art::llvm::IntrinsicHelper::IntrinsicId id) {
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::Value* res =
       irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertNeg(RegLocation rl_dest, RegLocation rl_src)
-{
+void MirConverter::ConvertNeg(RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::Value* res = irb_->CreateNeg(GetLLVMValue(rl_src.orig_sreg));
   DefineValue(res, rl_dest.orig_sreg);
 }
 
 void MirConverter::ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest,
-                          RegLocation rl_src)
-{
+                          RegLocation rl_src) {
   ::llvm::Value* res =
       irb_->CreateSIToFP(GetLLVMValue(rl_src.orig_sreg), ty);
   DefineValue(res, rl_dest.orig_sreg);
@@ -703,23 +654,20 @@ void MirConverter::ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest,
 
 void MirConverter::ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id,
                           RegLocation rl_dest,
-                          RegLocation rl_src)
-{
+                          RegLocation rl_src) {
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::Value* res = irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertNegFP(RegLocation rl_dest, RegLocation rl_src)
-{
+void MirConverter::ConvertNegFP(RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::Value* res =
       irb_->CreateFNeg(GetLLVMValue(rl_src.orig_sreg));
   DefineValue(res, rl_dest.orig_sreg);
 }
 
-void MirConverter::ConvertNot(RegLocation rl_dest, RegLocation rl_src)
-{
+void MirConverter::ConvertNot(RegLocation rl_dest, RegLocation rl_src) {
   ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
   ::llvm::Value* res = irb_->CreateXor(src, static_cast<uint64_t>(-1));
   DefineValue(res, rl_dest.orig_sreg);
@@ -737,8 +685,7 @@ void MirConverter::EmitConstructorBarrier() {
  * when necessary.
  */
 bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb,
-                          ::llvm::BasicBlock* llvm_bb)
-{
+                          ::llvm::BasicBlock* llvm_bb) {
   bool res = false;   // Assume success
   RegLocation rl_src[3];
   RegLocation rl_dest = mir_graph_->GetBadLoc();
@@ -1556,8 +1503,7 @@ bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb,
   return res;
 }
 
-void MirConverter::SetDexOffset(int32_t offset)
-{
+void MirConverter::SetDexOffset(int32_t offset) {
   current_dalvik_offset_ = offset;
   ::llvm::SmallVector< ::llvm::Value*, 1> array_ref;
   array_ref.push_back(irb_->getInt32(offset));
@@ -1566,8 +1512,7 @@ void MirConverter::SetDexOffset(int32_t offset)
 }
 
 // Attach method info as metadata to special intrinsic
-void MirConverter::SetMethodInfo()
-{
+void MirConverter::SetMethodInfo() {
   // We don't want dex offset on this
   irb_->SetDexOffset(NULL);
   art::llvm::IntrinsicHelper::IntrinsicId id;
@@ -1585,8 +1530,7 @@ void MirConverter::SetMethodInfo()
   SetDexOffset(current_dalvik_offset_);
 }
 
-void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb)
-{
+void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) {
   SetDexOffset(bb->start_offset);
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
@@ -1636,8 +1580,7 @@ void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb)
 
 /* Extended MIR instructions like PHI */
 void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir,
-                              ::llvm::BasicBlock* llvm_bb)
-{
+                              ::llvm::BasicBlock* llvm_bb) {
   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
     case kMirOpPhi: {
@@ -1684,8 +1627,7 @@ void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir,
 }
 
 /* Handle the content in each basic block */
-bool MirConverter::BlockBitcodeConversion(BasicBlock* bb)
-{
+bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
   if (bb->block_type == kDead) return false;
   ::llvm::BasicBlock* llvm_bb = GetLLVMBlock(bb->id);
   if (llvm_bb == NULL) {
@@ -1901,8 +1843,7 @@ bool MirConverter::CreateFunction() {
   return true;
 }
 
-bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb)
-{
+bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb) {
   // Skip the exit block
   if ((bb->block_type == kDead) ||(bb->block_type == kExitBlock)) {
     id_to_block_map_.Put(bb->id, NULL);
@@ -1933,8 +1874,7 @@ bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb)
 *   o Iterate through the MIR a basic block at a time, setting arguments
 *     to recovered ssa name.
 */
-void MirConverter::MethodMIR2Bitcode()
-{
+void MirConverter::MethodMIR2Bitcode() {
   InitIR();
 
   // Create the function
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index e804215c11..9e144579db 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1002,8 +1002,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
 * discover that pc-relative displacements may not fit the selected
 * instruction.
 */
-AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr)
-{
+AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) {
   LIR* lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
@@ -1389,8 +1388,7 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr)
   return res;
 }
 
-int ArmMir2Lir::GetInsnSize(LIR* lir)
-{
+int ArmMir2Lir::GetInsnSize(LIR* lir) {
   return EncodingMap[lir->opcode].size;
 }
 
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index a6720ce6f2..0e813247db 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -25,8 +25,7 @@
 namespace art {
 
 /* Return the position of an ssa name within the argument list */
-int ArmMir2Lir::InPosition(int s_reg)
-{
+int ArmMir2Lir::InPosition(int s_reg) {
   int v_reg = mir_graph_->SRegToVReg(s_reg);
   return v_reg - cu_->num_regs;
 }
@@ -36,8 +35,7 @@ int ArmMir2Lir::InPosition(int s_reg)
 * there.  NOTE: all live arg registers must be locked prior to this call
 * to avoid having them allocated as a temp by downstream utilities.
 */
-RegLocation ArmMir2Lir::ArgLoc(RegLocation loc)
-{
+RegLocation ArmMir2Lir::ArgLoc(RegLocation loc) {
   int arg_num = InPosition(loc.s_reg_low);
   if (loc.wide) {
     if (arg_num == 2) {
@@ -66,8 +64,7 @@ RegLocation ArmMir2Lir::ArgLoc(RegLocation loc)
 * the frame, we can't use the normal LoadValue() because it assumed
 * a proper frame - and we're frameless.
 */
-RegLocation ArmMir2Lir::LoadArg(RegLocation loc)
-{
+RegLocation ArmMir2Lir::LoadArg(RegLocation loc) {
   if (loc.location == kLocDalvikFrame) {
     int start = (InPosition(loc.s_reg_low) + 1) * sizeof(uint32_t);
     loc.low_reg = AllocTemp();
@@ -82,8 +79,7 @@ RegLocation ArmMir2Lir::LoadArg(RegLocation loc)
 }
 
 /* Lock any referenced arguments that arrive in registers */
-void ArmMir2Lir::LockLiveArgs(MIR* mir)
-{
+void ArmMir2Lir::LockLiveArgs(MIR* mir) {
   int first_in = cu_->num_regs;
   const int num_arg_regs = 3;  // TODO: generalize & move to RegUtil.cc
   for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -97,8 +93,7 @@ void ArmMir2Lir::LockLiveArgs(MIR* mir)
 
 /* Find the next MIR, which may be in a following basic block */
 // TODO: should this be a utility in mir_graph?
-MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir)
-{
+MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) {
   BasicBlock* bb = *p_bb;
   MIR* orig_mir = mir;
   while (bb != NULL) {
@@ -123,8 +118,7 @@ MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir)
 
 /* Used for the "verbose" listing */
 //TODO:  move to common code
-void ArmMir2Lir::GenPrintLabel(MIR* mir)
-{
+void ArmMir2Lir::GenPrintLabel(MIR* mir) {
   /* Mark the beginning of a Dalvik instruction for line tracking */
   char* inst_str = cu_->verbose ?
      mir_graph_->GetDalvikDisassembly(mir) : NULL;
@@ -132,8 +126,7 @@ void ArmMir2Lir::GenPrintLabel(MIR* mir)
 }
 
 MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
-                             OpSize size, bool long_or_double, bool is_object)
-{
+                             OpSize size, bool long_or_double, bool is_object) {
   int field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
@@ -158,8 +151,7 @@ MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
 }
 
 MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
-                             OpSize size, bool long_or_double, bool is_object)
-{
+                             OpSize size, bool long_or_double, bool is_object) {
   int field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
@@ -192,8 +184,7 @@ MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
   return GetNextMir(bb, mir);
 }
 
-MIR* ArmMir2Lir::SpecialIdentity(MIR* mir)
-{
+MIR* ArmMir2Lir::SpecialIdentity(MIR* mir) {
   RegLocation rl_src;
   RegLocation rl_dest;
   bool wide = (mir->ssa_rep->num_uses == 2);
@@ -225,8 +216,7 @@ MIR* ArmMir2Lir::SpecialIdentity(MIR* mir)
 * Special-case code genration for simple non-throwing leaf methods.
 */
 void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
-                                SpecialCaseHandler special_case)
-{
+                                SpecialCaseHandler special_case) {
   current_dalvik_offset_ = mir->offset;
   MIR* next_mir = NULL;
   switch (special_case) {
@@ -319,8 +309,7 @@ void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
 *   cbnz  r_idx, lp
 */
 void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src)
-{
+                                 RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
@@ -369,8 +358,7 @@ void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
 
 
 void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src)
-{
+                                 RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   if (cu_->verbose) {
     DumpPackedSwitchTable(table);
@@ -427,8 +415,7 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
-void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
-{
+void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
@@ -480,8 +467,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
 * preserved.
 *
 */
-void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
-{
+void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   FlushAllRegs();
   DCHECK_EQ(LW_SHAPE_THIN, 0);
   LoadValueDirectFixed(rl_src, r0);  // Get obj
@@ -515,8 +501,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
 * a zero recursion count, it's safe to punch it back to the
 * initial, unlock thin state with a store word.
 */
-void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
-{
+void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
   DCHECK_EQ(LW_SHAPE_THIN, 0);
   FlushAllRegs();
   LoadValueDirectFixed(rl_src, r0);  // Get obj
@@ -541,8 +526,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
   GenMemBarrier(kStoreLoad);
 }
 
-void ArmMir2Lir::GenMoveException(RegLocation rl_dest)
-{
+void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
   int ex_offset = Thread::ExceptionOffset().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   int reset_reg = AllocTemp();
@@ -556,8 +540,7 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest)
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
-{
+void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
   int reg_card_base = AllocTemp();
   int reg_card_no = AllocTemp();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
@@ -571,8 +554,7 @@ void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
   FreeTemp(reg_card_no);
 }
 
-void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
-{
+void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
   int spill_count = num_core_spills_ + num_fp_spills_;
   /*
    * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
@@ -624,8 +606,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
   FreeTemp(r3);
 }
 
-void ArmMir2Lir::GenExitSequence()
-{
+void ArmMir2Lir::GenExitSequence() {
   int spill_count = num_core_spills_ + num_fp_spills_;
   /*
    * In the exit path, r0/r1 are live - make sure they aren't
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 53a5e1a6dc..2c626a0e8f 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -21,8 +21,7 @@
 namespace art {
 
 void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
-                                 RegLocation rl_src1, RegLocation rl_src2)
-{
+                                 RegLocation rl_src1, RegLocation rl_src2) {
   int op = kThumbBkpt;
   RegLocation rl_result;
 
@@ -68,8 +67,7 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
 }
 
 void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
-{
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   int op = kThumbBkpt;
   RegLocation rl_result;
 
@@ -117,8 +115,7 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
 }
 
 void ArmMir2Lir::GenConversion(Instruction::Code opcode,
-                               RegLocation rl_dest, RegLocation rl_src)
-{
+                               RegLocation rl_dest, RegLocation rl_src) {
   int op = kThumbBkpt;
   int src_reg;
   RegLocation rl_result;
@@ -176,8 +173,7 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode,
 }
 
 void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
-                                     bool is_double)
-{
+                                     bool is_double) {
   LIR* target = &block_label_list_[bb->taken->id];
   RegLocation rl_src1;
   RegLocation rl_src2;
@@ -229,8 +225,7 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
 
 void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2)
-{
+                          RegLocation rl_src1, RegLocation rl_src2) {
   bool is_double = false;
   int default_result = -1;
   RegLocation rl_result;
@@ -288,8 +283,7 @@ void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
   StoreValue(rl_dest, rl_result);
 }
 
-void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
-{
+void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;
   rl_src = LoadValue(rl_src, kFPReg);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
@@ -297,8 +291,7 @@ void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
   StoreValue(rl_dest, rl_result);
 }
 
-void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
-{
+void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
   RegLocation rl_result;
   rl_src = LoadValueWide(rl_src, kFPReg);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index feea896e9f..ee2d76c7b7 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -25,8 +25,7 @@
 namespace art {
 
 LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1,
-         int src2, LIR* target)
-{
+         int src2, LIR* target) {
   OpRegReg(kOpCmp, src1, src2);
   return OpCondBranch(cond, target);
 }
@@ -41,8 +40,7 @@ LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1,
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
 */
-LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide)
-{
+LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
   int mask;
   int mask3 = 0;
   int mask2 = 0;
@@ -86,8 +84,7 @@ LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide)
 *     done:
 */
 void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2)
-{
+                            RegLocation rl_src2) {
   LIR* target1;
   LIR* target2;
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -121,8 +118,7 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
 }
 
 void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
-                                          int64_t val, ConditionCode ccode)
-{
+                                          int64_t val, ConditionCode ccode) {
   int32_t val_lo = Low32Bits(val);
   int32_t val_hi = High32Bits(val);
   DCHECK(ModifiedImmediate(val_lo) >= 0);
@@ -180,8 +176,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
   OpCmpImmBranch(ccode, low_reg, val_lo, taken);
 }
 
-void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
-{
+void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
   RegLocation rl_result;
   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   // Temporary debugging code
@@ -249,8 +244,7 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
   StoreValue(rl_dest, rl_result);
 }
 
-void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir)
-{
+void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
   RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
   RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
   // Normalize such that if either operand is constant, src2 will be constant.
@@ -315,8 +309,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir)
 * is responsible for setting branch target field.
 */
 LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
-                                LIR* target)
-{
+                                LIR* target) {
   LIR* branch;
   int mod_imm;
   ArmConditionCode arm_cond = ArmConditionEncoding(cond);
@@ -341,8 +334,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
   return branch;
 }
 
-LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
-{
+LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
   LIR* res;
   int opcode;
   if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
@@ -362,16 +354,14 @@ LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
   return res;
 }
 
-LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src)
-{
+LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) {
   LIR* res = OpRegCopyNoInsert(r_dest, r_src);
   AppendLIR(res);
   return res;
 }
 
 void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
-                               int src_hi)
-{
+                               int src_hi) {
   bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
   bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
   DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
@@ -426,8 +416,7 @@ static const MagicTable magic_table[] = {
 
 // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
 bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit)
-{
+                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
   if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
     return false;
   }
@@ -471,28 +460,24 @@ bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
 }
 
 LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
-                    int reg1, int base, int offset, ThrowKind kind)
-{
+                    int reg1, int base, int offset, ThrowKind kind) {
   LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
   return NULL;
 }
 
 RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
-                                     bool is_div)
-{
+                                     bool is_div) {
   LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
   return rl_dest;
 }
 
 RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
-                                  bool is_div)
-{
+                                  bool is_div) {
   LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
   return rl_dest;
 }
 
-bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
-{
+bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
   DCHECK_EQ(cu_->instruction_set, kThumb2);
   RegLocation rl_src1 = info->args[0];
   RegLocation rl_src2 = info->args[1];
@@ -509,13 +494,11 @@ bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
   return true;
 }
 
-void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
-{
+void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void ArmMir2Lir::OpTlsCmp(int offset, int val)
-{
+void ArmMir2Lir::OpTlsCmp(int offset, int val) {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
@@ -577,25 +560,21 @@ bool ArmMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
   return true;
 }
 
-LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target)
-{
+LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) {
   return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
 }
 
-LIR* ArmMir2Lir::OpVldm(int rBase, int count)
-{
+LIR* ArmMir2Lir::OpVldm(int rBase, int count) {
   return NewLIR3(kThumb2Vldms, rBase, fr0, count);
 }
 
-LIR* ArmMir2Lir::OpVstm(int rBase, int count)
-{
+LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
   return NewLIR3(kThumb2Vstms, rBase, fr0, count);
 }
 
 void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
-                                               int first_bit, int second_bit)
-{
+                                               int first_bit, int second_bit) {
   OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
                    EncodeShift(kArmLsl, second_bit - first_bit));
   if (first_bit != 0) {
@@ -603,8 +582,7 @@ void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
   }
 }
 
-void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
-{
+void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
   int t_reg = AllocTemp();
   NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
   FreeTemp(t_reg);
@@ -612,22 +590,19 @@ void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
 }
 
 // Test suspend flag, return target of taken suspend branch
-LIR* ArmMir2Lir::OpTestSuspend(LIR* target)
-{
+LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
   NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
   return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
 }
 
 // Decrement register and branch on condition
-LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
-{
+LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
   // Combine sub & test using sub setflags encoding here
   NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
   return OpCondBranch(c_code, target);
 }
 
-void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
-{
+void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
 #if ANDROID_SMP != 0
   int dmb_flavor;
   // TODO: revisit Arm barrier kinds
@@ -646,8 +621,7 @@ void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
 #endif
 }
 
-void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
-{
+void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   int z_reg = AllocTemp();
@@ -672,16 +646,14 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
 * is not usual for dx to generate, but it is legal (for now).  In a future rev of
 * dex, we'll want to make this case illegal.
 */
-bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest)
-{
+bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
   DCHECK(rl_src.wide);
   DCHECK(rl_dest.wide);
   return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) -
               mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
 }
 
 void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2)
-{
+                            RegLocation rl_src2) {
   /*
    * To pull off inline multiply, we have a worst-case requirement of 8 temporary
    * registers.  Normally for Arm, we get 5.  We can get to 6 by including
@@ -754,32 +726,27 @@ void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
 }
 
 void ArmMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2)
-{
+                            RegLocation rl_src2) {
   LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
 }
 
 void ArmMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2)
-{
+                            RegLocation rl_src2) {
   LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
 }
 
 void ArmMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2)
-{
+                            RegLocation rl_src2) {
   LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
 }
 
 void ArmMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2)
-{
+                           RegLocation rl_src2) {
   LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
 }
 
 void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2)
-{
+                            RegLocation rl_src2) {
   LOG(FATAL) << "Unexpected use of genXoLong for Arm";
 }
 
@@ -787,8 +754,7 @@ void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
 * Generate array load
 */
 void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                          RegLocation rl_index, RegLocation rl_dest, int scale)
-{
+                          RegLocation rl_index, RegLocation rl_dest, int scale) {
   RegisterClass reg_class = oat_reg_class_by_size(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
@@ -878,8 +844,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
 *
 */
 void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                          RegLocation rl_index, RegLocation rl_src, int scale)
-{
+                          RegLocation rl_index, RegLocation rl_src, int scale) {
   RegisterClass reg_class = oat_reg_class_by_size(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
@@ -968,8 +933,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
 *
 */
 void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_src, int scale)
-{
+                             RegLocation rl_index, RegLocation rl_src, int scale) {
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset =
       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
@@ -1025,8 +989,7 @@ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
 }
 
 void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift)
-{
+                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   // Per spec, we only care about low 6 bits of shift amount.
   int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
@@ -1099,8 +1062,7 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
 }
 
 void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
-{
+                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
     if (!rl_src2.is_const) {
       // Don't bother with special handling for subtract from immediate.
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 4bece136bc..7021593a79 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -34,26 +34,22 @@ static int core_temps[] = {r0, r1, r2, r3, r12};
 static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
                          fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
 
-RegLocation ArmMir2Lir::LocCReturn()
-{
+RegLocation ArmMir2Lir::LocCReturn() {
   RegLocation res = ARM_LOC_C_RETURN;
   return res;
 }
 
-RegLocation ArmMir2Lir::LocCReturnWide()
-{
+RegLocation ArmMir2Lir::LocCReturnWide() {
   RegLocation res = ARM_LOC_C_RETURN_WIDE;
   return res;
 }
 
-RegLocation ArmMir2Lir::LocCReturnFloat()
-{
+RegLocation ArmMir2Lir::LocCReturnFloat() {
   RegLocation res = ARM_LOC_C_RETURN_FLOAT;
   return res;
 }
 
-RegLocation ArmMir2Lir::LocCReturnDouble()
-{
+RegLocation ArmMir2Lir::LocCReturnDouble() {
   RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
   return res;
 }
@@ -85,28 +81,24 @@ int ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
 
 // Create a double from a pair of singles.
-int ArmMir2Lir::S2d(int low_reg, int high_reg)
-{
+int ArmMir2Lir::S2d(int low_reg, int high_reg) {
   return ARM_S2D(low_reg, high_reg);
 }
 
 // Return mask to strip off fp reg flags and bias.
-uint32_t ArmMir2Lir::FpRegMask()
-{
+uint32_t ArmMir2Lir::FpRegMask() {
   return ARM_FP_REG_MASK;
 }
 
 // True if both regs single, both core or both double.
-bool ArmMir2Lir::SameRegType(int reg1, int reg2)
-{
+bool ArmMir2Lir::SameRegType(int reg1, int reg2) {
   return (ARM_REGTYPE(reg1) == ARM_REGTYPE(reg2));
 }
 
 /*
 * Decode the register id.
 */
-uint64_t ArmMir2Lir::GetRegMaskCommon(int reg)
-{
+uint64_t ArmMir2Lir::GetRegMaskCommon(int reg) {
   uint64_t seed;
   int shift;
   int reg_id;
@@ -122,13 +114,11 @@ uint64_t ArmMir2Lir::GetRegMaskCommon(int reg)
   return (seed << shift);
 }
 
-uint64_t ArmMir2Lir::GetPCUseDefEncoding()
-{
+uint64_t ArmMir2Lir::GetPCUseDefEncoding() {
   return ENCODE_ARM_REG_PC;
 }
 
-void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir)
-{
+void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir) {
   DCHECK_EQ(cu_->instruction_set, kThumb2);
 
   // Thumb2 specific setup
@@ -203,8 +193,7 @@ void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir)
   }
 }
 
-ArmConditionCode ArmMir2Lir::ArmConditionEncoding(ConditionCode ccode)
-{
+ArmConditionCode ArmMir2Lir::ArmConditionEncoding(ConditionCode ccode) {
   ArmConditionCode res;
   switch (ccode) {
     case kCondEq: res = kArmCondEq; break;
@@ -257,8 +246,7 @@ static const char* shift_names[4] = {
   "ror"};
 
 /* Decode and print a ARM register name */
-static char* DecodeRegList(int opcode, int vector, char* buf)
-{
+static char* DecodeRegList(int opcode, int vector, char* buf) {
   int i;
   bool printed = false;
   buf[0] = 0;
@@ -281,8 +269,7 @@ static char* DecodeRegList(int opcode, int vector, char* buf)
   return buf;
 }
 
-static char* DecodeFPCSRegList(int count, int base, char* buf)
-{
+static char* DecodeFPCSRegList(int count, int base, char* buf) {
   sprintf(buf, "s%d", base);
   for (int i = 1; i < count; i++) {
     sprintf(buf + strlen(buf), ", s%d",base + i);
@@ -290,8 +277,7 @@ static char* DecodeFPCSRegList(int count, int base, char* buf)
   return buf;
 }
 
-static int ExpandImmediate(int value)
-{
+static int ExpandImmediate(int value) {
   int mode = (value & 0xf00) >> 8;
   uint32_t bits = value & 0xff;
   switch (mode) {
@@ -316,8 +302,7 @@ const char* cc_names[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
 * Interpret a format string and build a string no longer than size
 * See format key in Assemble.c.
 */
-std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
-{
+std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) {
   std::string buf;
   int i;
   const char* fmt_end = &fmt[strlen(fmt)];
@@ -455,8 +440,7 @@ std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char
   return buf;
 }
 
-void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
-{
+void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix) {
   char buf[256];
   buf[0] = 0;
 
@@ -501,8 +485,7 @@ void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefi
   }
 }
 
-bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir)
-{
+bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir) {
   return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
 }
 
@@ -527,8 +510,7 @@ Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
 * Alloc a pair of core registers, or a double.  Low reg in low byte,
 * high reg in next byte.
 */
-int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class)
-{
+int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) {
   int high_reg;
   int low_reg;
   int res = 0;
@@ -544,15 +526,13 @@ int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class)
   return res;
 }
 
-int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class)
-{
+int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
     return AllocTempFloat();
   return AllocTemp();
 }
 
-void ArmMir2Lir::CompilerInitializeRegAlloc()
-{
+void ArmMir2Lir::CompilerInitializeRegAlloc() {
   int num_regs = sizeof(core_regs)/sizeof(*core_regs);
   int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
@@ -591,8 +571,7 @@ void ArmMir2Lir::CompilerInitializeRegAlloc()
 }
 
 void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
-                                 RegLocation rl_free)
-{
+                                 RegLocation rl_free) {
   if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
       (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
     // No overlap, free both
@@ -606,8 +585,7 @@ void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
 * machinery is in place, always spill lr.
 */
 
-void ArmMir2Lir::AdjustSpillMask()
-{
+void ArmMir2Lir::AdjustSpillMask() {
   core_spill_mask_ |= (1 << rARM_LR);
   num_core_spills_++;
 }
@@ -618,8 +596,7 @@ void ArmMir2Lir::AdjustSpillMask()
 * include any holes in the mask.  Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
-void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg)
-{
+void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg) {
   DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
   reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
   // Ensure fp_vmap_table is large enough
@@ -634,8 +611,7 @@ void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg)
   fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE;
 }
 
-void ArmMir2Lir::FlushRegWide(int reg1, int reg2)
-{
+void ArmMir2Lir::FlushRegWide(int reg1, int reg2) {
   RegisterInfo* info1 = GetRegInfo(reg1);
   RegisterInfo* info2 = GetRegInfo(reg2);
   DCHECK(info1 && info2 && info1->pair && info2->pair &&
@@ -657,8 +633,7 @@ void ArmMir2Lir::FlushRegWide(int reg1, int reg2)
   }
 }
 
-void ArmMir2Lir::FlushReg(int reg)
-{
+void ArmMir2Lir::FlushReg(int reg) {
   RegisterInfo* info = GetRegInfo(reg);
   if (info->live && info->dirty) {
     info->dirty = false;
@@ -673,8 +648,7 @@ bool ArmMir2Lir::IsFpReg(int reg) {
 }
 
 /* Clobber all regs that might be used by an external C call */
-void ArmMir2Lir::ClobberCalleeSave()
-{
+void ArmMir2Lir::ClobberCalleeSave() {
   Clobber(r0);
   Clobber(r1);
   Clobber(r2);
@@ -699,8 +673,7 @@ void ArmMir2Lir::ClobberCalleeSave()
   Clobber(fr15);
 }
 
-RegLocation ArmMir2Lir::GetReturnWideAlt()
-{
+RegLocation ArmMir2Lir::GetReturnWideAlt() {
   RegLocation res = LocCReturnWide();
   res.low_reg = r2;
   res.high_reg = r3;
@@ -712,8 +685,7 @@ RegLocation ArmMir2Lir::GetReturnWideAlt()
   return res;
 }
 
-RegLocation ArmMir2Lir::GetReturnAlt()
-{
+RegLocation ArmMir2Lir::GetReturnAlt() {
   RegLocation res = LocCReturn();
   res.low_reg = r1;
   Clobber(r1);
@@ -721,15 +693,13 @@ RegLocation ArmMir2Lir::GetReturnAlt()
   return res;
 }
 
-ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg)
-{
+ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg) {
   return ARM_FPREG(reg) ? &reg_pool_->FPRegs[reg & ARM_FP_REG_MASK]
       : &reg_pool_->core_regs[reg];
 }
 
 /* To be used when explicitly managing register use */
-void ArmMir2Lir::LockCallTemps()
-{
+void ArmMir2Lir::LockCallTemps() {
   LockTemp(r0);
   LockTemp(r1);
   LockTemp(r2);
@@ -737,32 +707,27 @@ void ArmMir2Lir::LockCallTemps()
 }
 
 /* To be used when explicitly managing register use */
-void ArmMir2Lir::FreeCallTemps()
-{
+void ArmMir2Lir::FreeCallTemps() {
   FreeTemp(r0);
   FreeTemp(r1);
   FreeTemp(r2);
   FreeTemp(r3);
 }
 
-int ArmMir2Lir::LoadHelper(int offset)
-{
+int ArmMir2Lir::LoadHelper(int offset) {
   LoadWordDisp(rARM_SELF, offset, rARM_LR);
   return rARM_LR;
 }
 
-uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode)
-{
+uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) {
   return ArmMir2Lir::EncodingMap[opcode].flags;
 }
 
-const char* ArmMir2Lir::GetTargetInstName(int opcode)
-{
+const char* ArmMir2Lir::GetTargetInstName(int opcode) {
   return ArmMir2Lir::EncodingMap[opcode].name;
 }
 
-const char* ArmMir2Lir::GetTargetInstFmt(int opcode)
-{
+const char* ArmMir2Lir::GetTargetInstFmt(int opcode) {
   return ArmMir2Lir::EncodingMap[opcode].fmt;
 }
 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index abf921f8ad..80f597d640 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -22,8 +22,7 @@
 namespace art {
 
*/ -static int EncodeImmSingle(int value) -{ +static int EncodeImmSingle(int value) { int res; int bit_a = (value & 0x80000000) >> 31; int not_bit_b = (value & 0x40000000) >> 30; @@ -48,8 +47,7 @@ static int EncodeImmSingle(int value) * Determine whether value can be encoded as a Thumb2 floating point * immediate. If not, return -1. If so return encoded 8-bit value. */ -static int EncodeImmDouble(int64_t value) -{ +static int EncodeImmDouble(int64_t value) { int res; int bit_a = (value & 0x8000000000000000ll) >> 63; int not_bit_b = (value & 0x4000000000000000ll) >> 62; @@ -70,8 +68,7 @@ static int EncodeImmDouble(int64_t value) return res; } -LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) -{ +LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) { DCHECK(ARM_SINGLEREG(r_dest)); if (value == 0) { // TODO: we need better info about the target CPU. a vector exclusive or @@ -98,8 +95,7 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) return load_pc_rel; } -static int LeadingZeros(uint32_t val) -{ +static int LeadingZeros(uint32_t val) { uint32_t alt; int n; int count; @@ -121,8 +117,7 @@ static int LeadingZeros(uint32_t val) * Determine whether value can be encoded as a Thumb2 modified * immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form. */ -int ArmMir2Lir::ModifiedImmediate(uint32_t value) -{ +int ArmMir2Lir::ModifiedImmediate(uint32_t value) { int z_leading; int z_trailing; uint32_t b0 = value & 0xff; @@ -151,23 +146,19 @@ int ArmMir2Lir::ModifiedImmediate(uint32_t value) return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */ } -bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) -{ +bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) { return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0); } -bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) -{ +bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) { return EncodeImmSingle(value) >= 0; } -bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) -{ +bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) { return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value)); } -bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) -{ +bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) { return EncodeImmDouble(value) >= 0; } @@ -179,8 +170,7 @@ bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) * 1) r_dest is freshly returned from AllocTemp or * 2) The codegen is under fixed register usage */ -LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) -{ +LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) { LIR* res; int mod_imm; @@ -214,23 +204,20 @@ LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) return res; } -LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) -{ +LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) { LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly*/); res->target = target; return res; } -LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) -{ +LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) { LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */, ArmConditionEncoding(cc)); branch->target = target; return branch; } -LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) -{ +LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) { ArmOpcode opcode = kThumbBkpt; switch (op) { case kOpBlx: @@ -243,8 +230,7 @@ LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) } LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int 
r_dest_src1, int r_src2, - int shift) -{ + int shift) { bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2)); ArmOpcode opcode = kThumbBkpt; switch (op) { @@ -358,14 +344,12 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, } } -LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) -{ +LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) { return OpRegRegShift(op, r_dest_src1, r_src2, 0); } LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1, - int r_src2, int shift) -{ + int r_src2, int shift) { ArmOpcode opcode = kThumbBkpt; bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) && ARM_LOWREG(r_src2); @@ -430,13 +414,11 @@ LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1, } } -LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) -{ +LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) { return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0); } -LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) -{ +LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { LIR* res; bool neg = (value < 0); int abs_value = (neg) ? -value : value; @@ -560,8 +542,7 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) } /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */ -LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) -{ +LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) { bool neg = (value < 0); int abs_value = (neg) ? -value : value; bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1)); @@ -605,8 +586,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) } } -LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) -{ +LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) { LIR* res = NULL; int32_t val_lo = Low32Bits(value); int32_t val_hi = High32Bits(value); @@ -656,8 +636,7 @@ int ArmMir2Lir::EncodeShift(int code, int amount) { } LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, - int scale, OpSize size) -{ + int scale, OpSize size) { bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest); LIR* load; ArmOpcode opcode = kThumbBkpt; @@ -721,8 +700,7 @@ LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, } LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, - int scale, OpSize size) -{ + int scale, OpSize size) { bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src); LIR* store = NULL; ArmOpcode opcode = kThumbBkpt; @@ -787,8 +765,7 @@ LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, * performing null check, incoming MIR can be null. 
*/ LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, - int r_dest_hi, OpSize size, int s_reg) -{ + int r_dest_hi, OpSize size, int s_reg) { LIR* load = NULL; ArmOpcode opcode = kThumbBkpt; bool short_form = false; @@ -908,14 +885,12 @@ LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, } LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest, - OpSize size, int s_reg) -{ + OpSize size, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg); } LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, - int r_dest_hi, int s_reg) -{ + int r_dest_hi, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg); } @@ -1024,19 +999,16 @@ LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement, } LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src, - OpSize size) -{ + OpSize size) { return StoreBaseDispBody(rBase, displacement, r_src, -1, size); } LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement, - int r_src_lo, int r_src_hi) -{ + int r_src_lo, int r_src_hi) { return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong); } -LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) -{ +LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) { int opcode; DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src)); if (ARM_DOUBLEREG(r_dest)) { @@ -1056,36 +1028,31 @@ LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) return res; } -LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset) -{ +LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset) { LOG(FATAL) << "Unexpected use of OpThreadMem for Arm"; return NULL; } -LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) -{ +LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) { LOG(FATAL) << "Unexpected use of OpMem for Arm"; return NULL; } LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement, int r_src, int r_src_hi, OpSize size, - int s_reg) -{ + int s_reg) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm"; return NULL; } -LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) -{ +LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) { LOG(FATAL) << "Unexpected use of OpRegMem for Arm"; return NULL; } LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement, int r_dest, int r_dest_hi, OpSize size, - int s_reg) -{ + int s_reg) { LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm"; return NULL; } diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 5c10c4ce2b..e728d2769b 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -23,8 +23,7 @@ namespace art { -bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) -{ +bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) { bool res = false; if (rl_src.is_const) { if (rl_src.wide) { @@ -44,27 +43,23 @@ bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) return res; } -void Mir2Lir::MarkSafepointPC(LIR* inst) -{ +void Mir2Lir::MarkSafepointPC(LIR* inst) { inst->def_mask = ENCODE_ALL; LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC); DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL); } -bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put) -{ +bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put) { return 
cu_->compiler_driver->ComputeInstanceFieldInfo( field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put); } /* Convert an instruction to a NOP */ -void Mir2Lir::NopLIR( LIR* lir) -{ +void Mir2Lir::NopLIR( LIR* lir) { lir->flags.is_nop = true; } -void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) -{ +void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) { uint64_t *mask_ptr; uint64_t mask = ENCODE_MEM;; DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE)); @@ -101,8 +96,7 @@ void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) * Mark load/store instructions that access Dalvik registers through the stack. */ void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, - bool is64bit) -{ + bool is64bit) { SetMemRefType(lir, is_load, kDalvikReg); /* @@ -118,8 +112,7 @@ void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, #define DUMP_RESOURCE_MASK(X) /* Pretty-print a LIR instruction */ -void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) -{ +void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { int offset = lir->offset; int dest = lir->operands[0]; const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops)); @@ -204,8 +197,7 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) } } -void Mir2Lir::DumpPromotionMap() -{ +void Mir2Lir::DumpPromotionMap() { int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1; for (int i = 0; i < num_regs; i++) { PromotionMap v_reg_map = promotion_map_[i]; @@ -249,8 +241,7 @@ void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descri } /* Dump instructions and constant pool contents */ -void Mir2Lir::CodegenDump() -{ +void Mir2Lir::CodegenDump() { LOG(INFO) << "Dumping LIR insns for " << PrettyMethod(cu_->method_idx, *cu_->dex_file); LIR* lir_insn; @@ -291,8 +282,7 @@ void Mir2Lir::CodegenDump() * Search the existing constants in the literal pool for an exact or close match * within specified delta (greater or equal to 0). 
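 * For example, an entry holding 0x1000 matches a request for any value in
 * 0x1000..0x1004 when delta == 4, while delta == 0 demands an exact match.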
*/ -LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) -{ +LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) { while (data_target) { if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta) return data_target; @@ -302,8 +292,7 @@ LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) } /* Search the existing constants in the literal pool for an exact wide match */ -LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) -{ +LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) { bool lo_match = false; LIR* lo_target = NULL; while (data_target) { @@ -328,8 +317,7 @@ LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) */ /* Add a 32-bit constant to the constant pool */ -LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) -{ +LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) { /* Add the constant to the literal pool */ if (constant_list_p) { LIR* new_value = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData)); @@ -342,8 +330,7 @@ LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) } /* Add a 64-bit constant to the constant pool or mixed with code */ -LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) -{ +LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) { AddWordData(constant_list_p, val_hi); return AddWordData(constant_list_p, val_lo); } @@ -362,8 +349,7 @@ static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) { } /* Write the literal pool to the output stream */ -void Mir2Lir::InstallLiteralPools() -{ +void Mir2Lir::InstallLiteralPools() { AlignBuffer(code_buffer_, data_offset_); LIR* data_lir = literal_list_; while (data_lir != NULL) { @@ -404,8 +390,7 @@ void Mir2Lir::InstallLiteralPools() } /* Write the switch tables to the output stream */ -void Mir2Lir::InstallSwitchTables() -{ +void Mir2Lir::InstallSwitchTables() { GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); while (true) { Mir2Lir::SwitchTable* tab_rec = iterator.Next(); @@ -462,8 +447,7 @@ void Mir2Lir::InstallSwitchTables() } /* Write the fill array dta to the output stream */ -void Mir2Lir::InstallFillArrayData() -{ +void Mir2Lir::InstallFillArrayData() { GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_); while (true) { Mir2Lir::FillArrayData *tab_rec = iterator.Next(); @@ -476,8 +460,7 @@ void Mir2Lir::InstallFillArrayData() } } -static int AssignLiteralOffsetCommon(LIR* lir, int offset) -{ +static int AssignLiteralOffsetCommon(LIR* lir, int offset) { for (;lir != NULL; lir = lir->next) { lir->offset = offset; offset += 4; @@ -486,8 +469,7 @@ static int AssignLiteralOffsetCommon(LIR* lir, int offset) } // Make sure we have a code address for every declared catch entry -bool Mir2Lir::VerifyCatchEntries() -{ +bool Mir2Lir::VerifyCatchEntries() { bool success = true; for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin(); it != mir_graph_->catches_.end(); ++it) { @@ -521,8 +503,7 @@ bool Mir2Lir::VerifyCatchEntries() } -void Mir2Lir::CreateMappingTables() -{ +void Mir2Lir::CreateMappingTables() { for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) { if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) { pc2dex_mapping_table_.push_back(tgt_lir->offset); @@ -650,16 +631,14 @@ void Mir2Lir::CreateNativeGcMap() { } /* Determine the offset of each literal field */ -int Mir2Lir::AssignLiteralOffset(int offset) -{ +int
Mir2Lir::AssignLiteralOffset(int offset) { offset = AssignLiteralOffsetCommon(literal_list_, offset); offset = AssignLiteralOffsetCommon(code_literal_list_, offset); offset = AssignLiteralOffsetCommon(method_literal_list_, offset); return offset; } -int Mir2Lir::AssignSwitchTablesOffset(int offset) -{ +int Mir2Lir::AssignSwitchTablesOffset(int offset) { GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); while (true) { Mir2Lir::SwitchTable *tab_rec = iterator.Next(); @@ -676,8 +655,7 @@ int Mir2Lir::AssignSwitchTablesOffset(int offset) return offset; } -int Mir2Lir::AssignFillArrayDataOffset(int offset) -{ +int Mir2Lir::AssignFillArrayDataOffset(int offset) { GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_); while (true) { Mir2Lir::FillArrayData *tab_rec = iterator.Next(); @@ -691,8 +669,7 @@ int Mir2Lir::AssignFillArrayDataOffset(int offset) } // LIR offset assignment. -int Mir2Lir::AssignInsnOffsets() -{ +int Mir2Lir::AssignInsnOffsets() { LIR* lir; int offset = 0; @@ -720,8 +697,7 @@ int Mir2Lir::AssignInsnOffsets() * Walk the compilation unit and assign offsets to instructions * and literals and compute the total size of the compiled unit. */ -void Mir2Lir::AssignOffsets() -{ +void Mir2Lir::AssignOffsets() { int offset = AssignInsnOffsets(); /* Const values have to be word aligned */ @@ -744,8 +720,7 @@ void Mir2Lir::AssignOffsets() * before sending them off to the assembler. If out-of-range branch distance is * seen rearrange the instructions a bit to correct it. */ -void Mir2Lir::AssembleLIR() -{ +void Mir2Lir::AssembleLIR() { AssignOffsets(); int assembler_retries = 0; /* @@ -791,8 +766,7 @@ void Mir2Lir::AssembleLIR() * all resource flags on this to prevent code motion across * target boundaries. KeyVal is just there for debugging. */ -LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) -{ +LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) { SafeMap<unsigned int, LIR*>::iterator it; it = boundary_map_.find(vaddr); if (it == boundary_map_.end()) { @@ -806,8 +780,7 @@ LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) return new_label; } -void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) -{ +void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) { const uint16_t* table = tab_rec->table; int base_vaddr = tab_rec->vaddr; const int *targets = reinterpret_cast<const int*>(&table[4]); @@ -818,8 +791,7 @@ void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) } } -void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) -{ +void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) { const uint16_t* table = tab_rec->table; int base_vaddr = tab_rec->vaddr; int entries = table[1]; @@ -830,8 +802,7 @@ void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) } } -void Mir2Lir::ProcessSwitchTables() -{ +void Mir2Lir::ProcessSwitchTables() { GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); while (true) { Mir2Lir::SwitchTable *tab_rec = iterator.Next(); @@ -846,7 +817,7 @@ void Mir2Lir::ProcessSwitchTables() } } -void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) +void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) { /* * Sparse switch data format: * ushort ident = 0x0200 magic value * ushort size number of entries in the table; > 0 * int keys[size] keys, sorted low-to-high; 32-bit aligned * int targets[size] branch targets, relative to switch opcode * * Total size is (2+size*4) 16-bit code units.
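 * For example, with size == 3 the payload occupies 2 + 3*4 == 14 code
 * units (28 bytes): ident, size, then keys[3] and targets[3], each int
 * spanning two 16-bit units. In sketch form, the dump code below recovers
 * the two arrays as
 *   const int* keys = reinterpret_cast<const int*>(&table[2]);
 *   const int* targets = &keys[entries];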
*/ -{ uint16_t ident = table[0]; int entries = table[1]; const int* keys = reinterpret_cast<const int*>(&table[2]); @@ -868,7 +838,7 @@ void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) } } -void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) +void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) { /* * Packed switch data format: * ushort ident = 0x0100 magic value * ushort size number of entries in the table * int first_key first (and lowest) switch case value * int targets[size] branch targets, relative to switch opcode * * Total size is (4+size*2) 16-bit code units. */ -{ uint16_t ident = table[0]; const int* targets = reinterpret_cast<const int*>(&table[4]); int entries = table[1]; @@ -897,8 +866,7 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) * which we split a single Dalvik instruction, only the first MIR op * associated with a Dalvik PC should be entered into the map. */ -LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) -{ +LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) { LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str)); if (boundary_map_.find(offset) == boundary_map_.end()) { boundary_map_.Put(offset, res); @@ -906,8 +874,7 @@ LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) return res; } -bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) -{ +bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) { bool is_taken; switch (opcode) { case Instruction::IF_EQ: is_taken = (src1 == src2); break; @@ -971,8 +938,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena core_spill_mask_(0), fp_spill_mask_(0), first_lir_insn_(NULL), - last_lir_insn_(NULL) - { + last_lir_insn_(NULL) { promotion_map_ = static_cast<PromotionMap*> (arena_->NewMem((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) * sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc)); @@ -1060,8 +1026,7 @@ int Mir2Lir::ComputeFrameSize() { * Append an LIR instruction to the LIR list maintained by a compilation * unit */ -void Mir2Lir::AppendLIR(LIR* lir) -{ +void Mir2Lir::AppendLIR(LIR* lir) { if (first_lir_insn_ == NULL) { DCHECK(last_lir_insn_ == NULL); last_lir_insn_ = first_lir_insn_ = lir; @@ -1080,8 +1045,7 @@ void Mir2Lir::AppendLIR(LIR* lir) * * prev_lir <-> new_lir <-> current_lir */ -void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) -{ +void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) { DCHECK(current_lir->prev != NULL); LIR *prev_lir = current_lir->prev; @@ -1097,8 +1061,7 @@ void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) * * current_lir -> new_lir -> old_next */ -void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) -{ +void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) { new_lir->prev = current_lir; new_lir->next = current_lir->next; current_lir->next = new_lir; diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 865b9c5c4d..a34d2a9e76 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -33,8 +33,7 @@ namespace art { * Generate an kPseudoBarrier marker to indicate the boundary of special * blocks.
*/ -void Mir2Lir::GenBarrier() -{ +void Mir2Lir::GenBarrier() { LIR* barrier = NewLIR0(kPseudoBarrier); /* Mark all resources as being clobbered */ barrier->def_mask = -1; @@ -42,8 +41,7 @@ void Mir2Lir::GenBarrier() // FIXME: need to do some work to split out targets with // condition codes and those without -LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) -{ +LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) { DCHECK_NE(cu_->instruction_set, kMips); LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_); LIR* branch = OpCondBranch(c_code, tgt); @@ -52,8 +50,7 @@ LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) return branch; } -LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) -{ +LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) { LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val); LIR* branch; if (c_code == kCondAl) { @@ -67,8 +64,7 @@ LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKin } /* Perform null-check on a register. */ -LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) -{ +LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) { if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && opt_flags & MIR_IGNORE_NULL_CHECK) { return NULL; @@ -78,8 +74,7 @@ LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) /* Perform check on two registers */ LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2, - ThrowKind kind) -{ + ThrowKind kind) { LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2); LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt); // Remember branch target - will process later @@ -89,8 +84,7 @@ LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2, void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2, LIR* taken, - LIR* fall_through) -{ + LIR* fall_through) { ConditionCode cond; switch (opcode) { case Instruction::IF_EQ: @@ -143,8 +137,7 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, } void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken, - LIR* fall_through) -{ + LIR* fall_through) { ConditionCode cond; rl_src = LoadValue(rl_src, kCoreReg); switch (opcode) { @@ -174,8 +167,7 @@ void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_s OpUnconditionalBranch(fall_through); } -void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) -{ +void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (rl_src.location == kLocPhysReg) { OpRegCopy(rl_result.low_reg, rl_src.low_reg); @@ -187,8 +179,7 @@ void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) } void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { rl_src = LoadValue(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); OpKind op = kOpInvalid; @@ -215,8 +206,7 @@ void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, * Note: AllocFromCode will handle checks for errNegativeArraySize. 
*/ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { FlushAllRegs(); /* Everything to home location */ int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, @@ -236,8 +226,7 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, * code throws runtime exception "bad Filled array req" for 'D' and 'J'. * Current code also throws internal unimp if not 'L', '[' or 'I'. */ -void Mir2Lir::GenFilledNewArray(CallInfo* info) -{ +void Mir2Lir::GenFilledNewArray(CallInfo* info) { int elems = info->num_arg_words; int type_idx = info->index; FlushAllRegs(); /* Everything to home location */ @@ -342,8 +331,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) } void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double, - bool is_object) -{ + bool is_object) { int field_offset; int ssb_index; bool is_volatile; @@ -428,8 +416,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do } void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, - bool is_long_or_double, bool is_object) -{ + bool is_long_or_double, bool is_object) { int field_offset; int ssb_index; bool is_volatile; @@ -510,8 +497,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, } } -void Mir2Lir::HandleSuspendLaunchPads() -{ +void Mir2Lir::HandleSuspendLaunchPads() { int num_elems = suspend_launchpads_.Size(); int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode); for (int i = 0; i < num_elems; i++) { @@ -527,8 +513,7 @@ void Mir2Lir::HandleSuspendLaunchPads() } } -void Mir2Lir::HandleIntrinsicLaunchPads() -{ +void Mir2Lir::HandleIntrinsicLaunchPads() { int num_elems = intrinsic_launchpads_.Size(); for (int i = 0; i < num_elems; i++) { ResetRegPool(); @@ -546,8 +531,7 @@ void Mir2Lir::HandleIntrinsicLaunchPads() } } -void Mir2Lir::HandleThrowLaunchPads() -{ +void Mir2Lir::HandleThrowLaunchPads() { int num_elems = throw_launchpads_.Size(); for (int i = 0; i < num_elems; i++) { ResetRegPool(); @@ -636,8 +620,7 @@ void Mir2Lir::HandleThrowLaunchPads() void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, - bool is_object) -{ + bool is_object) { int field_offset; bool is_volatile; @@ -697,8 +680,7 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, - bool is_object) -{ + bool is_object) { int field_offset; bool is_volatile; @@ -744,8 +726,7 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, } } -void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) -{ +void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { RegLocation rl_method = LoadCurrMethod(); int res_reg = AllocTemp(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -803,8 +784,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) } } -void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) -{ +void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { /* NOTE: Most strings should be available at compile time */ int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() + (sizeof(mirror::String*) * string_idx); @@ -860,8 +840,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) * Let 
helper function take care of everything. Will * call Class::NewInstanceFromCode(type_idx, method); */ -void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) -{ +void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { FlushAllRegs(); /* Everything to home location */ // alloc will always check for resolution, do we also need to verify // access because the verifier was unable to? @@ -877,8 +856,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) StoreValue(rl_dest, rl_result); } -void Mir2Lir::GenThrow(RegLocation rl_src) -{ +void Mir2Lir::GenThrow(RegLocation rl_src) { FlushAllRegs(); CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); } @@ -1065,8 +1043,7 @@ void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation } } -void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) -{ +void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) { bool type_known_final, type_known_abstract, use_declaring_class; bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, @@ -1142,8 +1119,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ } void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { RegLocation rl_result; if (cu_->instruction_set == kThumb2) { /* @@ -1161,7 +1137,7 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des rl_src2 = LoadValueWide(rl_src2, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); // The longs may overlap - use intermediate temp if so - if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)){ + if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) { int t_reg = AllocTemp(); OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg); OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg); @@ -1190,8 +1166,7 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_shift) -{ + RegLocation rl_src1, RegLocation rl_shift) { int func_offset = -1; // Make gcc happy switch (opcode) { @@ -1218,8 +1193,7 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { OpKind op = kOpBkpt; bool is_div_rem = false; bool check_zero = false; @@ -1353,14 +1327,12 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, * or produce corresponding Thumb instructions directly. */ -static bool IsPowerOfTwo(int x) -{ +static bool IsPowerOfTwo(int x) { return (x & (x - 1)) == 0; } // Returns true if no more than two bits are set in 'x'. -static bool IsPopCountLE2(unsigned int x) -{ +static bool IsPopCountLE2(unsigned int x) { x &= x - 1; return (x & (x - 1)) == 0; } @@ -1382,8 +1354,7 @@ static int LowestSetBit(unsigned int x) { // Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit' // and store the result in 'rl_dest'. 
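// For example, lit == 12 (0b1100) fails IsPowerOfTwo (12 & 11 == 8) but
// passes IsPopCountLE2 (clearing the lowest bit leaves 8, a power of two),
// so a multiply by 12 can be lowered to (x << 3) + (x << 2) using
// LowestSetBit(12) == 2 and the remaining bit at position 3; easy division
// bails out unless lit is a power of two (Thumb2 accepts more literals),
// per the guard below.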
bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode, - RegLocation rl_src, RegLocation rl_dest, int lit) -{ + RegLocation rl_src, RegLocation rl_dest, int lit) { if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) { return false; } @@ -1435,8 +1406,7 @@ bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode, // Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit' // and store the result in 'rl_dest'. -bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) -{ +bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) { // Can we simplify this multiplication? bool power_of_two = false; bool pop_count_le2 = false; @@ -1476,8 +1446,7 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li } void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src, - int lit) -{ + int lit) { RegLocation rl_result; OpKind op = static_cast<OpKind>(0); /* Make gcc happy */ int shift_op = false; @@ -1613,8 +1582,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re } void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { RegLocation rl_result; OpKind first_op = kOpBkpt; OpKind second_op = kOpBkpt; @@ -1741,8 +1709,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, } void Mir2Lir::GenConversionCall(int func_offset, - RegLocation rl_dest, RegLocation rl_src) -{ + RegLocation rl_dest, RegLocation rl_src) { /* * Don't optimize the register usage since it calls out to support * functions @@ -1767,8 +1734,7 @@ void Mir2Lir::GenConversionCall(int func_offset, } /* Check if we need to check for pending suspend request */ -void Mir2Lir::GenSuspendTest(int opt_flags) -{ +void Mir2Lir::GenSuspendTest(int opt_flags) { if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { return; } @@ -1782,8 +1748,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) } /* Check if we need to check for pending suspend request */ -void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) -{ +void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) { if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { OpUnconditionalBranch(target); return; diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index e3993e0617..14e395cdac 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -37,14 +37,12 @@ namespace art { * has a memory call operation, part 1 is a NOP for x86. For other targets, * load arguments between the two parts. */ -int Mir2Lir::CallHelperSetup(int helper_offset) -{ +int Mir2Lir::CallHelperSetup(int helper_offset) { return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset); } /* NOTE: if r_tgt is a temp, it will be freed following use */ -LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) -{ +LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) { LIR* call_inst; if (cu_->instruction_set == kX86) { call_inst = OpThreadMem(kOpBlx, helper_offset); @@ -233,8 +231,7 @@ void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset, * ArgLocs is an array of location records describing the incoming arguments * with one location record per word of argument.
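 * For example, an instance method with signature (IJ)V has four ins and
 * four records: one for 'this', one for the int, and two for the low and
 * high words of the long (wide arguments always occupy two slots).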
*/ -void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) -{ +void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { /* * Dummy up a RegLocation for the incoming Method* * It will attempt to keep kArg0 live (or copy it to home location @@ -316,8 +313,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t unused, uintptr_t direct_code, uintptr_t direct_method, - InvokeType type) -{ + InvokeType type) { Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); if (cu->instruction_set != kThumb2) { // Disable sharpening @@ -420,8 +416,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, - InvokeType unused3) -{ + InvokeType unused3) { Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); /* * This is the fast path in which the target virtual method is @@ -469,8 +464,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t unused, uintptr_t unused2, - uintptr_t direct_method, InvokeType unused4) -{ + uintptr_t direct_method, InvokeType unused4) { Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); if (cu->instruction_set != kThumb2) { // Disable sharpening @@ -536,8 +530,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline, int state, const MethodReference& target_method, - uint32_t method_idx) -{ + uint32_t method_idx) { Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); /* * This handles the case in which the base method is not fully @@ -561,8 +554,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, - InvokeType unused3) -{ + InvokeType unused3) { int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -570,8 +562,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, - uintptr_t unused2, InvokeType unused3) -{ + uintptr_t unused2, InvokeType unused3) { int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -579,8 +570,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, - uintptr_t unused2, InvokeType unused3) -{ + uintptr_t unused2, InvokeType unused3) { int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -588,8 +578,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, - uintptr_t unused2, InvokeType unused3) -{ + uintptr_t unused2, InvokeType unused3) { int trampoline =
ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -599,8 +588,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu, const MethodReference& target_method, uint32_t unused, uintptr_t unused2, uintptr_t unused3, - InvokeType unused4) -{ + InvokeType unused4) { int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -609,8 +597,7 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, NextCallInsn next_call_insn, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, - uintptr_t direct_method, InvokeType type, bool skip_this) -{ + uintptr_t direct_method, InvokeType type, bool skip_this) { int last_arg_reg = TargetReg(kArg3); int next_reg = TargetReg(kArg1); int next_arg = 0; @@ -649,8 +636,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, - uintptr_t direct_method, InvokeType type, bool skip_this) -{ + uintptr_t direct_method, InvokeType type, bool skip_this) { RegLocation rl_arg; /* If no arguments, just return */ @@ -749,8 +735,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, - InvokeType type, bool skip_this) -{ + InvokeType type, bool skip_this) { // If we can treat it as non-range (Jumbo ops will use range form) if (info->num_arg_words <= 5) @@ -833,8 +818,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, return call_state; } -RegLocation Mir2Lir::InlineTarget(CallInfo* info) -{ +RegLocation Mir2Lir::InlineTarget(CallInfo* info) { RegLocation res; if (info->result.location == kLocInvalid) { res = GetReturn(false); @@ -844,8 +828,7 @@ RegLocation Mir2Lir::InlineTarget(CallInfo* info) return res; } -RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) -{ +RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) { RegLocation res; if (info->result.location == kLocInvalid) { res = GetReturnWide(false); @@ -855,8 +838,7 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) return res; } -bool Mir2Lir::GenInlinedCharAt(CallInfo* info) -{ +bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -932,8 +914,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) } // Generates an inlined String.is_empty or String.length. 
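// Both reduce to one load of the String's count field from the receiver
// (after a null check); is_empty simply folds in a comparison of the
// loaded length against zero, so the two intrinsics share this one
// generator, distinguished by 'is_empty'.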
-bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) -{ +bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -961,8 +942,7 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) return true; } -bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) -{ +bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -980,8 +960,7 @@ bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) return true; } -bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) -{ +bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1022,8 +1001,7 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) } } -bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) -{ +bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1034,8 +1012,7 @@ bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) return true; } -bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) -{ +bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1050,8 +1027,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) * Fast string.index_of(I) & (II). Tests for simple case of char <= 0xffff, * otherwise bails to standard library code. */ -bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) -{ +bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1094,8 +1070,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) } /* Fast string.compareTo(Ljava/lang/string;)I. */ -bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) -{ +bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { if (cu_->instruction_set == kMips) { // TODO - add Mips implementation return false; @@ -1211,8 +1186,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, return true; } -bool Mir2Lir::GenIntrinsic(CallInfo* info) -{ +bool Mir2Lir::GenIntrinsic(CallInfo* info) { if (info->opt_flags & MIR_INLINED) { return false; } @@ -1358,8 +1332,7 @@ bool Mir2Lir::GenIntrinsic(CallInfo* info) return false; } -void Mir2Lir::GenInvoke(CallInfo* info) -{ +void Mir2Lir::GenInvoke(CallInfo* info) { if (GenIntrinsic(info)) { return; } diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index 6a25c1db45..353910606e 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -27,8 +27,7 @@ namespace art { * Load an immediate value into a fixed or temp register. Target * register is clobbered, and marked in_use. */ -LIR* Mir2Lir::LoadConstant(int r_dest, int value) -{ +LIR* Mir2Lir::LoadConstant(int r_dest, int value) { if (IsTemp(r_dest)) { Clobber(r_dest); MarkInUse(r_dest); @@ -41,8 +40,7 @@ LIR* Mir2Lir::LoadConstant(int r_dest, int value) * promoted floating point register, also copy a zero into the int/ref identity of * that sreg. 
*/ -void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg) -{ +void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg) { if (rl_dest.fp) { int pmap_index = SRegToPMap(rl_dest.s_reg_low); if (promotion_map_[pmap_index].fp_location == kLocPhysReg) { @@ -77,14 +75,12 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg) } /* Load a word at base + displacement. Displacement must be word multiple */ -LIR* Mir2Lir::LoadWordDisp(int rBase, int displacement, int r_dest) -{ +LIR* Mir2Lir::LoadWordDisp(int rBase, int displacement, int r_dest) { return LoadBaseDisp(rBase, displacement, r_dest, kWord, INVALID_SREG); } -LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) -{ +LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) { return StoreBaseDisp(rBase, displacement, r_src, kWord); } @@ -93,8 +89,7 @@ LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) * using this routine, as it doesn't perform any bookkeeping regarding * register liveness. That is the responsibility of the caller. */ -void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) -{ +void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) { rl_src = UpdateLoc(rl_src); if (rl_src.location == kLocPhysReg) { OpRegCopy(r_dest, rl_src.low_reg); @@ -112,8 +107,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) * register. Should be used when loading to a fixed register (for example, * loading arguments to an out of line call. */ -void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest) -{ +void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest) { Clobber(r_dest); MarkInUse(r_dest); LoadValueDirect(rl_src, r_dest); @@ -125,8 +119,7 @@ void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest) * register liveness. That is the responsibility of the caller. */ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo, - int reg_hi) -{ + int reg_hi) { rl_src = UpdateLocWide(rl_src); if (rl_src.location == kLocPhysReg) { OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg); @@ -146,8 +139,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo, * loading arguments to an out of line call. 
*/ void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, - int reg_hi) -{ + int reg_hi) { Clobber(reg_lo); Clobber(reg_hi); MarkInUse(reg_lo); @@ -155,8 +147,7 @@ void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, LoadValueDirectWide(rl_src, reg_lo, reg_hi); } -RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) -{ +RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) { rl_src = EvalLoc(rl_src, op_kind, false); if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) { LoadValueDirect(rl_src, rl_src.low_reg); @@ -166,8 +157,7 @@ RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) return rl_src; } -void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) -{ +void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) { /* * Sanity checking - should never try to store to the same * ssa name during the compilation of a single instruction @@ -222,8 +212,7 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) } } -RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) -{ +RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) { DCHECK(rl_src.wide); rl_src = EvalLoc(rl_src, op_kind, false); if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) { @@ -235,8 +224,7 @@ RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) return rl_src; } -void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) -{ +void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) { /* * Sanity checking - should never try to store to the same * ssa name during the compilation of a single instruction @@ -299,13 +287,11 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) } /* Utilities to load the current Method* */ -void Mir2Lir::LoadCurrMethodDirect(int r_tgt) -{ +void Mir2Lir::LoadCurrMethodDirect(int r_tgt) { LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt); } -RegLocation Mir2Lir::LoadCurrMethod() -{ +RegLocation Mir2Lir::LoadCurrMethod() { return LoadValue(mir_graph_->GetMethodLoc(), kCoreReg); } diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc index ac654d8f21..eb27bf8b5d 100644 --- a/compiler/dex/quick/local_optimizations.cc +++ b/compiler/dex/quick/local_optimizations.cc @@ -29,8 +29,7 @@ namespace art { #define LDLD_DISTANCE 4 #define LD_LATENCY 2 -static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) -{ +static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) { int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info); int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info); int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info); @@ -40,8 +39,7 @@ static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) } /* Convert a more expensive instruction (ie load) into a move */ -void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) -{ +void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) { /* Insert a move to replace the load */ LIR* move_lir; move_lir = OpRegCopyNoInsert(dest, src); @@ -72,8 +70,7 @@ void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) * 1) They are must-aliases * 2) The memory location is not written to in between */ -void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) -{ +void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { LIR* this_lir; if (head_lir == tail_lir) return; @@ -268,8 
+265,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) * Perform a pass of bottom-up walk, from the second instruction in the * superblock, to try to hoist loads to earlier slots. */ -void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) -{ +void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { LIR* this_lir, *check_lir; /* * Store the list of independent instructions that can be hoisted past. @@ -447,8 +443,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) } } -void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) -{ +void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) { if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) { ApplyLoadStoreElimination(head_lir, tail_lir); } @@ -462,8 +457,7 @@ void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) * Note: new redundant branches may be inserted later, and we'll * use a check in final instruction assembly to nop those out. */ -void Mir2Lir::RemoveRedundantBranches() -{ +void Mir2Lir::RemoveRedundantBranches() { LIR* this_lir; for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) { diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc index 2482aa4fbb..dcfb13f549 100644 --- a/compiler/dex/quick/mips/assemble_mips.cc +++ b/compiler/dex/quick/mips/assemble_mips.cc @@ -457,8 +457,7 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = { * NOTE: An out-of-range bal isn't supported because it should * never happen with the current PIC model. */ -void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) -{ +void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) { // For conditional branches we'll need to reverse the sense bool unconditional = false; int opcode = lir->opcode; @@ -513,8 +512,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) * instruction. In those cases we will try to substitute a new code * sequence or request that the trace be shortened and retried. 
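 * For example, an out-of-range conditional branch is rewritten by
 * ConvertShortToLongBranch above into a reversed-sense short branch around
 * an unconditional long jump, and a kRetryAll result asks the caller to
 * reassign offsets and run the assembly loop again.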
*/ -AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) -{ +AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) { LIR *lir; AssemblerStatus res = kSuccess; // Assume success @@ -708,8 +706,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) return res; } -int MipsMir2Lir::GetInsnSize(LIR* lir) -{ +int MipsMir2Lir::GetInsnSize(LIR* lir) { return EncodingMap[lir->opcode].size; } diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index eb0302e80f..db5764336a 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -24,8 +24,7 @@ namespace art { void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, - SpecialCaseHandler special_case) -{ + SpecialCaseHandler special_case) { // TODO } @@ -61,8 +60,7 @@ void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, * */ void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpSparseSwitchTable(table); @@ -142,8 +140,7 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, * done: */ void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpPackedSwitchTable(table); @@ -227,8 +224,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, * * Total size is 4+(width * size + 1)/2 16-bit code units. */ -void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) -{ +void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; // Add the table to the list - we'll process it later FillArrayData *tab_rec = @@ -270,8 +266,7 @@ void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) /* * TODO: implement fast path to short-circuit thin-lock case */ -void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) -{ +void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rMIPS_ARG0); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -286,8 +281,7 @@ void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) /* * TODO: implement fast path to short-circuit thin-lock case */ -void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) -{ +void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rMIPS_ARG0); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -299,8 +293,7 @@ void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) MarkSafepointPC(call_inst); } -void MipsMir2Lir::GenMoveException(RegLocation rl_dest) -{ +void MipsMir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int reset_reg = AllocTemp(); @@ -314,8 +307,7 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) /* * Mark garbage collection card. Skip if the value we're storing is null. 
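 * The card table keeps one byte per 2^kCardShift bytes of heap; the code
 * below shifts the target address right by kCardShift and stores the
 * card-table base byte at that index, so a null value needs no mark (there
 * is no reference for the collector to remember).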
*/ -void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) -{ +void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) { int reg_card_base = AllocTemp(); int reg_card_no = AllocTemp(); LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL); @@ -328,8 +320,7 @@ void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) FreeTemp(reg_card_base); FreeTemp(reg_card_no); } -void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) -{ +void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { int spill_count = num_core_spills_ + num_fp_spills_; /* * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live. Let the register @@ -375,8 +366,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) FreeTemp(rMIPS_ARG3); } -void MipsMir2Lir::GenExitSequence() -{ +void MipsMir2Lir::GenExitSequence() { /* * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't * allocated by the register utilities as temps. diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 8581d5beb6..2e744a2afc 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -22,8 +22,7 @@ namespace art { void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { int op = kMipsNop; RegLocation rl_result; @@ -69,8 +68,7 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, } void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { int op = kMipsNop; RegLocation rl_result; @@ -117,8 +115,7 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, } void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src) -{ + RegLocation rl_src) { int op = kMipsNop; int src_reg; RegLocation rl_result; @@ -175,8 +172,7 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, } void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { bool wide = true; int offset = -1; // Make gcc happy. 
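// The cmpl/cmpg split handled here matters only for NaN: per the Dalvik
// spec, cmpl-float/cmpl-double return -1 on an unordered comparison while
// cmpg-float/cmpg-double return +1. A scalar sketch of the helper
// semantics (the name is illustrative, not a runtime entrypoint):
static int CmpFloatSketch(float a, float b, bool gt_bias) {
  if (a < b) return -1;
  if (a > b) return 1;
  if (a == b) return 0;
  return gt_bias ? 1 : -1;  // unordered: at least one operand is NaN
}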
@@ -215,13 +211,11 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, } void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, - bool gt_bias, bool is_double) -{ + bool gt_bias, bool is_double) { UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch"; } -void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) -{ +void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValue(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -229,8 +223,7 @@ void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) StoreValue(rl_dest, rl_result); } -void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) -{ +void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValueWide(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -239,8 +232,7 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) StoreValueWide(rl_dest, rl_result); } -bool MipsMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) -{ +bool MipsMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) { // TODO: need Mips implementation return false; } diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index 8bfc4e1f91..03a58cc958 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -41,8 +41,7 @@ namespace art { * */ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); int t0 = AllocTemp(); @@ -63,8 +62,7 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, } LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, - LIR* target) -{ + LIR* target) { LIR* branch; MipsOpCode slt_op; MipsOpCode br_op; @@ -131,8 +129,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, } LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, - int check_value, LIR* target) -{ + int check_value, LIR* target) { LIR* branch; if (check_value != 0) { // TUNING: handle s16 & kCondLt/Mi case using slti @@ -164,8 +161,7 @@ LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, return branch; } -LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) -{ +LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) { if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src)) return OpFpRegCopy(r_dest, r_src); LIR* res = RawLIR(current_dalvik_offset_, kMipsMove, @@ -176,16 +172,14 @@ LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) return res; } -LIR* MipsMir2Lir::OpRegCopy(int r_dest, int r_src) -{ +LIR* MipsMir2Lir::OpRegCopy(int r_dest, int r_src) { LIR *res = OpRegCopyNoInsert(r_dest, r_src); AppendLIR(res); return res; } void MipsMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, - int src_hi) -{ + int src_hi) { bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi); bool src_fp = MIPS_FPREG(src_lo) && MIPS_FPREG(src_hi); assert(MIPS_FPREG(src_lo) == MIPS_FPREG(src_hi)); @@ -215,26 +209,22 @@ void MipsMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, } } -void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) -{ +void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { UNIMPLEMENTED(FATAL) << "Need codegen for select"; } -void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) -{ +void 
MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch"; } LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code, - int reg1, int base, int offset, ThrowKind kind) -{ + int reg1, int base, int offset, ThrowKind kind) { LOG(FATAL) << "Unexpected use of GenRegMemCheck for Mips"; return NULL; } RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, - bool is_div) -{ + bool is_div) { NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (is_div) { @@ -246,8 +236,7 @@ RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, } RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit, - bool is_div) -{ + bool is_div) { int t_reg = AllocTemp(); NewLIR3(kMipsAddiu, t_reg, r_ZERO, lit); NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg); @@ -261,13 +250,11 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit, return rl_result; } -void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) -{ +void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { LOG(FATAL) << "Unexpected use of OpLea for Mips"; } -void MipsMir2Lir::OpTlsCmp(int offset, int val) -{ +void MipsMir2Lir::OpTlsCmp(int offset, int val) { LOG(FATAL) << "Unexpected use of OpTlsCmp for Mips"; } @@ -286,22 +273,19 @@ LIR* MipsMir2Lir::OpPcRelLoad(int reg, LIR* target) { return NULL; } -LIR* MipsMir2Lir::OpVldm(int rBase, int count) -{ +LIR* MipsMir2Lir::OpVldm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVldm for Mips"; return NULL; } -LIR* MipsMir2Lir::OpVstm(int rBase, int count) -{ +LIR* MipsMir2Lir::OpVstm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVstm for Mips"; return NULL; } void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit, - int first_bit, int second_bit) -{ + int first_bit, int second_bit) { int t_reg = AllocTemp(); OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit); OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg); @@ -311,8 +295,7 @@ void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, } } -void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) -{ +void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) { int t_reg = AllocTemp(); OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi); GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero); @@ -320,41 +303,35 @@ void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) } // Test suspend flag, return target of taken suspend branch -LIR* MipsMir2Lir::OpTestSuspend(LIR* target) -{ +LIR* MipsMir2Lir::OpTestSuspend(LIR* target) { OpRegImm(kOpSub, rMIPS_SUSPEND, 1); return OpCmpImmBranch((target == NULL) ?
kCondEq : kCondNe, rMIPS_SUSPEND, 0, target); } // Decrement register and branch on condition -LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) -{ +LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) { OpRegImm(kOpSub, reg, 1); return OpCmpImmBranch(c_code, reg, 0, target); } bool MipsMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode, - RegLocation rl_src, RegLocation rl_dest, int lit) -{ + RegLocation rl_src, RegLocation rl_dest, int lit) { LOG(FATAL) << "Unexpected use of SmallLiteralDivide in Mips"; return false; } -LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) -{ +LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) { LOG(FATAL) << "Unexpected use of OpIT in Mips"; return NULL; } void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenMulLong for Mips"; } void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -376,8 +353,7 @@ void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, } void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -398,8 +374,7 @@ void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, StoreValueWide(rl_dest, rl_result); } -void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) -{ +void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { rl_src = LoadValueWide(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); /* @@ -420,20 +395,17 @@ void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) } void MipsMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenAndLong for Mips"; } void MipsMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenOrLong for Mips"; } void MipsMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenXorLong for Mips"; } @@ -441,8 +413,7 @@ void MipsMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1, * Generate array load */ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_dest, int scale) -{ + RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -513,8 +484,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, * */ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -586,8 +556,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size,
RegLocation rl_array, * */ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); @@ -643,15 +612,13 @@ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, } void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_shift) -{ + RegLocation rl_src1, RegLocation rl_shift) { // Default implementation is just to ignore the constant case. GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift); } void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { // Default - bail to non-const handler. GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2); } diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc index cab2c1b53d..bd20e00404 100644 --- a/compiler/dex/quick/mips/target_mips.cc +++ b/compiler/dex/quick/mips/target_mips.cc @@ -36,26 +36,22 @@ static int FpRegs[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7, static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7, r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15}; -RegLocation MipsMir2Lir::LocCReturn() -{ +RegLocation MipsMir2Lir::LocCReturn() { RegLocation res = MIPS_LOC_C_RETURN; return res; } -RegLocation MipsMir2Lir::LocCReturnWide() -{ +RegLocation MipsMir2Lir::LocCReturnWide() { RegLocation res = MIPS_LOC_C_RETURN_WIDE; return res; } -RegLocation MipsMir2Lir::LocCReturnFloat() -{ +RegLocation MipsMir2Lir::LocCReturnFloat() { RegLocation res = MIPS_LOC_C_RETURN_FLOAT; return res; } -RegLocation MipsMir2Lir::LocCReturnDouble() -{ +RegLocation MipsMir2Lir::LocCReturnDouble() { RegLocation res = MIPS_LOC_C_RETURN_DOUBLE; return res; } @@ -86,28 +82,24 @@ int MipsMir2Lir::TargetReg(SpecialTargetRegister reg) { } // Create a double from a pair of singles. -int MipsMir2Lir::S2d(int low_reg, int high_reg) -{ +int MipsMir2Lir::S2d(int low_reg, int high_reg) { return MIPS_S2D(low_reg, high_reg); } // Return mask to strip off fp reg flags and bias. -uint32_t MipsMir2Lir::FpRegMask() -{ +uint32_t MipsMir2Lir::FpRegMask() { return MIPS_FP_REG_MASK; } // True if both regs single, both core or both double. -bool MipsMir2Lir::SameRegType(int reg1, int reg2) -{ +bool MipsMir2Lir::SameRegType(int reg1, int reg2) { return (MIPS_REGTYPE(reg1) == MIPS_REGTYPE(reg2)); } /* * Decode the register id. */ -uint64_t MipsMir2Lir::GetRegMaskCommon(int reg) -{ +uint64_t MipsMir2Lir::GetRegMaskCommon(int reg) { uint64_t seed; int shift; int reg_id; @@ -123,14 +115,12 @@ uint64_t MipsMir2Lir::GetRegMaskCommon(int reg) return (seed << shift); } -uint64_t MipsMir2Lir::GetPCUseDefEncoding() -{ +uint64_t MipsMir2Lir::GetPCUseDefEncoding() { return ENCODE_MIPS_REG_PC; } -void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir) -{ +void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir) { DCHECK_EQ(cu_->instruction_set, kMips); // Mips-specific resource map setup here. @@ -162,8 +152,7 @@ static const char *mips_reg_name[MIPS_REG_COUNT] = { * Interpret a format string and build a string no longer than size * See format key in Assemble.c. 
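 * (For example, a fmt string of "!0r,!1r,!2r" would print operands 0..2 as registers; the '!' escapes shown are illustrative, the authoritative key is the one in Assemble.c.)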
*/ -std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) -{ +std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) { std::string buf; int i; const char *fmt_end = &fmt[strlen(fmt)]; @@ -255,8 +244,7 @@ std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned cha } // FIXME: need to redo resource maps for MIPS - fix this at that time -void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix) -{ +void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix) { char buf[256]; buf[0] = 0; @@ -306,8 +294,7 @@ void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *pre * machinery is in place, always spill lr. */ -void MipsMir2Lir::AdjustSpillMask() -{ +void MipsMir2Lir::AdjustSpillMask() { core_spill_mask_ |= (1 << r_RA); num_core_spills_++; } @@ -318,13 +305,11 @@ void MipsMir2Lir::AdjustSpillMask() * include any holes in the mask. Associate holes with * Dalvik register INVALID_VREG (0xFFFFU). */ -void MipsMir2Lir::MarkPreservedSingle(int s_reg, int reg) -{ +void MipsMir2Lir::MarkPreservedSingle(int s_reg, int reg) { LOG(FATAL) << "No support yet for promoted FP regs"; } -void MipsMir2Lir::FlushRegWide(int reg1, int reg2) -{ +void MipsMir2Lir::FlushRegWide(int reg1, int reg2) { RegisterInfo* info1 = GetRegInfo(reg1); RegisterInfo* info2 = GetRegInfo(reg2); DCHECK(info1 && info2 && info1->pair && info2->pair && @@ -345,8 +330,7 @@ void MipsMir2Lir::FlushRegWide(int reg1, int reg2) } } -void MipsMir2Lir::FlushReg(int reg) -{ +void MipsMir2Lir::FlushReg(int reg) { RegisterInfo* info = GetRegInfo(reg); if (info->live && info->dirty) { info->dirty = false; @@ -361,8 +345,7 @@ bool MipsMir2Lir::IsFpReg(int reg) { } /* Clobber all regs that might be used by an external C call */ -void MipsMir2Lir::ClobberCalleeSave() -{ +void MipsMir2Lir::ClobberCalleeSave() { Clobber(r_ZERO); Clobber(r_AT); Clobber(r_V0); @@ -404,29 +387,25 @@ void MipsMir2Lir::ClobberCalleeSave() Clobber(r_F15); } -RegLocation MipsMir2Lir::GetReturnWideAlt() -{ +RegLocation MipsMir2Lir::GetReturnWideAlt() { UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS"; RegLocation res = LocCReturnWide(); return res; } -RegLocation MipsMir2Lir::GetReturnAlt() -{ +RegLocation MipsMir2Lir::GetReturnAlt() { UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS"; RegLocation res = LocCReturn(); return res; } -MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg) -{ +MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg) { return MIPS_FPREG(reg) ? &reg_pool_->FPRegs[reg & MIPS_FP_REG_MASK] : &reg_pool_->core_regs[reg]; } /* To be used when explicitly managing register use */ -void MipsMir2Lir::LockCallTemps() -{ +void MipsMir2Lir::LockCallTemps() { LockTemp(rMIPS_ARG0); LockTemp(rMIPS_ARG1); LockTemp(rMIPS_ARG2); @@ -434,16 +413,14 @@ void MipsMir2Lir::LockCallTemps() } /* To be used when explicitly managing register use */ -void MipsMir2Lir::FreeCallTemps() -{ +void MipsMir2Lir::FreeCallTemps() { FreeTemp(rMIPS_ARG0); FreeTemp(rMIPS_ARG1); FreeTemp(rMIPS_ARG2); FreeTemp(rMIPS_ARG3); } -void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) -{ +void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { #if ANDROID_SMP != 0 NewLIR1(kMipsSync, 0 /* Only stype currently supported */); #endif @@ -454,8 +431,7 @@ void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) * high reg in next byte.
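 * e.g. a returned pair would be packed as (low_reg & 0xff) | ((high_reg & 0xff) << 8), reading "low byte / next byte" literally (the expression is illustrative).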
*/ int MipsMir2Lir::AllocTypedTempPair(bool fp_hint, - int reg_class) -{ + int reg_class) { int high_reg; int low_reg; int res = 0; @@ -473,17 +449,14 @@ int MipsMir2Lir::AllocTypedTempPair(bool fp_hint, return res; } -int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) -{ - if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) -{ +int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) { + if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) { return AllocTempFloat(); } return AllocTemp(); } -void MipsMir2Lir::CompilerInitializeRegAlloc() -{ +void MipsMir2Lir::CompilerInitializeRegAlloc() { int num_regs = sizeof(core_regs)/sizeof(*core_regs); int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs); int num_temps = sizeof(core_temps)/sizeof(*core_temps); @@ -518,8 +491,7 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() } } -void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) -{ +void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) { if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) && (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) { // No overlap, free both @@ -533,14 +505,12 @@ void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) * ensure that all branch instructions can be restarted if * there is a trap in the shadow. Allocate a temp register. */ -int MipsMir2Lir::LoadHelper(int offset) -{ +int MipsMir2Lir::LoadHelper(int offset) { LoadWordDisp(rMIPS_SELF, offset, r_T9); return r_T9; } -void MipsMir2Lir::SpillCoreRegs() -{ +void MipsMir2Lir::SpillCoreRegs() { if (num_core_spills_ == 0) { return; } @@ -555,8 +525,7 @@ void MipsMir2Lir::SpillCoreRegs() } } -void MipsMir2Lir::UnSpillCoreRegs() -{ +void MipsMir2Lir::UnSpillCoreRegs() { if (num_core_spills_ == 0) { return; } @@ -571,8 +540,7 @@ void MipsMir2Lir::UnSpillCoreRegs() OpRegImm(kOpAdd, rMIPS_SP, frame_size_); } -bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) -{ +bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) { return (lir->opcode == kMipsB); } @@ -592,18 +560,15 @@ Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, return new MipsMir2Lir(cu, mir_graph, arena); } -uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode) -{ +uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode) { return MipsMir2Lir::EncodingMap[opcode].flags; } -const char* MipsMir2Lir::GetTargetInstName(int opcode) -{ +const char* MipsMir2Lir::GetTargetInstName(int opcode) { return MipsMir2Lir::EncodingMap[opcode].name; } -const char* MipsMir2Lir::GetTargetInstFmt(int opcode) -{ +const char* MipsMir2Lir::GetTargetInstFmt(int opcode) { return MipsMir2Lir::EncodingMap[opcode].fmt; } diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 8daafc8d96..089764ff45 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -21,8 +21,7 @@ namespace art { /* This file contains codegen for the MIPS32 ISA. 
*/ -LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src) -{ +LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src) { int opcode; /* must be both DOUBLE or both not DOUBLE */ DCHECK_EQ(MIPS_DOUBLEREG(r_dest),MIPS_DOUBLEREG(r_src)); @@ -51,23 +50,19 @@ LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src) return res; } -bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) -{ +bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) { return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768))); } -bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) -{ +bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) { return false; // TUNING } -bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) -{ +bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) { return false; // TUNING } -bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) -{ +bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) { return false; // TUNING } @@ -80,8 +75,7 @@ bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) * 1) r_dest is freshly returned from AllocTemp or * 2) The codegen is under fixed register usage */ -LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) -{ +LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) { LIR *res; int r_dest_save = r_dest; @@ -112,15 +106,13 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) return res; } -LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) -{ +LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) { LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/ ); res->target = target; return res; } -LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src) -{ +LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src) { MipsOpCode opcode = kMipsNop; switch (op) { case kOpBlx: @@ -136,8 +128,7 @@ LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src) } LIR* MipsMir2Lir::OpRegImm(OpKind op, int r_dest_src1, - int value) -{ + int value) { LIR *res; bool neg = (value < 0); int abs_value = (neg) ? 
-value : value; @@ -167,8 +158,7 @@ LIR* MipsMir2Lir::OpRegImm(OpKind op, int r_dest_src1, return res; } -LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) -{ +LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) { MipsOpCode opcode = kMipsNop; switch (op) { case kOpAdd: @@ -209,8 +199,7 @@ LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) return NewLIR3(opcode, r_dest, r_src1, r_src2); } -LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) -{ +LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { LIR *res; MipsOpCode opcode = kMipsNop; bool short_form = true; @@ -298,8 +287,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) return res; } -LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) -{ +LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) { MipsOpCode opcode = kMipsNop; LIR *res; switch (op) { @@ -342,8 +330,7 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) return NewLIR2(opcode, r_dest_src1, r_src2); } -LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) -{ +LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) { LIR *res; res = LoadConstantNoClobber(r_dest_lo, Low32Bits(value)); LoadConstantNoClobber(r_dest_hi, High32Bits(value)); @@ -352,8 +339,7 @@ LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) /* Load value from base + scaled index. */ LIR* MipsMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, - int scale, OpSize size) -{ + int scale, OpSize size) { LIR *first = NULL; LIR *res; MipsOpCode opcode = kMipsNop; @@ -405,8 +391,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest, /* Store value to base + scaled index. */ LIR* MipsMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, - int scale, OpSize size) -{ + int scale, OpSize size) { LIR *first = NULL; MipsOpCode opcode = kMipsNop; int r_new_index = r_index; @@ -452,7 +437,7 @@ LIR* MipsMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, } LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, - int r_dest_hi, OpSize size, int s_reg) + int r_dest_hi, OpSize size, int s_reg) { /* * Load value from base + displacement. Optionally perform null check * on base (which must have an associated s_reg and MIR). If not * performing null check, incoming MIR can be null. IMPORTANT: this * code must not allocate any new temps. If a new register is needed * and base and dest are the same, spill some other register to * rlp and then restore.
*/ -{ LIR *res; LIR *load = NULL; LIR *load2 = NULL; @@ -551,21 +535,18 @@ LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest, } LIR* MipsMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest, - OpSize size, int s_reg) -{ + OpSize size, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg); } LIR* MipsMir2Lir::LoadBaseDispWide(int rBase, int displacement, - int r_dest_lo, int r_dest_hi, int s_reg) -{ + int r_dest_lo, int r_dest_hi, int s_reg) { return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg); } LIR* MipsMir2Lir::StoreBaseDispBody(int rBase, int displacement, - int r_src, int r_src_hi, OpSize size) -{ + int r_src, int r_src_hi, OpSize size) { LIR *res; LIR *store = NULL; LIR *store2 = NULL; @@ -647,52 +628,44 @@ LIR* MipsMir2Lir::StoreBaseDispBody(int rBase, int displacement, } LIR* MipsMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src, - OpSize size) -{ + OpSize size) { return StoreBaseDispBody(rBase, displacement, r_src, -1, size); } LIR* MipsMir2Lir::StoreBaseDispWide(int rBase, int displacement, - int r_src_lo, int r_src_hi) -{ + int r_src_lo, int r_src_hi) { return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong); } -LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset) -{ +LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset) { LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS"; return NULL; } -LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) -{ +LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) { LOG(FATAL) << "Unexpected use of OpMem for MIPS"; return NULL; } LIR* MipsMir2Lir::StoreBaseIndexedDisp( int rBase, int r_index, int scale, int displacement, - int r_src, int r_src_hi, OpSize size, int s_reg) -{ + int r_src, int r_src_hi, OpSize size, int s_reg) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS"; return NULL; } LIR* MipsMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, - int offset) -{ + int offset) { LOG(FATAL) << "Unexpected use of OpRegMem for MIPS"; return NULL; } LIR* MipsMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement, - int r_dest, int r_dest_hi, OpSize size, int s_reg) -{ + int r_dest, int r_dest_hi, OpSize size, int s_reg) { LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS"; return NULL; } -LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) -{ +LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) { LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS"; return NULL; } diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 4562482a06..b758fb538e 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -26,8 +26,7 @@ namespace art { * load/store utilities here, or target-dependent genXX() handlers * when necessary. 
*/ -void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) -{ +void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) { RegLocation rl_src[3]; RegLocation rl_dest = mir_graph_->GetBadLoc(); RegLocation rl_result = mir_graph_->GetBadLoc(); @@ -659,8 +658,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list } // Process extended MIR instructions -void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) -{ +void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) { switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { case kMirOpCopy: { RegLocation rl_src = mir_graph_->GetSrc(mir, 0); @@ -692,8 +690,7 @@ void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) } // Handle the content in each basic block. -bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) -{ +bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { if (bb->block_type == kDead) return false; current_dalvik_offset_ = bb->start_offset; MIR* mir; @@ -787,8 +784,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) return false; } -void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) -{ +void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) { // Find the first DalvikByteCode block. int num_reachable_blocks = mir_graph_->GetNumReachableBlocks(); BasicBlock*bb = NULL; @@ -817,8 +813,7 @@ void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) GenSpecialCase(bb, mir, special_case); } -void Mir2Lir::MethodMIR2LIR() -{ +void Mir2Lir::MethodMIR2LIR() { // Hold the labels of each block. block_label_list_ = static_cast<LIR*>(arena_->NewMem(sizeof(LIR) * mir_graph_->GetNumBlocks(), true, diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index bec86c181e..abb687cb84 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -226,7 +226,7 @@ class Mir2Lir : public Backend { bool first_in_pair; }; - virtual ~Mir2Lir(){}; + virtual ~Mir2Lir() {}; int32_t s4FromSwitchData(const void* switch_data) { return *reinterpret_cast<const int32_t*>(switch_data); diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc index 8f43542098..4c91223687 100644 --- a/compiler/dex/quick/ralloc_util.cc +++ b/compiler/dex/quick/ralloc_util.cc @@ -27,8 +27,7 @@ namespace art { * not affect the "liveness" of a temp register, which will stay * live until it is either explicitly killed or reallocated. */ -void Mir2Lir::ResetRegPool() -{ +void Mir2Lir::ResetRegPool() { int i; for (i=0; i < reg_pool_->num_core_regs; i++) { if (reg_pool_->core_regs[i].is_temp) @@ -48,8 +47,7 @@ void Mir2Lir::ResetRegPool() * Set up temp & preserved register pools specialized by target. * Note: num_regs may be zero.
*/ -void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) -{ +void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) { int i; for (i=0; i < num; i++) { regs[i].reg = reg_nums[i]; @@ -62,8 +60,7 @@ void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) } } -void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs) -{ +void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs) { LOG(INFO) << "================================================"; for (int i = 0; i < num_regs; i++) { LOG(INFO) << StringPrintf( @@ -75,18 +72,15 @@ void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs) LOG(INFO) << "================================================"; } -void Mir2Lir::DumpCoreRegPool() -{ +void Mir2Lir::DumpCoreRegPool() { DumpRegPool(reg_pool_->core_regs, reg_pool_->num_core_regs); } -void Mir2Lir::DumpFpRegPool() -{ +void Mir2Lir::DumpFpRegPool() { DumpRegPool(reg_pool_->FPRegs, reg_pool_->num_fp_regs); } -void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) -{ +void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) { int i; for (i=0; i< num_regs; i++) { if (p[i].s_reg == s_reg) { @@ -110,8 +104,7 @@ void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) * changes (for example: INT_TO_FLOAT v1, v1). Revisit when improved register allocation is * addressed. */ -void Mir2Lir::ClobberSReg(int s_reg) -{ +void Mir2Lir::ClobberSReg(int s_reg) { /* Reset live temp tracking sanity checker */ if (kIsDebugBuild) { if (s_reg == live_sreg_) { @@ -131,8 +124,7 @@ void Mir2Lir::ClobberSReg(int s_reg) * ssa name (above the last original Dalvik register). This function * maps SSA names to positions in the promotion_map array. */ -int Mir2Lir::SRegToPMap(int s_reg) -{ +int Mir2Lir::SRegToPMap(int s_reg) { DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs()); DCHECK_GE(s_reg, 0); int v_reg = mir_graph_->SRegToVReg(s_reg); @@ -146,8 +138,7 @@ int Mir2Lir::SRegToPMap(int s_reg) } } -void Mir2Lir::RecordCorePromotion(int reg, int s_reg) -{ +void Mir2Lir::RecordCorePromotion(int reg, int s_reg) { int p_map_idx = SRegToPMap(s_reg); int v_reg = mir_graph_->SRegToVReg(s_reg); GetRegInfo(reg)->in_use = true; @@ -160,8 +151,7 @@ void Mir2Lir::RecordCorePromotion(int reg, int s_reg) } /* Reserve a callee-save register. Return -1 if none available */ -int Mir2Lir::AllocPreservedCoreReg(int s_reg) -{ +int Mir2Lir::AllocPreservedCoreReg(int s_reg) { int res = -1; RegisterInfo* core_regs = reg_pool_->core_regs; for (int i = 0; i < reg_pool_->num_core_regs; i++) { @@ -174,8 +164,7 @@ int Mir2Lir::AllocPreservedCoreReg(int s_reg) return res; } -void Mir2Lir::RecordFpPromotion(int reg, int s_reg) -{ +void Mir2Lir::RecordFpPromotion(int reg, int s_reg) { int p_map_idx = SRegToPMap(s_reg); int v_reg = mir_graph_->SRegToVReg(s_reg); GetRegInfo(reg)->in_use = true; @@ -189,8 +178,7 @@ void Mir2Lir::RecordFpPromotion(int reg, int s_reg) * even/odd allocation, but go ahead and allocate anything if not * available. If nothing's available, return -1. */ -int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) -{ +int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) { int res = -1; RegisterInfo* FPRegs = reg_pool_->FPRegs; for (int i = 0; i < reg_pool_->num_fp_regs; i++) { @@ -212,8 +200,7 @@ int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) * allocate if we can't meet the requirements for the pair of * s_reg<=sX[even] & (s_reg+1)<= sX+1. 
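 * Concretely: a promoted double needs an aligned even/odd physical pair, so if s_reg lands on f2 then s_reg+1 must land on f3 (register numbers illustrative).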
*/ -int Mir2Lir::AllocPreservedDouble(int s_reg) -{ +int Mir2Lir::AllocPreservedDouble(int s_reg) { int res = -1; // Assume failure int v_reg = mir_graph_->SRegToVReg(s_reg); int p_map_idx = SRegToPMap(s_reg); @@ -269,8 +256,7 @@ int Mir2Lir::AllocPreservedDouble(int s_reg) * single regs (but if that fails, still attempt to allocate a single, preferring * an odd register first). */ -int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) -{ +int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) { int res = -1; if (double_start) { res = AllocPreservedDouble(s_reg); @@ -284,8 +270,7 @@ int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) } int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, - bool required) -{ + bool required) { int i; int next = *next_temp; for (i=0; i< num_regs; i++) { @@ -323,8 +308,7 @@ int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, } //REDO: too many assumptions. -int Mir2Lir::AllocTempDouble() -{ +int Mir2Lir::AllocTempDouble() { RegisterInfo* p = reg_pool_->FPRegs; int num_regs = reg_pool_->num_fp_regs; /* Start looking at an even reg */ @@ -377,29 +361,25 @@ int Mir2Lir::AllocTempDouble() } /* Return a temp if one is available, -1 otherwise */ -int Mir2Lir::AllocFreeTemp() -{ +int Mir2Lir::AllocFreeTemp() { return AllocTempBody(reg_pool_->core_regs, reg_pool_->num_core_regs, &reg_pool_->next_core_reg, true); } -int Mir2Lir::AllocTemp() -{ +int Mir2Lir::AllocTemp() { return AllocTempBody(reg_pool_->core_regs, reg_pool_->num_core_regs, &reg_pool_->next_core_reg, true); } -int Mir2Lir::AllocTempFloat() -{ +int Mir2Lir::AllocTempFloat() { return AllocTempBody(reg_pool_->FPRegs, reg_pool_->num_fp_regs, &reg_pool_->next_fp_reg, true); } -Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg) { int i; if (s_reg == -1) return NULL; @@ -413,8 +393,7 @@ Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int return NULL; } -Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) -{ +Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) { RegisterInfo* res = NULL; switch (reg_class) { case kAnyReg: @@ -437,8 +416,7 @@ Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) return res; } -void Mir2Lir::FreeTemp(int reg) -{ +void Mir2Lir::FreeTemp(int reg) { RegisterInfo* p = reg_pool_->core_regs; int num_regs = reg_pool_->num_core_regs; int i; @@ -465,8 +443,7 @@ void Mir2Lir::FreeTemp(int reg) LOG(FATAL) << "Tried to free a non-existent temp: r" << reg; } -Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) { RegisterInfo* p = reg_pool_->core_regs; int num_regs = reg_pool_->num_core_regs; int i; @@ -485,20 +462,17 @@ Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) return NULL; } -Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) { RegisterInfo* p = GetRegInfo(reg); return (p->is_temp) ? p : NULL; } -Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg) -{ +Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg) { RegisterInfo* p = GetRegInfo(reg); return (p->is_temp) ? NULL : p; } -bool Mir2Lir::IsDirty(int reg) -{ +bool Mir2Lir::IsDirty(int reg) { RegisterInfo* p = GetRegInfo(reg); return p->dirty; } @@ -508,8 +482,7 @@ bool Mir2Lir::IsDirty(int reg) * register. No check is made to see if the register was previously * allocated.
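 * (Callers such as LockCallTemps() rely on this to pin the fixed argument registers around a call.)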
Use with caution. */ -void Mir2Lir::LockTemp(int reg) -{ +void Mir2Lir::LockTemp(int reg) { RegisterInfo* p = reg_pool_->core_regs; int num_regs = reg_pool_->num_core_regs; int i; @@ -534,13 +507,11 @@ void Mir2Lir::LockTemp(int reg) LOG(FATAL) << "Tried to lock a non-existent temp: r" << reg; } -void Mir2Lir::ResetDef(int reg) -{ +void Mir2Lir::ResetDef(int reg) { ResetDefBody(GetRegInfo(reg)); } -void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) -{ +void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) { if (start && finish) { LIR *p; DCHECK_EQ(s_reg1, s_reg2); @@ -557,8 +528,7 @@ void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) * on entry start points to the LIR prior to the beginning of the * sequence. */ -void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) -{ +void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) { DCHECK(!rl.wide); DCHECK(start && start->next); DCHECK(finish); @@ -572,8 +542,7 @@ void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) * on entry start points to the LIR prior to the beginning of the * sequence. */ -void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) -{ +void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) { DCHECK(rl.wide); DCHECK(start && start->next); DCHECK(finish); @@ -583,8 +552,7 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) p->def_end = finish; } -RegLocation Mir2Lir::WideToNarrow(RegLocation rl) -{ +RegLocation Mir2Lir::WideToNarrow(RegLocation rl) { DCHECK(rl.wide); if (rl.location == kLocPhysReg) { RegisterInfo* info_lo = GetRegInfo(rl.low_reg); @@ -604,8 +572,7 @@ RegLocation Mir2Lir::WideToNarrow(RegLocation rl) return rl; } -void Mir2Lir::ResetDefLoc(RegLocation rl) -{ +void Mir2Lir::ResetDefLoc(RegLocation rl) { DCHECK(!rl.wide); RegisterInfo* p = IsTemp(rl.low_reg); if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) { @@ -615,8 +582,7 @@ void Mir2Lir::ResetDefLoc(RegLocation rl) ResetDef(rl.low_reg); } -void Mir2Lir::ResetDefLocWide(RegLocation rl) -{ +void Mir2Lir::ResetDefLocWide(RegLocation rl) { DCHECK(rl.wide); RegisterInfo* p_low = IsTemp(rl.low_reg); RegisterInfo* p_high = IsTemp(rl.high_reg); @@ -631,8 +597,7 @@ void Mir2Lir::ResetDefLocWide(RegLocation rl) ResetDef(rl.high_reg); } -void Mir2Lir::ResetDefTracking() -{ +void Mir2Lir::ResetDefTracking() { int i; for (i=0; i< reg_pool_->num_core_regs; i++) { ResetDefBody(&reg_pool_->core_regs[i]); @@ -642,8 +607,7 @@ void Mir2Lir::ResetDefTracking() } } -void Mir2Lir::ClobberAllRegs() -{ +void Mir2Lir::ClobberAllRegs() { int i; for (i=0; i< reg_pool_->num_core_regs; i++) { ClobberBody(&reg_pool_->core_regs[i]); @@ -654,8 +618,7 @@ void Mir2Lir::ClobberAllRegs() } // Make sure nothing is live and dirty -void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs) -{ +void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs) { int i; for (i=0; i < num_regs; i++) { if (info[i].live && info[i].dirty) { @@ -668,8 +631,7 @@ void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs) } } -void Mir2Lir::FlushAllRegs() -{ +void Mir2Lir::FlushAllRegs() { FlushAllRegsBody(reg_pool_->core_regs, reg_pool_->num_core_regs); FlushAllRegsBody(reg_pool_->FPRegs, @@ -679,8 +641,7 @@ void Mir2Lir::FlushAllRegs() //TUNING: rewrite all of this reg stuff.
Probably use an attribute table -bool Mir2Lir::RegClassMatches(int reg_class, int reg) -{ +bool Mir2Lir::RegClassMatches(int reg_class, int reg) { if (reg_class == kAnyReg) { return true; } else if (reg_class == kCoreReg) { @@ -690,8 +651,7 @@ bool Mir2Lir::RegClassMatches(int reg_class, int reg) } } -void Mir2Lir::MarkLive(int reg, int s_reg) -{ +void Mir2Lir::MarkLive(int reg, int s_reg) { RegisterInfo* info = GetRegInfo(reg); if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) { return; /* already live */ @@ -708,20 +668,17 @@ void Mir2Lir::MarkLive(int reg, int s_reg) info->s_reg = s_reg; } -void Mir2Lir::MarkTemp(int reg) -{ +void Mir2Lir::MarkTemp(int reg) { RegisterInfo* info = GetRegInfo(reg); info->is_temp = true; } -void Mir2Lir::UnmarkTemp(int reg) -{ +void Mir2Lir::UnmarkTemp(int reg) { RegisterInfo* info = GetRegInfo(reg); info->is_temp = false; } -void Mir2Lir::MarkPair(int low_reg, int high_reg) -{ +void Mir2Lir::MarkPair(int low_reg, int high_reg) { RegisterInfo* info_lo = GetRegInfo(low_reg); RegisterInfo* info_hi = GetRegInfo(high_reg); info_lo->pair = info_hi->pair = true; @@ -729,8 +686,7 @@ void Mir2Lir::MarkPair(int low_reg, int high_reg) info_hi->partner = low_reg; } -void Mir2Lir::MarkClean(RegLocation loc) -{ +void Mir2Lir::MarkClean(RegLocation loc) { RegisterInfo* info = GetRegInfo(loc.low_reg); info->dirty = false; if (loc.wide) { @@ -739,8 +695,7 @@ void Mir2Lir::MarkClean(RegLocation loc) } } -void Mir2Lir::MarkDirty(RegLocation loc) -{ +void Mir2Lir::MarkDirty(RegLocation loc) { if (loc.home) { // If already home, can't be dirty return; @@ -753,14 +708,12 @@ void Mir2Lir::MarkDirty(RegLocation loc) } } -void Mir2Lir::MarkInUse(int reg) -{ +void Mir2Lir::MarkInUse(int reg) { RegisterInfo* info = GetRegInfo(reg); info->in_use = true; } -void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) -{ +void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) { RegisterInfo* new_info = GetRegInfo(new_reg); RegisterInfo* old_info = GetRegInfo(old_reg); // Target temp status must not change @@ -771,8 +724,7 @@ void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) new_info->reg = new_reg; } -bool Mir2Lir::CheckCorePoolSanity() -{ +bool Mir2Lir::CheckCorePoolSanity() { for (static int i = 0; i < reg_pool_->num_core_regs; i++) { if (reg_pool_->core_regs[i].pair) { static int my_reg = reg_pool_->core_regs[i].reg; @@ -808,8 +760,7 @@ bool Mir2Lir::CheckCorePoolSanity() * if it's worthwhile trying to be more clever here. 
*/ -RegLocation Mir2Lir::UpdateLoc(RegLocation loc) -{ +RegLocation Mir2Lir::UpdateLoc(RegLocation loc) { DCHECK(!loc.wide); DCHECK(CheckCorePoolSanity()); if (loc.location != kLocPhysReg) { @@ -832,8 +783,7 @@ RegLocation Mir2Lir::UpdateLoc(RegLocation loc) } /* see comments for update_loc */ -RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) -{ +RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) { DCHECK(loc.wide); DCHECK(CheckCorePoolSanity()); if (loc.location != kLocPhysReg) { @@ -886,16 +836,14 @@ RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) /* For use in cases where we don't know (or care about) the width */ -RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) -{ +RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) { if (loc.wide) return UpdateLocWide(loc); else return UpdateLoc(loc); } -RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) -{ +RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) { DCHECK(loc.wide); int new_regs; int low_reg; @@ -942,8 +890,7 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) return loc; } -RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) -{ +RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) { int new_reg; if (loc.wide) @@ -992,15 +939,13 @@ void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts) { } /* qsort callback function, sort descending */ -static int SortCounts(const void *val1, const void *val2) -{ +static int SortCounts(const void *val1, const void *val2) { const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1); const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2); return (op1->count == op2->count) ? 0 : (op1->count < op2->count ? 1 : -1); } -void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) -{ +void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) { LOG(INFO) << msg; for (int i = 0; i < size; i++) { LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count; @@ -1011,8 +956,7 @@ void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) * Note: some portions of this code are required even if the kPromoteRegs * optimization is disabled. */ -void Mir2Lir::DoPromotion() -{ +void Mir2Lir::DoPromotion() { int reg_bias = cu_->num_compiler_temps + 1; int dalvik_regs = cu_->num_dalvik_registers; int num_regs = dalvik_regs + reg_bias; @@ -1158,21 +1102,18 @@ void Mir2Lir::DoPromotion() } /* Returns sp-relative offset in bytes for a VReg */ -int Mir2Lir::VRegOffset(int v_reg) -{ +int Mir2Lir::VRegOffset(int v_reg) { return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_, fp_spill_mask_, frame_size_, v_reg); } /* Returns sp-relative offset in bytes for a SReg */ -int Mir2Lir::SRegOffset(int s_reg) -{ +int Mir2Lir::SRegOffset(int s_reg) { return VRegOffset(mir_graph_->SRegToVReg(s_reg)); } /* Mark register usage state and return long retloc */ -RegLocation Mir2Lir::GetReturnWide(bool is_double) -{ +RegLocation Mir2Lir::GetReturnWide(bool is_double) { RegLocation gpr_res = LocCReturnWide(); RegLocation fpr_res = LocCReturnDouble(); RegLocation res = is_double ? fpr_res : gpr_res; @@ -1184,8 +1125,7 @@ RegLocation Mir2Lir::GetReturnWide(bool is_double) return res; } -RegLocation Mir2Lir::GetReturn(bool is_float) -{ +RegLocation Mir2Lir::GetReturn(bool is_float) { RegLocation gpr_res = LocCReturn(); RegLocation fpr_res = LocCReturnFloat(); RegLocation res = is_float ?
fpr_res : gpr_res; @@ -1198,8 +1138,7 @@ RegLocation Mir2Lir::GetReturn(bool is_float) return res; } -void Mir2Lir::SimpleRegAlloc() -{ +void Mir2Lir::SimpleRegAlloc() { DoPromotion(); if (cu_->verbose && !(cu_->disable_opt & (1 << kPromoteRegs))) { diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index d60be72c31..1aeb39ae4b 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -23,8 +23,7 @@ namespace art { void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, - SpecialCaseHandler special_case) -{ + SpecialCaseHandler special_case) { // TODO } @@ -33,8 +32,7 @@ void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, * pairs. */ void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpSparseSwitchTable(table); @@ -69,8 +67,7 @@ void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, * done: */ void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, - RegLocation rl_src) -{ + RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; if (cu_->verbose) { DumpPackedSwitchTable(table); @@ -130,8 +127,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, * * Total size is 4+(width * size + 1)/2 16-bit code units. */ -void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) -{ +void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; // Add the table to the list - we'll process it later FillArrayData *tab_rec = @@ -156,8 +152,7 @@ void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) rX86_ARG1, true); } -void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) -{ +void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rCX); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -174,8 +169,7 @@ void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) branch->target = NewLIR0(kPseudoTargetLabel); } -void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) -{ +void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { FlushAllRegs(); LoadValueDirectFixed(rl_src, rAX); // Get obj LockCallTemps(); // Prepare for explicit register usage @@ -195,8 +189,7 @@ void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) branch2->target = NewLIR0(kPseudoTargetLabel); } -void X86Mir2Lir::GenMoveException(RegLocation rl_dest) -{ +void X86Mir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset); @@ -207,8 +200,7 @@ void X86Mir2Lir::GenMoveException(RegLocation rl_dest) /* * Mark garbage collection card. Skip if the value we're storing is null. 
*/ -void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) -{ +void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) { int reg_card_base = AllocTemp(); int reg_card_no = AllocTemp(); LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL); @@ -222,8 +214,7 @@ void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) FreeTemp(reg_card_no); } -void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) -{ +void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { /* * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register * allocation mechanism know so it doesn't try to use any of them when diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc index 906b4cc759..f2ecf6c959 100644 --- a/compiler/dex/quick/x86/fp_x86.cc +++ b/compiler/dex/quick/x86/fp_x86.cc @@ -349,8 +349,7 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, OpCondBranch(ccode, taken); } -void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) -{ +void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValue(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); @@ -358,8 +357,7 @@ void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) StoreValue(rl_dest, rl_result); } -void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) -{ +void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValueWide(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 97d9d2deed..3be24df565 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -27,8 +27,7 @@ namespace art { * Perform register memory operation. 
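 * i.e. compare reg1 against [base + offset] and branch to a throw target of the given kind when c_code holds.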
*/ LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code, - int reg1, int base, int offset, ThrowKind kind) -{ + int reg1, int base, int offset, ThrowKind kind) { LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, base, offset); OpRegMem(kOpCmp, reg1, base, offset); @@ -45,8 +44,7 @@ LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code, * x > y return 1 */ void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { FlushAllRegs(); LockCallTemps(); // Prepare for explicit register usage LoadValueDirectWideFixed(rl_src1, r0, r1); @@ -88,8 +86,7 @@ X86ConditionCode X86ConditionEncoding(ConditionCode cond) { } LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, - LIR* target) -{ + LIR* target) { NewLIR2(kX86Cmp32RR, src1, src2); X86ConditionCode cc = X86ConditionEncoding(cond); LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , @@ -99,8 +96,7 @@ LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, } LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, - int check_value, LIR* target) -{ + int check_value, LIR* target) { if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) { // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode NewLIR2(kX86Test32RR, reg, reg); @@ -113,8 +109,7 @@ LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, return branch; } -LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) -{ +LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) { if (X86_FPREG(r_dest) || X86_FPREG(r_src)) return OpFpRegCopy(r_dest, r_src); LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR, @@ -125,16 +120,14 @@ LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) return res; } -LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src) -{ +LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src) { LIR *res = OpRegCopyNoInsert(r_dest, r_src); AppendLIR(res); return res; } void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, - int src_lo, int src_hi) -{ + int src_lo, int src_hi) { bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi); bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi); assert(X86_FPREG(src_lo) == X86_FPREG(src_hi)); @@ -168,8 +161,7 @@ void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, } } -void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) -{ +void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect"; } @@ -213,21 +205,18 @@ void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { } RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo, - int lit, bool is_div) -{ + int lit, bool is_div) { LOG(FATAL) << "Unexpected use of GenDivRemLit for x86"; return rl_dest; } RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo, - int reg_hi, bool is_div) -{ + int reg_hi, bool is_div) { LOG(FATAL) << "Unexpected use of GenDivRem for x86"; return rl_dest; } -bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) -{ +bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) { DCHECK_EQ(cu_->instruction_set, kX86); RegLocation rl_src1 = info->args[0]; RegLocation rl_src2 = info->args[1]; @@ -247,13 +236,11 @@ bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) return true; } -void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) -{ +void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset); 
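  // A single LEA folds the whole base + index*scale + disp computation into one instruction and, unlike an ADD sequence, does not touch EFLAGS.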
} -void X86Mir2Lir::OpTlsCmp(int offset, int val) -{ +void X86Mir2Lir::OpTlsCmp(int offset, int val) { NewLIR2(kX86Cmp16TI8, offset, val); } @@ -267,22 +254,19 @@ LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) { return NULL; } -LIR* X86Mir2Lir::OpVldm(int rBase, int count) -{ +LIR* X86Mir2Lir::OpVldm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVldm for x86"; return NULL; } -LIR* X86Mir2Lir::OpVstm(int rBase, int count) -{ +LIR* X86Mir2Lir::OpVstm(int rBase, int count) { LOG(FATAL) << "Unexpected use of OpVstm for x86"; return NULL; } void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit, - int first_bit, int second_bit) -{ + int first_bit, int second_bit) { int t_reg = AllocTemp(); OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit); OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg); @@ -292,8 +276,7 @@ void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, } } -void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) -{ +void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) { int t_reg = AllocTemp(); OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi); GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero); @@ -301,40 +284,34 @@ void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) } // Test suspend flag, return target of taken suspend branch -LIR* X86Mir2Lir::OpTestSuspend(LIR* target) -{ +LIR* X86Mir2Lir::OpTestSuspend(LIR* target) { OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0); return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target); } // Decrement register and branch on condition -LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) -{ +LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) { OpRegImm(kOpSub, reg, 1); return OpCmpImmBranch(c_code, reg, 0, target); } bool X86Mir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode, - RegLocation rl_src, RegLocation rl_dest, int lit) -{ + RegLocation rl_src, RegLocation rl_dest, int lit) { LOG(FATAL) << "Unexpected use of SmallLiteralDivide in x86"; return false; } -LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) -{ +LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) { LOG(FATAL) << "Unexpected use of OpIT in x86"; return NULL; } void X86Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { LOG(FATAL) << "Unexpected use of GenMulLong for x86"; } void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -350,8 +327,7 @@ void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, } void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -367,8 +343,7 @@ void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, } void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1, - RegLocation rl_src2) -{ + RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough.
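// Sketch of the fixed-register pattern shared by the wide ALU ops in this file: flush and pin the call temps, load both operands into the (r0, r1) and (r2, r3) pairs, combine the halves (propagating carry where the op needs it), and return the result in (r0, r1).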
FlushAllRegs(); @@ -384,8 +359,7 @@ void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1, } void X86Mir2Lir::GenOrLong(RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -401,8 +375,7 @@ void X86Mir2Lir::GenOrLong(RegLocation rl_dest, } void X86Mir2Lir::GenXorLong(RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_src1, RegLocation rl_src2) { // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart // enough. FlushAllRegs(); @@ -417,8 +390,7 @@ void X86Mir2Lir::GenXorLong(RegLocation rl_dest, StoreValueWide(rl_dest, rl_result); } -void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) -{ +void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { FlushAllRegs(); LockCallTemps(); // Prepare for explicit register usage LoadValueDirectWideFixed(rl_src, r0, r1); @@ -447,8 +419,7 @@ void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) { * Generate array load */ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_dest, int scale) -{ + RegLocation rl_index, RegLocation rl_dest, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -495,8 +466,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, * */ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { RegisterClass reg_class = oat_reg_class_by_size(size); int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset; @@ -539,8 +509,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, * */ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, - RegLocation rl_index, RegLocation rl_src, int scale) -{ + RegLocation rl_index, RegLocation rl_src, int scale) { int len_offset = mirror::Array::LengthOffset().Int32Value(); int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(); @@ -590,15 +559,13 @@ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, } void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_shift) -{ + RegLocation rl_src1, RegLocation rl_shift) { // Default implementation is just to ignore the constant case. GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift); } void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, - RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) -{ + RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { // Default - bail to non-const handler. 
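// A hedged model of the addressing GenArrayGet/GenArrayPut set up above:
// after a bounds check against the array's embedded length field, the
// element is accessed at base + data_offset + (index << scale). The struct
// layout below is invented for illustration; the real offsets come from
// mirror::Array::LengthOffset() and DataOffset().
#include <cassert>
#include <cstddef>
#include <cstdint>

struct FakeIntArray {
  int32_t length;   // stand-in for the length field in the array header
  int32_t data[8];  // stand-in for the element storage
};

static int32_t ArrayGetModel(const FakeIntArray* array, int32_t index) {
  assert(index >= 0 && index < array->length);  // else kThrowArrayBounds
  const uint8_t* base = reinterpret_cast<const uint8_t*>(array);
  const size_t data_offset = offsetof(FakeIntArray, data);
  const int scale = 2;  // log2(sizeof(int32_t))
  return *reinterpret_cast<const int32_t*>(
      base + data_offset + (static_cast<size_t>(index) << scale));
}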
GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2); } diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index c421ef3f11..5b64a6b5c3 100644 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -45,26 +45,22 @@ namespace art { #endif }; -RegLocation X86Mir2Lir::LocCReturn() -{ +RegLocation X86Mir2Lir::LocCReturn() { RegLocation res = X86_LOC_C_RETURN; return res; } -RegLocation X86Mir2Lir::LocCReturnWide() -{ +RegLocation X86Mir2Lir::LocCReturnWide() { RegLocation res = X86_LOC_C_RETURN_WIDE; return res; } -RegLocation X86Mir2Lir::LocCReturnFloat() -{ +RegLocation X86Mir2Lir::LocCReturnFloat() { RegLocation res = X86_LOC_C_RETURN_FLOAT; return res; } -RegLocation X86Mir2Lir::LocCReturnDouble() -{ +RegLocation X86Mir2Lir::LocCReturnDouble() { RegLocation res = X86_LOC_C_RETURN_DOUBLE; return res; } @@ -95,28 +91,24 @@ int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { } // Create a double from a pair of singles. -int X86Mir2Lir::S2d(int low_reg, int high_reg) -{ +int X86Mir2Lir::S2d(int low_reg, int high_reg) { return X86_S2D(low_reg, high_reg); } // Return mask to strip off fp reg flags and bias. -uint32_t X86Mir2Lir::FpRegMask() -{ +uint32_t X86Mir2Lir::FpRegMask() { return X86_FP_REG_MASK; } // True if both regs single, both core or both double. -bool X86Mir2Lir::SameRegType(int reg1, int reg2) -{ +bool X86Mir2Lir::SameRegType(int reg1, int reg2) { return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2)); } /* * Decode the register id. */ -uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) -{ +uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) { uint64_t seed; int shift; int reg_id; @@ -131,8 +123,7 @@ uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) return (seed << shift); } -uint64_t X86Mir2Lir::GetPCUseDefEncoding() -{ +uint64_t X86Mir2Lir::GetPCUseDefEncoding() { /* * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be * able to clean up some of the x86/Arm_Mips differences @@ -141,8 +132,7 @@ uint64_t X86Mir2Lir::GetPCUseDefEncoding() return 0ULL; } -void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir) -{ +void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir) { DCHECK_EQ(cu_->instruction_set, kX86); // X86-specific resource map setup here. @@ -263,8 +253,7 @@ std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char return buf; } -void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) -{ +void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) { char buf[256]; buf[0] = 0; @@ -317,16 +306,14 @@ void X86Mir2Lir::AdjustSpillMask() { * include any holes in the mask. Associate holes with * Dalvik register INVALID_VREG (0xFFFFU). 
*/ -void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg) -{ +void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg) { UNIMPLEMENTED(WARNING) << "MarkPreservedSingle"; #if 0 LOG(FATAL) << "No support yet for promoted FP regs"; #endif } -void X86Mir2Lir::FlushRegWide(int reg1, int reg2) -{ +void X86Mir2Lir::FlushRegWide(int reg1, int reg2) { RegisterInfo* info1 = GetRegInfo(reg1); RegisterInfo* info2 = GetRegInfo(reg2); DCHECK(info1 && info2 && info1->pair && info2->pair && @@ -347,8 +334,7 @@ void X86Mir2Lir::FlushRegWide(int reg1, int reg2) } } -void X86Mir2Lir::FlushReg(int reg) -{ +void X86Mir2Lir::FlushReg(int reg) { RegisterInfo* info = GetRegInfo(reg); if (info->live && info->dirty) { info->dirty = false; @@ -363,8 +349,7 @@ bool X86Mir2Lir::IsFpReg(int reg) { } /* Clobber all regs that might be used by an external C call */ -void X86Mir2Lir::ClobberCalleeSave() -{ +void X86Mir2Lir::ClobberCalleeSave() { Clobber(rAX); Clobber(rCX); Clobber(rDX); @@ -382,8 +367,7 @@ RegLocation X86Mir2Lir::GetReturnWideAlt() { return res; } -RegLocation X86Mir2Lir::GetReturnAlt() -{ +RegLocation X86Mir2Lir::GetReturnAlt() { RegLocation res = LocCReturn(); res.low_reg = rDX; Clobber(rDX); @@ -391,15 +375,13 @@ RegLocation X86Mir2Lir::GetReturnAlt() return res; } -X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg) -{ +X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg) { return X86_FPREG(reg) ? ®_pool_->FPRegs[reg & X86_FP_REG_MASK] : ®_pool_->core_regs[reg]; } /* To be used when explicitly managing register use */ -void X86Mir2Lir::LockCallTemps() -{ +void X86Mir2Lir::LockCallTemps() { LockTemp(rX86_ARG0); LockTemp(rX86_ARG1); LockTemp(rX86_ARG2); @@ -407,16 +389,14 @@ void X86Mir2Lir::LockCallTemps() } /* To be used when explicitly managing register use */ -void X86Mir2Lir::FreeCallTemps() -{ +void X86Mir2Lir::FreeCallTemps() { FreeTemp(rX86_ARG0); FreeTemp(rX86_ARG1); FreeTemp(rX86_ARG2); FreeTemp(rX86_ARG3); } -void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) -{ +void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { #if ANDROID_SMP != 0 // TODO: optimize fences NewLIR0(kX86Mfence); @@ -427,8 +407,7 @@ void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) * high reg in next byte. 
*/ int X86Mir2Lir::AllocTypedTempPair(bool fp_hint, - int reg_class) -{ + int reg_class) { int high_reg; int low_reg; int res = 0; @@ -485,8 +464,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() { } void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, - RegLocation rl_free) -{ + RegLocation rl_free) { if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) && (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) { // No overlap, free both @@ -525,8 +503,7 @@ void X86Mir2Lir::UnSpillCoreRegs() { } } -bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) -{ +bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) { return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32); } @@ -547,24 +524,20 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, } // Not used in x86 -int X86Mir2Lir::LoadHelper(int offset) -{ +int X86Mir2Lir::LoadHelper(int offset) { LOG(FATAL) << "Unexpected use of LoadHelper in x86"; return INVALID_REG; } -uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) -{ +uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { return X86Mir2Lir::EncodingMap[opcode].flags; } -const char* X86Mir2Lir::GetTargetInstName(int opcode) -{ +const char* X86Mir2Lir::GetTargetInstName(int opcode) { return X86Mir2Lir::EncodingMap[opcode].name; } -const char* X86Mir2Lir::GetTargetInstFmt(int opcode) -{ +const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { return X86Mir2Lir::EncodingMap[opcode].fmt; } diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index fb07ff1e22..6376e3b87a 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -22,8 +22,7 @@ namespace art { /* This file contains codegen for the X86 ISA */ -LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) -{ +LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) { int opcode; /* must be both DOUBLE or both not DOUBLE */ DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src)); @@ -49,23 +48,19 @@ LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) return res; } -bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) -{ +bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) { return true; } -bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) -{ +bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) { return false; } -bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) -{ +bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) { return true; } -bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) -{ +bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) { return false; // TUNING } @@ -78,8 +73,7 @@ bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) * 1) r_dest is freshly returned from AllocTemp or * 2) The codegen is under fixed register usage */ -LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) -{ +LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) { int r_dest_save = r_dest; if (X86_FPREG(r_dest)) { if (value == 0) { @@ -105,23 +99,20 @@ LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) return res; } -LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) -{ +LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) { LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/ ); res->target = target; return res; } -LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) -{ +LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) { LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */, 
X86ConditionEncoding(cc)); branch->target = target; return branch; } -LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) -{ +LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpNeg: opcode = kX86Neg32R; break; @@ -133,8 +124,7 @@ LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) return NewLIR1(opcode, r_dest_src); } -LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) -{ +LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) { X86OpCode opcode = kX86Bkpt; bool byte_imm = IS_SIMM8(value); DCHECK(!X86_FPREG(r_dest_src1)); @@ -160,8 +150,7 @@ LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) return NewLIR2(opcode, r_dest_src1, value); } -LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) -{ +LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) { X86OpCode opcode = kX86Nop; bool src2_must_be_cx = false; switch (op) { @@ -207,8 +196,7 @@ LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) } LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, - int offset) -{ + int offset) { X86OpCode opcode = kX86Nop; switch (op) { // X86 binary opcodes @@ -231,8 +219,7 @@ LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, } LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, - int r_src2) -{ + int r_src2) { if (r_dest != r_src1 && r_dest != r_src2) { if (op == kOpAdd) { // lea special case, except can't encode rbp as base if (r_src1 == r_src2) { @@ -280,8 +267,7 @@ LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, } LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src, - int value) -{ + int value) { if (op == kOpMul) { X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI; return NewLIR3(opcode, r_dest, r_src, value); @@ -306,8 +292,7 @@ LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src, return OpRegImm(op, r_dest, value); } -LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) -{ +LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpBlx: opcode = kX86CallT; break; @@ -318,8 +303,7 @@ LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) return NewLIR1(opcode, thread_offset); } -LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) -{ +LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpBlx: opcode = kX86CallM; break; @@ -330,8 +314,7 @@ LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) return NewLIR2(opcode, rBase, disp); } -LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) -{ +LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) { int32_t val_lo = Low32Bits(value); int32_t val_hi = High32Bits(value); LIR *res; @@ -558,23 +541,20 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, /* store value base base + scaled index. 
*/ LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src, - int scale, OpSize size) -{ + int scale, OpSize size) { return StoreBaseIndexedDisp(rBase, r_index, scale, 0, r_src, INVALID_REG, size, INVALID_SREG); } LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement, - int r_src, OpSize size) -{ + int r_src, OpSize size) { return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement, r_src, INVALID_REG, size, INVALID_SREG); } LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement, - int r_src_lo, int r_src_hi) -{ + int r_src_lo, int r_src_hi) { return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement, r_src_lo, r_src_hi, kLong, INVALID_SREG); } diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc index 41820720d8..ccd2454a49 100644 --- a/compiler/dex/ssa_transformation.cc +++ b/compiler/dex/ssa_transformation.cc @@ -37,8 +37,7 @@ BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) { return bb; } -BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) -{ +BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) { BasicBlock* res = NeedsVisit(bb->fall_through); if (res == NULL) { res = NeedsVisit(bb->taken); @@ -57,15 +56,13 @@ BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) return res; } -void MIRGraph::MarkPreOrder(BasicBlock* block) -{ +void MIRGraph::MarkPreOrder(BasicBlock* block) { block->visited = true; /* Enqueue the pre_order block id */ dfs_order_->Insert(block->id); } -void MIRGraph::RecordDFSOrders(BasicBlock* block) -{ +void MIRGraph::RecordDFSOrders(BasicBlock* block) { std::vector succ; MarkPreOrder(block); succ.push_back(block); @@ -84,8 +81,7 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) } /* Sort the blocks by the Depth-First-Search */ -void MIRGraph::ComputeDFSOrders() -{ +void MIRGraph::ComputeDFSOrders() { /* Initialize or reset the DFS pre_order list */ if (dfs_order_ == NULL) { dfs_order_ = new (arena_) GrowableArray(arena_, GetNumBlocks(), kGrowableArrayDfsOrder); @@ -115,8 +111,7 @@ void MIRGraph::ComputeDFSOrders() * Mark block bit on the per-Dalvik register vector to denote that Dalvik * register idx is defined in BasicBlock bb. */ -bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) -{ +bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) { if (bb->data_flow_info == NULL) return false; ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v); @@ -129,8 +124,7 @@ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) return true; } -void MIRGraph::ComputeDefBlockMatrix() -{ +void MIRGraph::ComputeDefBlockMatrix() { int num_registers = cu_->num_dalvik_registers; /* Allocate num_dalvik_registers bit vector pointers */ def_block_matrix_ = static_cast @@ -203,8 +197,7 @@ void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) { } void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb, - const BasicBlock* succ_bb) -{ + const BasicBlock* succ_bb) { /* * TODO - evaluate whether phi will ever need to be inserted into exit * blocks. 
@@ -217,8 +210,7 @@ void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb, } /* Worker function to compute the dominance frontier */ -bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) -{ +bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) { /* Calculate DF_local */ if (bb->taken) { CheckForDominanceFrontier(bb, bb->taken); @@ -257,8 +249,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) } /* Worker function for initializing domination-related data structures */ -void MIRGraph::InitializeDominationInfo(BasicBlock* bb) -{ +void MIRGraph::InitializeDominationInfo(BasicBlock* bb) { int num_total_blocks = GetBasicBlockListCount(); if (bb->dominators == NULL ) { @@ -284,8 +275,7 @@ void MIRGraph::InitializeDominationInfo(BasicBlock* bb) * Given the ordering of i_dom_list, this common parent represents the * last element of the intersection of block1 and block2 dominators. */ -int MIRGraph::FindCommonParent(int block1, int block2) -{ +int MIRGraph::FindCommonParent(int block1, int block2) { while (block1 != block2) { while (block1 < block2) { block1 = i_dom_list_[block1]; @@ -300,8 +290,7 @@ int MIRGraph::FindCommonParent(int block1, int block2) } /* Worker function to compute each block's immediate dominator */ -bool MIRGraph::ComputeblockIDom(BasicBlock* bb) -{ +bool MIRGraph::ComputeblockIDom(BasicBlock* bb) { /* Special-case entry block */ if (bb == GetEntryBlock()) { return false; @@ -343,8 +332,7 @@ bool MIRGraph::ComputeblockIDom(BasicBlock* bb) } /* Worker function to compute each block's domintors */ -bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) -{ +bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) { if (bb == GetEntryBlock()) { bb->dominators->ClearAllBits(); } else { @@ -354,8 +342,7 @@ bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) return false; } -bool MIRGraph::SetDominators(BasicBlock* bb) -{ +bool MIRGraph::SetDominators(BasicBlock* bb) { if (bb != GetEntryBlock()) { int idom_dfs_idx = i_dom_list_[bb->dfs_id]; DCHECK_NE(idom_dfs_idx, NOTVISITED); @@ -369,8 +356,7 @@ bool MIRGraph::SetDominators(BasicBlock* bb) } /* Compute dominators, immediate dominator, and dominance fronter */ -void MIRGraph::ComputeDominators() -{ +void MIRGraph::ComputeDominators() { int num_reachable_blocks = num_reachable_blocks_; int num_total_blocks = GetBasicBlockListCount(); @@ -435,8 +421,7 @@ void MIRGraph::ComputeDominators() * This is probably not general enough to be placed in BitVector.[ch]. */ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1, - const ArenaBitVector* src2) -{ + const ArenaBitVector* src2) { if (dest->GetStorageSize() != src1->GetStorageSize() || dest->GetStorageSize() != src2->GetStorageSize() || dest->IsExpandable() != src1->IsExpandable() || @@ -455,8 +440,7 @@ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src * The calculated result is used for phi-node pruning - where we only need to * insert a phi node if the variable is live-in to the block. 
*/ -bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) -{ +bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) { ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_; if (bb->data_flow_info == NULL) return false; @@ -487,8 +471,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) } /* Insert phi nodes to for each variable to the dominance frontiers */ -void MIRGraph::InsertPhiNodes() -{ +void MIRGraph::InsertPhiNodes() { int dalvik_reg; ArenaBitVector* phi_blocks = new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapPhi); @@ -569,8 +552,7 @@ void MIRGraph::InsertPhiNodes() * Worker function to insert phi-operands with latest SSA names from * predecessor blocks */ -bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) -{ +bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) { MIR *mir; std::vector uses; std::vector incoming_arc; @@ -622,8 +604,7 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) return true; } -void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) -{ +void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) { if (block->visited || block->hidden) return; block->visited = true; @@ -663,8 +644,7 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) } /* Perform SSA transformation for the whole method */ -void MIRGraph::SSATransformation() -{ +void MIRGraph::SSATransformation() { /* Compute the DFS order */ ComputeDFSOrders(); diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc index adbda5c18f..8df6dd93e7 100644 --- a/compiler/dex/vreg_analysis.cc +++ b/compiler/dex/vreg_analysis.cc @@ -72,8 +72,7 @@ bool MIRGraph::SetHigh(int index, bool is_high) { * as it doesn't propagate. We're guaranteed at least one pass through * the cfg. */ -bool MIRGraph::InferTypeAndSize(BasicBlock* bb) -{ +bool MIRGraph::InferTypeAndSize(BasicBlock* bb) { MIR *mir; bool changed = false; // Did anything change? @@ -333,8 +332,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "}; -void MIRGraph::DumpRegLocTable(RegLocation* table, int count) -{ +void MIRGraph::DumpRegLocTable(RegLocation* table, int count) { //FIXME: Quick-specific. Move to Quick (and make a generic version for MIRGraph? Mir2Lir* cg = static_cast(cu_->cg.get()); if (cg != NULL) { @@ -374,8 +372,7 @@ static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, * allocation is done on the fly. We also do some initialization and * type inference here. 
*/ -void MIRGraph::BuildRegLocations() -{ +void MIRGraph::BuildRegLocations() { int i; RegLocation* loc; diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 9e71dff464..d1d21b1d03 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -356,14 +356,14 @@ CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet jni_compiler_(NULL), compiler_enable_auto_elf_loading_(NULL), compiler_get_method_code_addr_(NULL), - support_boot_image_fixup_(true) -{ + support_boot_image_fixup_(true) { + CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key"); // TODO: more work needed to combine initializations and allow per-method backend selection typedef void (*InitCompilerContextFn)(CompilerDriver&); InitCompilerContextFn init_compiler_context; - if (compiler_backend_ == kPortable){ + if (compiler_backend_ == kPortable) { // Initialize compiler_context_ init_compiler_context = reinterpret_cast(ArtInitCompilerContext); compiler_ = reinterpret_cast(ArtCompileMethod); @@ -1411,10 +1411,7 @@ class ParallelCompilationManager { begin_(begin), end_(end), callback_(callback), - stripe_(stripe) - { - - } + stripe_(stripe) {} virtual void Run(Thread* self) { for (size_t i = begin_; i < end_; i += stripe_) { @@ -2095,7 +2092,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl } if (!is_black_listed) { LOG(INFO) << "Initializing: " << descriptor; - if (StringPiece(descriptor) == "Ljava/lang/Void;"){ + if (StringPiece(descriptor) == "Ljava/lang/Void;") { // Hand initialize j.l.Void to avoid Dex file operations in un-started runtime. mirror::ObjectArray* fields = klass->GetSFields(); CHECK_EQ(fields->GetLength(), 1); diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 8d32a915dd..7fd1a7cb10 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -108,7 +108,7 @@ bool ImageWriter::Write(const std::string& image_filename, return false; } #ifndef NDEBUG - { + { // NOLINT(whitespace/braces) ScopedObjectAccess soa(Thread::Current()); CheckNonImageClassesRemoved(); } diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 560a146052..4b6967faa0 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -68,7 +68,7 @@ class JniCompilerTest : public CommonTest { void SetUpForTest(bool direct, const char* method_name, const char* method_sig, void* native_fnptr) { // Initialize class loader and compile method when runtime not started. 
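// A hedged sketch of the striped worker pattern in the
// ParallelCompilationManager hunk above: worker k visits begin+k,
// begin+k+stripe, begin+k+2*stripe, ..., so `stripe` workers partition
// [begin, end) with no locking and no shared cursor. All names here are
// invented; only the indexing scheme is taken from the patch.
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

static void ForAllStriped(size_t begin, size_t end, size_t workers,
                          std::atomic<size_t>* done) {
  std::vector<std::thread> pool;
  for (size_t k = 0; k < workers; ++k) {
    pool.emplace_back([=] {
      for (size_t i = begin + k; i < end; i += workers) {
        done->fetch_add(1);  // stand-in for compiling work item i
      }
    });
  }
  for (std::thread& t : pool) {
    t.join();
  }
}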
- if (!runtime_->IsStarted()){ + if (!runtime_->IsStarted()) { { ScopedObjectAccess soa(Thread::Current()); class_loader_ = LoadDex("MyClassNatives"); diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc index 28405f67d4..976aa8f50d 100644 --- a/compiler/llvm/runtime_support_builder.cc +++ b/compiler/llvm/runtime_support_builder.cc @@ -38,8 +38,7 @@ using namespace runtime_support; RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb) - : context_(context), module_(module), irb_(irb) -{ + : context_(context), module_(module), irb_(irb) { memset(target_runtime_support_func_, 0, sizeof(target_runtime_support_func_)); #define GET_RUNTIME_SUPPORT_FUNC_DECL(ID, NAME) \ do { \ diff --git a/jdwpspy/Common.h b/jdwpspy/Common.h index 33f1a670ea..30a49fba76 100644 --- a/jdwpspy/Common.h +++ b/jdwpspy/Common.h @@ -26,16 +26,14 @@ typedef uint64_t u8; /* * Get 1 byte. (Included to make the code more legible.) */ -INLINE u1 get1(unsigned const char* pSrc) -{ +INLINE u1 get1(unsigned const char* pSrc) { return *pSrc; } /* * Get 2 big-endian bytes. */ -INLINE u2 get2BE(unsigned char const* pSrc) -{ +INLINE u2 get2BE(unsigned char const* pSrc) { u2 result; result = *pSrc++ << 8; @@ -47,8 +45,7 @@ INLINE u2 get2BE(unsigned char const* pSrc) /* * Get 4 big-endian bytes. */ -INLINE u4 get4BE(unsigned char const* pSrc) -{ +INLINE u4 get4BE(unsigned char const* pSrc) { u4 result; result = *pSrc++ << 24; @@ -62,8 +59,7 @@ INLINE u4 get4BE(unsigned char const* pSrc) /* * Get 8 big-endian bytes. */ -INLINE u8 get8BE(unsigned char const* pSrc) -{ +INLINE u8 get8BE(unsigned char const* pSrc) { u8 result; result = (u8) *pSrc++ << 56; diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index fbec826af2..bb4b5c5dab 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -465,7 +465,7 @@ ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) : #if ART_USE_FUTEXES , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0) #endif -{ +{ // NOLINT(whitespace/braces) #if !ART_USE_FUTEXES CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL)); #endif diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 04979016e3..2a55e3138b 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -169,7 +169,7 @@ void ThrowIllegalAccessErrorFinalField(const mirror::AbstractMethod* referrer, msg.str().c_str()); } -void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...){ +void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) { va_list args; va_start(args, fmt); ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, fmt, &args); @@ -222,7 +222,7 @@ void ThrowIncompatibleClassChangeErrorField(const mirror::Field* resolved_field, msg.str().c_str()); } -void ThrowIncompatibleClassChangeError(const mirror::Class* referrer, const char* fmt, ...){ +void ThrowIncompatibleClassChangeError(const mirror::Class* referrer, const char* fmt, ...) 
{ va_list args; va_start(args, fmt); ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args); diff --git a/runtime/compiled_method.cc b/runtime/compiled_method.cc index 757a324155..49706ae8eb 100644 --- a/runtime/compiled_method.cc +++ b/runtime/compiled_method.cc @@ -19,8 +19,7 @@ namespace art { CompiledCode::CompiledCode(InstructionSet instruction_set, const std::vector& code) - : instruction_set_(instruction_set), code_(code) -{ + : instruction_set_(instruction_set), code_(code) { CHECK_NE(code.size(), 0U); } @@ -118,8 +117,7 @@ CompiledMethod::CompiledMethod(InstructionSet instruction_set, const std::vector& native_gc_map) : CompiledCode(instruction_set, code), frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask), - gc_map_(native_gc_map) -{ + gc_map_(native_gc_map) { DCHECK_EQ(vmap_table.size(), static_cast(__builtin_popcount(core_spill_mask) + __builtin_popcount(fp_spill_mask))); diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index 1e37dcde07..cd34c3c89e 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -252,7 +252,7 @@ DexFile::~DexFile() { class ScopedJniMonitorLock { public: - ScopedJniMonitorLock(JNIEnv* env, jobject locked) : env_(env), locked_(locked){ + ScopedJniMonitorLock(JNIEnv* env, jobject locked) : env_(env), locked_(locked) { env->MonitorEnter(locked_); } ~ScopedJniMonitorLock() { diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h index d2ad989395..c3424dcdff 100644 --- a/runtime/dex_instruction.h +++ b/runtime/dex_instruction.h @@ -82,7 +82,7 @@ class Instruction { // TODO: the code layout below is deliberate to avoid this enum being picked up by // generate-operator-out.py. enum Code - { + { // NOLINT(whitespace/braces) #define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = opcode, #include "dex_instruction_list.h" DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM) diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index 3cee1b7355..f7d776fbfb 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -49,8 +49,7 @@ void LargeObjectSpace::CopyLiveToMarked() { LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name) : LargeObjectSpace(name), - lock_("large object map space lock", kAllocSpaceLock) -{ + lock_("large object map space lock", kAllocSpaceLock) { } @@ -274,7 +273,7 @@ mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) { return reinterpret_cast(addr); } -void FreeListSpace::Dump(std::ostream& os) const{ +void FreeListSpace::Dump(std::ostream& os) const { os << GetName() << " -" << " begin: " << reinterpret_cast(Begin()) << " end: " << reinterpret_cast(End()); diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 16e04a5a3f..2fb272cef4 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -2574,7 +2574,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx); break; } - case Instruction::SHL_INT_2ADDR:{ + case Instruction::SHL_INT_2ADDR: { PREAMBLE(); uint32_t vregA = inst->VRegA_12x(); shadow_frame.SetVReg(vregA, diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc index 88a9dc1aa6..58ef5f7bc8 100644 --- a/runtime/mirror/abstract_method.cc +++ b/runtime/mirror/abstract_method.cc @@ -262,7 +262,7 @@ void AbstractMethod::Invoke(Thread* self, uint32_t* args, 
uint32_t args_size, JV Runtime* runtime = Runtime::Current(); // Call the invoke stub, passing everything as arguments. - if (UNLIKELY(!runtime->IsStarted())){ + if (UNLIKELY(!runtime->IsStarted())) { LOG(INFO) << "Not invoking " << PrettyMethod(this) << " for a runtime that isn't started"; if (result != NULL) { result->SetJ(0); diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index d323c3333b..52906a2650 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -75,7 +75,7 @@ inline AbstractMethod* Class::GetDirectMethod(int32_t i) const } inline void Class::SetDirectMethod(uint32_t i, AbstractMethod* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* direct_methods = GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false); @@ -308,13 +308,13 @@ inline size_t Class::NumInstanceFields() const { } inline Field* Class::GetInstanceField(uint32_t i) const // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK_NE(NumInstanceFields(), 0U); return GetIFields()->Get(i); } inline void Class::SetInstanceField(uint32_t i, Field* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ObjectArray* ifields= GetFieldObject*>( OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false); ifields->Set(i, f); diff --git a/runtime/native/dalvik_system_Zygote.cc b/runtime/native/dalvik_system_Zygote.cc index 9b995f421d..e6b4513972 100644 --- a/runtime/native/dalvik_system_Zygote.cc +++ b/runtime/native/dalvik_system_Zygote.cc @@ -492,7 +492,7 @@ static pid_t ForkAndSpecializeCommon(JNIEnv* env, uid_t uid, gid_t gid, jintArra SetSchedulerPolicy(); #if defined(HAVE_ANDROID_OS) - { + { // NOLINT(whitespace/braces) const char* se_info_c_str = NULL; UniquePtr se_info; if (java_se_info != NULL) { diff --git a/runtime/oat/runtime/support_jni.cc b/runtime/oat/runtime/support_jni.cc index 8f0f7ca93d..25f6930dd0 100644 --- a/runtime/oat/runtime/support_jni.cc +++ b/runtime/oat/runtime/support_jni.cc @@ -104,7 +104,7 @@ static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { } extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_){ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(Thread::Current() == self); // TODO: this code is specific to ARM // On entry the stack pointed by sp is: diff --git a/runtime/oat/runtime/x86/context_x86.cc b/runtime/oat/runtime/x86/context_x86.cc index ceb10bd6ca..c728ae97ec 100644 --- a/runtime/oat/runtime/x86/context_x86.cc +++ b/runtime/oat/runtime/x86/context_x86.cc @@ -61,7 +61,7 @@ void X86Context::SmashCallerSaves() { gprs_[EBX] = NULL; } -void X86Context::SetGPR(uint32_t reg, uintptr_t value){ +void X86Context::SetGPR(uint32_t reg, uintptr_t value) { CHECK_LT(reg, static_cast(kNumberOfCpuRegisters)); CHECK_NE(gprs_[reg], &gZero); CHECK(gprs_[reg] != NULL); diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 0f29915a9b..bb8341ee9f 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -358,8 +358,7 @@ OatFile::OatMethod::OatMethod(const byte* base, fp_spill_mask_(fp_spill_mask), mapping_table_offset_(mapping_table_offset), vmap_table_offset_(vmap_table_offset), - native_gc_map_offset_(gc_map_offset) -{ + native_gc_map_offset_(gc_map_offset) { #ifndef NDEBUG if (mapping_table_offset_ != 0) { // implies non-native, non-stub code if (vmap_table_offset_ 
== 0) { diff --git a/runtime/runtime.cc b/runtime/runtime.cc index e5fb46fa5b..14d4c8a592 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -566,7 +566,7 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b Trace::SetDefaultClockSource(kProfilerClockSourceDual); } else if (option == "-small") { parsed->small_mode_ = true; - }else if (option == "-sea_ir") { + } else if (option == "-sea_ir") { parsed->sea_ir_mode_ = true; } else if (StartsWith(option, "-small-mode-methods-max:")) { parsed->small_mode_method_threshold_ = ParseIntegerOrDie(option); diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc index f0f6f1844d..784a7caadf 100644 --- a/runtime/thread_pool.cc +++ b/runtime/thread_pool.cc @@ -60,7 +60,7 @@ void* ThreadPoolWorker::Callback(void* arg) { return NULL; } -void ThreadPool::AddTask(Thread* self, Task* task){ +void ThreadPool::AddTask(Thread* self, Task* task) { MutexLock mu(self, task_queue_lock_); tasks_.push_back(task); // If we have any waiters, signal one. @@ -173,7 +173,7 @@ void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) { } } -size_t ThreadPool::GetTaskCount(Thread* self){ +size_t ThreadPool::GetTaskCount(Thread* self) { MutexLock mu(self, task_queue_lock_); return tasks_.size(); } -- cgit v1.2.3-59-g8ed1b From f69863b3039fc621ff4250e262d2a024d5e79ec8 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Wed, 17 Jul 2013 21:53:13 -0700 Subject: Fix cpplint whitespace/newline issues Change-Id: Ie2049d9f667339e41f36c4f5d09f0d10d8d2c762 --- Android.mk | 2 +- compiler/dex/frontend.cc | 9 +++++--- compiler/dex/quick/codegen_util.cc | 2 +- compiler/dex/quick/mips/utility_mips.cc | 15 +++++-------- compiler/llvm/md_builder.cc | 38 +++++++++++++++++++++++---------- runtime/base/logging.cc | 28 ++++++++++++++++++------ runtime/debugger.h | 2 +- runtime/gc/heap.cc | 2 +- runtime/runtime_support.h | 8 +++++-- runtime/thread.cc | 6 +++++- 10 files changed, 74 insertions(+), 38 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/Android.mk b/Android.mk index ebebc61170..27bd894f13 100644 --- a/Android.mk +++ b/Android.mk @@ -334,7 +334,7 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit \ + --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline \ $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION)) # "mm cpplint-art-aspirational" to see warnings we would like to fix diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 2d7c973859..08039147a0 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -223,11 +223,14 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, { // NOLINT(whitespace/braces) switch (compiler.GetInstructionSet()) { case kThumb2: - cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break; + cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); + break; case kMips: - cu->cg.reset(MipsCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break; + cu->cg.reset(MipsCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); + break; case kX86: - cu->cg.reset(X86CodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break; + cu->cg.reset(X86CodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); + break; default: LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet(); } diff --git 
a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index e728d2769b..e169dc8f54 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -61,7 +61,7 @@ void Mir2Lir::NopLIR( LIR* lir) { void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) { uint64_t *mask_ptr; - uint64_t mask = ENCODE_MEM;; + uint64_t mask = ENCODE_MEM; DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE)); if (is_load) { mask_ptr = &lir->use_mask; diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 1975d1a4c1..8510006051 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -208,8 +208,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { case kOpAdd: if (IS_SIMM16(value)) { opcode = kMipsAddiu; - } - else { + } else { short_form = false; opcode = kMipsAddu; } @@ -218,8 +217,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { if (IS_SIMM16((-value))) { value = -value; opcode = kMipsAddiu; - } - else { + } else { short_form = false; opcode = kMipsSubu; } @@ -239,8 +237,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { case kOpAnd: if (IS_UIMM16((value))) { opcode = kMipsAndi; - } - else { + } else { short_form = false; opcode = kMipsAnd; } @@ -248,8 +245,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { case kOpOr: if (IS_UIMM16((value))) { opcode = kMipsOri; - } - else { + } else { short_form = false; opcode = kMipsOr; } @@ -257,8 +253,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) { case kOpXor: if (IS_UIMM16((value))) { opcode = kMipsXori; - } - else { + } else { short_form = false; opcode = kMipsXor; } diff --git a/compiler/llvm/md_builder.cc b/compiler/llvm/md_builder.cc index 3884f51056..1bd76dddba 100644 --- a/compiler/llvm/md_builder.cc +++ b/compiler/llvm/md_builder.cc @@ -33,17 +33,33 @@ namespace llvm { ::llvm::MDNode*& spec_ty = tbaa_special_type_[sty_id]; if (spec_ty == NULL) { switch (sty_id) { - case kTBAARegister: spec_ty = createTBAANode("Register", tbaa_root_); break; - case kTBAAStackTemp: spec_ty = createTBAANode("StackTemp", tbaa_root_); break; - case kTBAAHeapArray: spec_ty = createTBAANode("HeapArray", tbaa_root_); break; - case kTBAAHeapInstance: spec_ty = createTBAANode("HeapInstance", tbaa_root_); break; - case kTBAAHeapStatic: spec_ty = createTBAANode("HeapStatic", tbaa_root_); break; - case kTBAAJRuntime: spec_ty = createTBAANode("JRuntime", tbaa_root_); break; - case kTBAARuntimeInfo: spec_ty = createTBAANode("RuntimeInfo", - GetTBAASpecialType(kTBAAJRuntime)); break; - case kTBAAShadowFrame: spec_ty = createTBAANode("ShadowFrame", - GetTBAASpecialType(kTBAAJRuntime)); break; - case kTBAAConstJObject: spec_ty = createTBAANode("ConstJObject", tbaa_root_, true); break; + case kTBAARegister: + spec_ty = createTBAANode("Register", tbaa_root_); + break; + case kTBAAStackTemp: + spec_ty = createTBAANode("StackTemp", tbaa_root_); + break; + case kTBAAHeapArray: + spec_ty = createTBAANode("HeapArray", tbaa_root_); + break; + case kTBAAHeapInstance: + spec_ty = createTBAANode("HeapInstance", tbaa_root_); + break; + case kTBAAHeapStatic: + spec_ty = createTBAANode("HeapStatic", tbaa_root_); + break; + case kTBAAJRuntime: + spec_ty = createTBAANode("JRuntime", tbaa_root_); + break; + case kTBAARuntimeInfo: + spec_ty = createTBAANode("RuntimeInfo", 
GetTBAASpecialType(kTBAAJRuntime)); + break; + case kTBAAShadowFrame: + spec_ty = createTBAANode("ShadowFrame", GetTBAASpecialType(kTBAAJRuntime)); + break; + case kTBAAConstJObject: + spec_ty = createTBAANode("ConstJObject", tbaa_root_, true); + break; default: LOG(FATAL) << "Unknown TBAA special type: " << sty_id; break; diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc index 2c6b6a8ce5..bf19e8d734 100644 --- a/runtime/base/logging.cc +++ b/runtime/base/logging.cc @@ -87,14 +87,28 @@ void InitLogging(char* argv[]) { std::string spec(specs[i]); if (spec.size() == 3 && StartsWith(spec, "*:")) { switch (spec[2]) { - case 'v': gMinimumLogSeverity = VERBOSE; continue; - case 'd': gMinimumLogSeverity = DEBUG; continue; - case 'i': gMinimumLogSeverity = INFO; continue; - case 'w': gMinimumLogSeverity = WARNING; continue; - case 'e': gMinimumLogSeverity = ERROR; continue; - case 'f': gMinimumLogSeverity = FATAL; continue; + case 'v': + gMinimumLogSeverity = VERBOSE; + continue; + case 'd': + gMinimumLogSeverity = DEBUG; + continue; + case 'i': + gMinimumLogSeverity = INFO; + continue; + case 'w': + gMinimumLogSeverity = WARNING; + continue; + case 'e': + gMinimumLogSeverity = ERROR; + continue; + case 'f': + gMinimumLogSeverity = FATAL; + continue; // liblog will even suppress FATAL if you say 's' for silent, but that's crazy! - case 's': gMinimumLogSeverity = FATAL; continue; + case 's': + gMinimumLogSeverity = FATAL; + continue; } } LOG(FATAL) << "unsupported '" << spec << "' in ANDROID_LOG_TAGS (" << tags << ")"; diff --git a/runtime/debugger.h b/runtime/debugger.h index 94f3cbed76..28a2c60f8c 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -238,7 +238,7 @@ class Dbg { static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpError GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 85b7bf01b9..deb1b8c3e5 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -1538,7 +1538,7 @@ collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) { gc_complete_cond_->Wait(self); } last_gc_type = last_gc_type_; - wait_time = NanoTime() - wait_start;; + wait_time = NanoTime() - wait_start; total_wait_time_ += wait_time; } if (wait_time > kLongGcPauseThreshold) { diff --git a/runtime/runtime_support.h b/runtime/runtime_support.h index 051981f99e..a6c3b38345 100644 --- a/runtime/runtime_support.h +++ b/runtime/runtime_support.h @@ -176,8 +176,12 @@ static inline mirror::Field* FindFieldFast(uint32_t field_idx, case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break; - default: LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. - is_primitive = true; is_set = true; is_static = true; break; + default: + LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. + is_primitive = true; + is_set = true; + is_static = true; + break; } if (UNLIKELY(resolved_field->IsStatic() != is_static)) { // Incompatible class change. 
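// A hedged restatement of the FindFieldFast dispatch above: each access
// kind decomposes into three independent flags, and the cpplint fix simply
// puts one statement per line. The enum is trimmed to three values for
// illustration.
enum FindFieldTypeModel {
  kInstanceObjectRead,
  kStaticPrimitiveRead,
  kStaticPrimitiveWrite,
};

struct FieldAccessFlags {
  bool is_primitive;
  bool is_set;
  bool is_static;
};

static FieldAccessFlags DecodeFieldAccess(FindFieldTypeModel type) {
  switch (type) {
    case kInstanceObjectRead:
      return {false, false, false};
    case kStaticPrimitiveRead:
      return {true, false, true};
    case kStaticPrimitiveWrite:
    default:
      return {true, true, true};
  }
}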
diff --git a/runtime/thread.cc b/runtime/thread.cc index 6114c63fbe..dd55195c15 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1703,7 +1703,11 @@ static const EntryPointInfo gThreadEntryPointInfo[] = { void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) { CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets. -#define DO_THREAD_OFFSET(x) if (offset == static_cast(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { os << # x; return; } +#define DO_THREAD_OFFSET(x) \ + if (offset == static_cast(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { \ + os << # x; \ + return; \ + } DO_THREAD_OFFSET(state_and_flags_); DO_THREAD_OFFSET(card_table_); DO_THREAD_OFFSET(exception_); -- cgit v1.2.3-59-g8ed1b From 0cd7ec2dcd8d7ba30bf3ca420b40dac52849876c Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Wed, 17 Jul 2013 23:40:20 -0700 Subject: Fix cpplint whitespace/blank_line issues Change-Id: Ice937e95e23dd622c17054551d4ae4cebd0ef8a2 --- Android.mk | 6 +- compiler/dex/arena_allocator.h | 3 - compiler/dex/arena_bit_vector.h | 1 - compiler/dex/backend.h | 2 - compiler/dex/dataflow_iterator.h | 3 - compiler/dex/growable_array.h | 1 - compiler/dex/local_value_numbering.cc | 1 - compiler/dex/local_value_numbering.h | 1 - compiler/dex/mir_graph.cc | 1 - compiler/dex/mir_graph.h | 1 - compiler/dex/portable/mir_to_gbc.cc | 6 +- compiler/dex/portable/mir_to_gbc.h | 1 - compiler/dex/quick/arm/assemble_arm.cc | 1 - compiler/dex/quick/codegen_util.cc | 3 - compiler/dex/quick/gen_invoke.cc | 1 - compiler/dex/quick/local_optimizations.cc | 45 +++++++---- compiler/dex/quick/mips/codegen_mips.h | 2 - compiler/dex/quick/mir_to_lir.h | 1 - compiler/dex/quick/x86/codegen_x86.h | 1 - compiler/dex/ssa_transformation.cc | 97 ++++++++++++++++-------- compiler/driver/compiler_driver.cc | 2 +- compiler/elf_writer_mclinker.cc | 1 - compiler/elf_writer_mclinker.h | 1 - compiler/elf_writer_test.cc | 1 - compiler/jni/portable/jni_compiler.cc | 9 +-- compiler/jni/quick/x86/calling_convention_x86.cc | 1 - compiler/llvm/gbc_expander.cc | 5 -- compiler/llvm/ir_builder.h | 2 - compiler/llvm/llvm_compilation_unit.cc | 1 - compiler/oat_writer.cc | 1 - dex2oat/dex2oat.cc | 1 - runtime/atomic_integer.h | 3 +- runtime/barrier.cc | 2 +- runtime/barrier_test.cc | 9 +-- runtime/base/histogram-inl.h | 1 - runtime/base/histogram.h | 1 - runtime/base/timing_logger.h | 3 - runtime/debugger.cc | 2 - runtime/dex_method_iterator.h | 1 - runtime/gc/accounting/heap_bitmap-inl.h | 1 - runtime/gc/accounting/heap_bitmap.h | 1 - runtime/gc/accounting/space_bitmap.cc | 4 +- runtime/gc/accounting/space_bitmap.h | 1 + runtime/gc/collector/garbage_collector.h | 1 - runtime/gc/space/image_space.h | 1 - runtime/gc/space/large_object_space.cc | 8 +- runtime/gc/space/large_object_space.h | 2 +- runtime/image_test.cc | 1 - runtime/interpreter/interpreter.cc | 1 - runtime/jdwp/jdwp_handler.cc | 1 - runtime/mirror/abstract_method.h | 8 +- runtime/mirror/class.cc | 4 +- runtime/oat/runtime/argument_visitor.h | 3 +- runtime/oat_file.cc | 1 - runtime/runtime_support_llvm.cc | 2 - runtime/runtime_support_llvm.h | 3 - runtime/stack.h | 3 +- runtime/thread.cc | 4 +- runtime/thread_pool.cc | 13 +--- runtime/thread_pool.h | 4 +- runtime/thread_pool_test.cc | 4 +- runtime/trace.h | 1 + runtime/verifier/method_verifier.cc | 1 - runtime/verifier/reg_type.h | 4 + runtime/verifier/reg_type_test.cc | 4 +- runtime/verifier/register_line.cc | 1 - test/ReferenceMap/stack_walk_refmap_jni.cc | 2 +- test/StackWalk/stack_walk_jni.cc | 2 +- 68 files 
changed, 134 insertions(+), 177 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/Android.mk b/Android.mk index 27bd894f13..971eb2f202 100644 --- a/Android.mk +++ b/Android.mk @@ -334,15 +334,15 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline \ - $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION)) + --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens \ + $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) # "mm cpplint-art-aspirational" to see warnings we would like to fix .PHONY: cpplint-art-aspirational cpplint-art-aspirational: ./art/tools/cpplint.py \ --filter=-whitespace/comments,-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references \ - $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION)) + $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) ######################################################################## # targets to switch back and forth from libdvm to libart diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h index 0ad859ea9a..cd2141a3c3 100644 --- a/compiler/dex/arena_allocator.h +++ b/compiler/dex/arena_allocator.h @@ -28,7 +28,6 @@ namespace art { class ArenaAllocator { public: - // Type of allocation for memory tuning. enum ArenaAllocKind { kAllocMisc, @@ -57,7 +56,6 @@ class ArenaAllocator { void DumpMemStats(std::ostream& os) const; private: - // Variable-length allocation block. struct ArenaMemBlock { size_t block_size; @@ -77,7 +75,6 @@ class ArenaAllocator { uint32_t alloc_stats_[kNumAllocKinds]; // Bytes used by various allocation kinds. uint32_t lost_bytes_; // Lost memory at end of too-small region uint32_t num_allocations_; - }; // ArenaAllocator diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h index 7e5c436f4c..de30859bfd 100644 --- a/compiler/dex/arena_bit_vector.h +++ b/compiler/dex/arena_bit_vector.h @@ -30,7 +30,6 @@ namespace art { */ class ArenaBitVector { public: - class Iterator { public: explicit Iterator(ArenaBitVector* bit_vector) diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h index 7fa8e9992a..acfec42352 100644 --- a/compiler/dex/backend.h +++ b/compiler/dex/backend.h @@ -23,7 +23,6 @@ namespace art { class Backend { - public: virtual ~Backend() {}; virtual void Materialize() = 0; @@ -32,7 +31,6 @@ class Backend { protected: explicit Backend(ArenaAllocator* arena) : arena_(arena) {}; ArenaAllocator* const arena_; - }; // Class Backend } // namespace art diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index 19468698f9..e427862956 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -41,7 +41,6 @@ namespace art { */ class DataflowIterator { public: - virtual ~DataflowIterator() {} // Return the next BasicBlock* to visit. 
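// A hedged model of the iterator contract described by the comment above:
// an iterative dataflow analysis walks the blocks in a fixed order and
// repeats the whole pass while any visit reports a change, i.e. until a
// fixed point is reached. Names are illustrative.
#include <vector>

template <typename VisitFn>  // VisitFn: bool(int block_id), true if changed
static void IterateToFixedPoint(const std::vector<int>& order, VisitFn visit) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (int id : order) {
      changed |= visit(id);
    }
  }
}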
@@ -81,7 +80,6 @@ namespace art { GrowableArray* block_id_list_; int idx_; bool changed_; - }; // DataflowIterator class ReachableNodesIterator : public DataflowIterator { @@ -106,7 +104,6 @@ namespace art { class PostOrderDfsIterator : public DataflowIterator { public: - PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative) : DataflowIterator(mir_graph, is_iterative, 0, mir_graph->GetNumReachableBlocks(), false) { diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h index 6d26bc216d..3bfbcd4edf 100644 --- a/compiler/dex/growable_array.h +++ b/compiler/dex/growable_array.h @@ -46,7 +46,6 @@ enum OatListKind { template class GrowableArray { public: - class Iterator { public: explicit Iterator(GrowableArray* g_list) diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc index b783f3ed52..35d29235f2 100644 --- a/compiler/dex/local_value_numbering.cc +++ b/compiler/dex/local_value_numbering.cc @@ -509,7 +509,6 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) { AdvanceMemoryVersion(NO_VALUE, field_ref); } break; - } return res; } diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h index 09ed7aec8d..d29600a479 100644 --- a/compiler/dex/local_value_numbering.h +++ b/compiler/dex/local_value_numbering.h @@ -135,7 +135,6 @@ class LocalValueNumbering { ValueMap value_map_; MemoryVersionMap memory_version_map_; std::set null_checked_; - }; } // namespace art diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index a9af477d2a..0b3fa46faa 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -804,7 +804,6 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) { if (bb->successor_block_list.block_list_type == kPackedSwitch || bb->successor_block_list.block_list_type == kSparseSwitch) { - GrowableArray::Iterator iter(bb->successor_block_list.blocks); succ_id = 0; diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index f86e13016d..f6011e06e6 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -553,7 +553,6 @@ class MIRGraph { static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst]; private: - int FindCommonParent(int block1, int block2); void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1, const ArenaBitVector* src2); diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc index 4317d1e354..cfd3dafbee 100644 --- a/compiler/dex/portable/mir_to_gbc.cc +++ b/compiler/dex/portable/mir_to_gbc.cc @@ -74,7 +74,6 @@ void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) { ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder); DCHECK(inst != NULL); inst->eraseFromParent(); - } void MirConverter::DefineValue(::llvm::Value* val, int s_reg) { @@ -1580,8 +1579,7 @@ void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) { /* Extended MIR instructions like PHI */ void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir, - ::llvm::BasicBlock* llvm_bb) { - + ::llvm::BasicBlock* llvm_bb) { switch (static_cast(mir->dalvikInsn.opcode)) { case kMirOpPhi: { // The llvm Phi node already emitted - just DefineValue() here. 
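// A hedged reminder of what the phi handled in this case encodes: at a
// join block, a Dalvik register may carry a different SSA name per
// predecessor, and the phi selects the name for whichever edge was taken.
// Minimal model, with invented field names:
#include <utility>
#include <vector>

struct PhiModel {
  int dalvik_reg;                             // virtual register being merged
  std::vector<std::pair<int, int>> incoming;  // (pred block id, ssa name)

  int NameForEdge(int pred_id) const {
    for (const std::pair<int, int>& in : incoming) {
      if (in.first == pred_id) {
        return in.second;
      }
    }
    return -1;  // no such predecessor edge
  }
};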
@@ -1706,7 +1704,6 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { HandlePhiNodes(bb, llvm_bb); for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { - SetDexOffset(mir->offset); int opcode = mir->dalvikInsn.opcode; @@ -1795,7 +1792,6 @@ char RemapShorty(char shorty_type) { } ::llvm::FunctionType* MirConverter::GetFunctionType() { - // Get return type ::llvm::Type* ret_type = irb_->getJType(RemapShorty(cu_->shorty[0])); diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h index 278631466f..2b681f6097 100644 --- a/compiler/dex/portable/mir_to_gbc.h +++ b/compiler/dex/portable/mir_to_gbc.h @@ -41,7 +41,6 @@ Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_gr llvm::LlvmCompilationUnit* const llvm_compilation_unit); class MirConverter : public Backend { - public: // TODO: flesh out and integrate into new world order. MirConverter(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index f4aa1f3212..0649c9f319 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1007,7 +1007,6 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) { AssemblerStatus res = kSuccess; // Assume success for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) { - if (lir->opcode < 0) { /* 1 means padding is needed */ if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) { diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index e169dc8f54..8698b1f9ed 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -969,7 +969,6 @@ void Mir2Lir::Materialize() { /* Method is not empty */ if (first_lir_insn_) { - // mark the targets of switch statement case labels ProcessSwitchTables(); @@ -979,9 +978,7 @@ void Mir2Lir::Materialize() { if (cu_->verbose) { CodegenDump(); } - } - } CompiledMethod* Mir2Lir::GetCompiledMethod() { diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 14e395cdac..fd8f86b5fc 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -736,7 +736,6 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this) { - // If we can treat it as non-range (Jumbo ops will use range form) if (info->num_arg_words <= 5) return GenDalvikArgsNoRange(info, call_state, pcrLabel, diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc index eb27bf8b5d..2e9c845d05 100644 --- a/compiler/dex/quick/local_optimizations.cc +++ b/compiler/dex/quick/local_optimizations.cc @@ -73,11 +73,14 @@ void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) { void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { LIR* this_lir; - if (head_lir == tail_lir) return; + if (head_lir == tail_lir) { + return; + } for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) { - - if (is_pseudo_opcode(this_lir->opcode)) continue; + if (is_pseudo_opcode(this_lir->opcode)) { + continue; + } int sink_distance = 0; @@ -110,7 +113,9 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { * Currently only eliminate redundant ld/st for constant and Dalvik * register accesses. 
*/ - if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue; + if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) { + continue; + } uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM; uint64_t stop_use_reg_mask; @@ -127,12 +132,13 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { } for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) { - /* * Skip already dead instructions (whose dataflow information is * outdated and misleading). */ - if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) continue; + if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) { + continue; + } uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM; uint64_t alias_condition = this_mem_mask & check_mem_mask; @@ -274,12 +280,15 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { LIR* prev_inst_list[MAX_HOIST_DISTANCE]; /* Empty block */ - if (head_lir == tail_lir) return; + if (head_lir == tail_lir) { + return; + } /* Start from the second instruction */ for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) { - - if (is_pseudo_opcode(this_lir->opcode)) continue; + if (is_pseudo_opcode(this_lir->opcode)) { + continue; + } uint64_t target_flags = GetTargetInstFlags(this_lir->opcode); /* Skip non-interesting instructions */ @@ -312,12 +321,13 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { /* Try to hoist the load to a good spot */ for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) { - /* * Skip already dead instructions (whose dataflow information is * outdated and misleading). */ - if (check_lir->flags.is_nop) continue; + if (check_lir->flags.is_nop) { + continue; + } uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM; uint64_t alias_condition = stop_use_all_mask & check_mem_mask; @@ -355,7 +365,9 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { */ if (stop_here || !is_pseudo_opcode(check_lir->opcode)) { prev_inst_list[next_slot++] = check_lir; - if (next_slot == MAX_HOIST_DISTANCE) break; + if (next_slot == MAX_HOIST_DISTANCE) { + break; + } } /* Found a new place to put the load - move it here */ @@ -400,12 +412,16 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { * If the first instruction is a load, don't hoist anything * above it since it is unlikely to be beneficial. */ - if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue; + if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) { + continue; + } /* * If the remaining number of slots is less than LD_LATENCY, * insert the hoisted load here. */ - if (slot < LD_LATENCY) break; + if (slot < LD_LATENCY) { + break; + } } // Don't look across a barrier label @@ -461,7 +477,6 @@ void Mir2Lir::RemoveRedundantBranches() { LIR* this_lir; for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) { - /* Branch to the next instruction */ if (IsUnconditionalBranch(this_lir)) { LIR* next_lir = this_lir; diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h index 376ad7f10e..802ff625c9 100644 --- a/compiler/dex/quick/mips/codegen_mips.h +++ b/compiler/dex/quick/mips/codegen_mips.h @@ -24,7 +24,6 @@ namespace art { class MipsMir2Lir : public Mir2Lir { public: - MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena); // Required for target - codegen utilities. 
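Everything in the local_optimizations.cc hunks above is the same mechanical rewrite: a single-statement if gains braces and its body moves to its own line. A compilable toy of the resulting control-flow shape, with an invented two-field LIR in place of the real node declared in compiler/dex/quick/mir_to_lir.h:

#include <cstdio>

// Invented minimal stand-in for the LIR node; the real struct carries the
// opcode, operands, and the use/def masks consulted by the optimizer.
struct LIR {
  LIR* prev;
  bool is_pseudo;
};

// Mirrors the rewritten loops: every early exit is now braced.
void WalkBackwards(LIR* head_lir, LIR* tail_lir) {
  if (head_lir == tail_lir) {  // Was the one-liner "if (...) return;".
    return;
  }
  for (LIR* this_lir = tail_lir->prev; this_lir != head_lir; this_lir = this_lir->prev) {
    if (this_lir->is_pseudo) {  // Was the one-liner "if (...) continue;".
      continue;
    }
    std::printf("visited a concrete instruction\n");
  }
}

int main() {
  LIR head = {NULL, false};
  LIR real = {&head, false};
  LIR pseudo = {&real, true};
  LIR tail = {&pseudo, false};
  WalkBackwards(&head, &tail);  // Prints once, for the non-pseudo node.
  return 0;
}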
@@ -175,7 +174,6 @@ class MipsMir2Lir : public Mir2Lir { private: void ConvertShortToLongBranch(LIR* lir); - }; } // namespace art diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index abb687cb84..41e5a2d988 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -166,7 +166,6 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, #define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0) class Mir2Lir : public Backend { - public: struct SwitchTable { int offset; diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index 4fa9dfb4d9..edb5ae57c2 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -24,7 +24,6 @@ namespace art { class X86Mir2Lir : public Mir2Lir { public: - X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena); // Required for target - codegen helpers. diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc index ccd2454a49..3a0cbcc67c 100644 --- a/compiler/dex/ssa_transformation.cc +++ b/compiler/dex/ssa_transformation.cc @@ -46,9 +46,13 @@ BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) { GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks); while (true) { SuccessorBlockInfo *sbi = iterator.Next(); - if (sbi == NULL) break; + if (sbi == NULL) { + break; + } res = NeedsVisit(sbi->block); - if (res != NULL) break; + if (res != NULL) { + break; + } } } } @@ -112,12 +116,16 @@ void MIRGraph::ComputeDFSOrders() { * register idx is defined in BasicBlock bb. */ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) { - if (bb->data_flow_info == NULL) return false; + if (bb->data_flow_info == NULL) { + return false; + } ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v); while (true) { int idx = iterator.Next(); - if (idx == -1) break; + if (idx == -1) { + break; + } /* Block bb defines register idx */ def_block_matrix_[idx]->SetBit(bb->id); } @@ -222,7 +230,9 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) { GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks); while (true) { SuccessorBlockInfo *successor_block_info = iterator.Next(); - if (successor_block_info == NULL) break; + if (successor_block_info == NULL) { + break; + } BasicBlock* succ_bb = successor_block_info->block; CheckForDominanceFrontier(bb, succ_bb); } @@ -233,13 +243,17 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) { while (true) { //TUNING: hot call to BitVectorIteratorNext int dominated_idx = bv_iterator.Next(); - if (dominated_idx == -1) break; + if (dominated_idx == -1) { + break; + } BasicBlock* dominated_bb = GetBasicBlock(dominated_idx); ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier); while (true) { //TUNING: hot call to BitVectorIteratorNext int df_up_idx = df_iterator.Next(); - if (df_up_idx == -1) break; + if (df_up_idx == -1) { + break; + } BasicBlock* df_up_block = GetBasicBlock(df_up_idx); CheckForDominanceFrontier(bb, df_up_block); } @@ -313,7 +327,9 @@ bool MIRGraph::ComputeblockIDom(BasicBlock* bb) { /* Scan the rest of the predecessors */ while (true) { BasicBlock* pred_bb = iter.Next(); - if (!pred_bb) break; + if (!pred_bb) { + break; + } if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) { continue; } else { @@ -443,7 +459,9 @@ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) { ArenaBitVector* temp_dalvik_register_v
= temp_dalvik_register_v_; - if (bb->data_flow_info == NULL) return false; + if (bb->data_flow_info == NULL) { + return false; + } temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v); if (bb->taken && bb->taken->data_flow_info) ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v, @@ -455,7 +473,9 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) { GrowableArray::Iterator iterator(bb->successor_block_list.blocks); while (true) { SuccessorBlockInfo *successor_block_info = iterator.Next(); - if (successor_block_info == NULL) break; + if (successor_block_info == NULL) { + break; + } BasicBlock* succ_bb = successor_block_info->block; if (succ_bb->data_flow_info) { ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v, @@ -504,25 +524,27 @@ void MIRGraph::InsertPhiNodes() { while (true) { int idx = iterator.Next(); - if (idx == -1) break; - BasicBlock* def_bb = GetBasicBlock(idx); + if (idx == -1) { + break; + } + BasicBlock* def_bb = GetBasicBlock(idx); - /* Merge the dominance frontier to tmp_blocks */ - //TUNING: hot call to Union(). - if (def_bb->dom_frontier != NULL) { - tmp_blocks->Union(def_bb->dom_frontier); - } + /* Merge the dominance frontier to tmp_blocks */ + //TUNING: hot call to Union(). + if (def_bb->dom_frontier != NULL) { + tmp_blocks->Union(def_bb->dom_frontier); } - if (!phi_blocks->Equal(tmp_blocks)) { - change = true; - phi_blocks->Copy(tmp_blocks); - - /* - * Iterate through the original blocks plus the new ones in - * the dominance frontier. - */ - input_blocks->Copy(phi_blocks); - input_blocks->Union(def_block_matrix_[dalvik_reg]); + } + if (!phi_blocks->Equal(tmp_blocks)) { + change = true; + phi_blocks->Copy(tmp_blocks); + + /* + * Iterate through the original blocks plus the new ones in + * the dominance frontier. 
+ */ + input_blocks->Copy(phi_blocks); + input_blocks->Union(def_block_matrix_[dalvik_reg]); } } while (change); @@ -533,10 +555,14 @@ void MIRGraph::InsertPhiNodes() { ArenaBitVector::Iterator iterator(phi_blocks); while (true) { int idx = iterator.Next(); - if (idx == -1) break; + if (idx == -1) { + break; + } BasicBlock* phi_bb = GetBasicBlock(idx); /* Variable will be clobbered before being used - no need for phi */ - if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) continue; + if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) { + continue; + } MIR *phi = static_cast(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocDFInfo)); phi->dalvikInsn.opcode = static_cast(kMirOpPhi); @@ -572,7 +598,9 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) { GrowableArray::Iterator iter(bb->predecessors); while (true) { BasicBlock* pred_bb = iter.Next(); - if (!pred_bb) break; + if (!pred_bb) { + break; + } int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg]; uses.push_back(ssa_reg); incoming_arc.push_back(pred_bb->id); @@ -605,8 +633,9 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) { } void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) { - - if (block->visited || block->hidden) return; + if (block->visited || block->hidden) { + return; + } block->visited = true; /* Process this block */ @@ -632,7 +661,9 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) { GrowableArray::Iterator iterator(block->successor_block_list.blocks); while (true) { SuccessorBlockInfo *successor_block_info = iterator.Next(); - if (successor_block_info == NULL) break; + if (successor_block_info == NULL) { + break; + } BasicBlock* succ_bb = successor_block_info->block; DoDFSPreOrderSSARename(succ_bb); /* Restore SSA map snapshot */ diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index d1d21b1d03..f1082db9bc 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -1402,7 +1402,6 @@ class ParallelCompilationManager { } private: - class ForAllClosure : public Task { public: ForAllClosure(ParallelCompilationManager* manager, size_t begin, size_t end, Callback* callback, @@ -1423,6 +1422,7 @@ class ParallelCompilationManager { virtual void Finalize() { delete this; } + private: const ParallelCompilationManager* const manager_; const size_t begin_; diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc index 472a606cc6..05f3b025e7 100644 --- a/compiler/elf_writer_mclinker.cc +++ b/compiler/elf_writer_mclinker.cc @@ -307,7 +307,6 @@ void ElfWriterMclinker::AddRuntimeInputs(const std::string& android_root, bool i // TODO: ownership of libm_lib_input? mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib); CHECK(libm_lib_input_input != NULL); - } #endif diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h index 468fa9a84f..3b33bc4986 100644 --- a/compiler/elf_writer_mclinker.h +++ b/compiler/elf_writer_mclinker.h @@ -38,7 +38,6 @@ class CompiledCode; class ElfWriterMclinker : public ElfWriter { public: - // Write an ELF file. Returns true on success, false on failure. 
static bool Create(File* file, std::vector& oat_contents, diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc index 4a02b61242..e48806ecc4 100644 --- a/compiler/elf_writer_test.cc +++ b/compiler/elf_writer_test.cc @@ -22,7 +22,6 @@ namespace art { class ElfWriterTest : public CommonTest { - protected: virtual void SetUp() { ReserveImageSpace(); diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc index 44d0c2d215..57b8a315a1 100644 --- a/compiler/jni/portable/jni_compiler.cc +++ b/compiler/jni/portable/jni_compiler.cc @@ -46,11 +46,10 @@ using namespace runtime_support; JniCompiler::JniCompiler(LlvmCompilationUnit* cunit, const CompilerDriver& driver, const DexCompilationUnit* dex_compilation_unit) -: cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()), - context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()), - dex_compilation_unit_(dex_compilation_unit), - func_(NULL), elf_func_idx_(0) { - + : cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()), + context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()), + dex_compilation_unit_(dex_compilation_unit), + func_(NULL), elf_func_idx_(0) { // Check: Ensure that JNI compiler will only get "native" method CHECK(dex_compilation_unit->IsNative()); } diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc index b671bd190c..45dd42960c 100644 --- a/compiler/jni/quick/x86/calling_convention_x86.cc +++ b/compiler/jni/quick/x86/calling_convention_x86.cc @@ -159,7 +159,6 @@ size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() { // count JNIEnv* and return pc (pushed after Method*) size_t total_args = static_args + param_args + 2; return total_args; - } } // namespace x86 diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc index b139e322f1..94cc9731aa 100644 --- a/compiler/llvm/gbc_expander.cc +++ b/compiler/llvm/gbc_expander.cc @@ -361,7 +361,6 @@ class GBCExpanderPass : public llvm::FunctionPass { llvm::Value* ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id, llvm::CallInst& call_inst); - }; char GBCExpanderPass::ID = 0; @@ -710,7 +709,6 @@ llvm::Value* GBCExpanderPass::EmitLoadArrayLength(llvm::Value* array) { art::mirror::Array::LengthOffset().Int32Value(), irb_.getJIntTy(), kTBAAConstJObject); - } llvm::Value* @@ -751,7 +749,6 @@ EmitLoadVirtualCalleeMethodObjectAddr(int vtable_idx, llvm::Value* this_addr) { llvm::Value* GBCExpanderPass::EmitArrayGEP(llvm::Value* array_addr, llvm::Value* index_value, JType elem_jty) { - int data_offset; if (elem_jty == kLong || elem_jty == kDouble || (elem_jty == kObject && sizeof(uint64_t) == sizeof(art::mirror::Object*))) { @@ -1426,7 +1423,6 @@ llvm::Value* GBCExpanderPass::Expand_LongCompare(llvm::Value* src1_value, llvm:: llvm::Value* GBCExpanderPass::EmitCompareResultSelection(llvm::Value* cmp_eq, llvm::Value* cmp_lt) { - llvm::Constant* zero = irb_.getJInt(0); llvm::Constant* pos1 = irb_.getJInt(1); llvm::Constant* neg1 = irb_.getJInt(-1); @@ -2437,7 +2433,6 @@ EmitCallRuntimeForCalleeMethodObjectAddr(uint32_t callee_method_idx, llvm::Value* this_addr, uint32_t dex_pc, bool is_fast_path) { - llvm::Function* runtime_func = NULL; switch (invoke_type) { diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h index 65da005e9b..c81ba278a8 100644 --- a/compiler/llvm/ir_builder.h +++ b/compiler/llvm/ir_builder.h @@ -219,7 +219,6 @@ class IRBuilder : public LLVMIRBuilder { ::llvm::Value* CreatePtrDisp(::llvm::Value* 
base, ::llvm::Value* offset, ::llvm::PointerType* ret_ty) { - ::llvm::Value* base_int = CreatePtrToInt(base, getPtrEquivIntTy()); ::llvm::Value* result_int = CreateAdd(base_int, offset); ::llvm::Value* result = CreateIntToPtr(result_int, ret_ty); @@ -232,7 +231,6 @@ class IRBuilder : public LLVMIRBuilder { ::llvm::Value* count, ::llvm::Value* offset, ::llvm::PointerType* ret_ty) { - ::llvm::Value* block_offset = CreateMul(bs, count); ::llvm::Value* total_offset = CreateAdd(block_offset, offset); diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc index dfb572477e..1f2b977921 100644 --- a/compiler/llvm/llvm_compilation_unit.cc +++ b/compiler/llvm/llvm_compilation_unit.cc @@ -166,7 +166,6 @@ void LlvmCompilationUnit::DumpBitcodeToString(std::string& str_buffer) { } bool LlvmCompilationUnit::Materialize() { - const bool kDumpBitcode = false; if (kDumpBitcode) { // Dump the bitcode for debugging diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 0bfa4ec328..4c32506d43 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -83,7 +83,6 @@ OatWriter::OatWriter(const std::vector& dex_files, size_oat_dex_file_methods_offsets_(0), size_oat_class_status_(0), size_oat_class_method_offsets_(0) { - size_t offset = InitOatHeader(); offset = InitOatDexFiles(offset); offset = InitDexFiles(offset); diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 541c916936..9e23d3e7d3 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -440,7 +440,6 @@ static size_t OpenDexFiles(const std::vector& dex_filenames, // during development when fatal aborts lead to a cascade of failures // that result in a deadlock. class WatchDog { - // WatchDog defines its own CHECK_PTHREAD_CALL to avoid using Log which uses locks #undef CHECK_PTHREAD_CALL #define CHECK_WATCH_DOG_PTHREAD_CALL(call, args, what) \ diff --git a/runtime/atomic_integer.h b/runtime/atomic_integer.h index 117e837bdb..6711722672 100644 --- a/runtime/atomic_integer.h +++ b/runtime/atomic_integer.h @@ -70,10 +70,11 @@ class AtomicInteger { bool success = android_atomic_cas(expected_value, new_value, &value_) == 0; return success; } + private: volatile int32_t value_; }; -} +} // namespace art #endif // ART_RUNTIME_ATOMIC_INTEGER_H_ diff --git a/runtime/barrier.cc b/runtime/barrier.cc index 250d468adb..a64499848e 100644 --- a/runtime/barrier.cc +++ b/runtime/barrier.cc @@ -60,4 +60,4 @@ Barrier::~Barrier() { CHECK(!count_) << "Attempted to destroy barrier with non zero count"; } -} +} // namespace art diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc index d26ae9e20f..298ae569fb 100644 --- a/runtime/barrier_test.cc +++ b/runtime/barrier_test.cc @@ -32,9 +32,7 @@ class CheckWaitTask : public Task { : barrier_(barrier), count1_(count1), count2_(count2), - count3_(count3) { - - } + count3_(count3) {} void Run(Thread* self) { LOG(INFO) << "Before barrier 1 " << *self; @@ -50,6 +48,7 @@ class CheckWaitTask : public Task { virtual void Finalize() { delete this; } + private: Barrier* const barrier_; AtomicInteger* const count1_; @@ -100,9 +99,7 @@ class CheckPassTask : public Task { CheckPassTask(Barrier* barrier, AtomicInteger* count, size_t subtasks) : barrier_(barrier), count_(count), - subtasks_(subtasks) { - - } + subtasks_(subtasks) {} void Run(Thread* self) { for (size_t i = 0; i < subtasks_; ++i) { diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h index bbca60308a..d572cf9cba 100644 --- a/runtime/base/histogram-inl.h +++ 
b/runtime/base/histogram-inl.h @@ -212,7 +212,6 @@ inline double Histogram::Percentile(double per) const { DCHECK_GT(cumulative_perc_.size(), 0ull); size_t idx, upper_idx = 0, lower_idx = 0; for (idx = 0; idx < cumulative_perc_.size(); idx++) { - if (per <= cumulative_perc_[idx]) { upper_idx = idx; break; diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h index dfb556bd79..33a1e6518b 100644 --- a/runtime/base/histogram.h +++ b/runtime/base/histogram.h @@ -30,7 +30,6 @@ namespace art { // Designed to be simple and used with timing logger in art. template class Histogram { - const double kAdjust; const Value kBucketWidth; const size_t kInitialBucketCount; diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h index 816cbeadec..0f00a046e5 100644 --- a/runtime/base/timing_logger.h +++ b/runtime/base/timing_logger.h @@ -50,9 +50,7 @@ namespace base { } // namespace base class CumulativeLogger { - public: - explicit CumulativeLogger(const std::string& name); void prepare_stats(); ~CumulativeLogger(); @@ -68,7 +66,6 @@ class CumulativeLogger { void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_); private: - void AddPair(const std::string &label, uint64_t delta_time) EXCLUSIVE_LOCKS_REQUIRED(lock_); void DumpHistogram(std::ostream &os) EXCLUSIVE_LOCKS_REQUIRED(lock_); diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 5a31c87935..b502c9ab58 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -155,7 +155,6 @@ class DebugInstrumentationListener : public instrumentation::InstrumentationList SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::PostException(thread, throw_location, catch_method, catch_dex_pc, exception_object); } - } gDebugInstrumentationListener; // JDWP is allowed unless the Zygote forbids it. 
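Most of the runtime-side churn in this patch reduces to three cpplint conventions: no blank line immediately after an access specifier, a blank line before a private: section that follows other members, and a closing-brace comment on namespaces. A compact sketch with invented names showing the layout these hunks converge on:

#include <cstdint>

namespace art {

class Counter {  // Invented example class; not part of the runtime.
 public:
  Counter() : value_(0) {}  // Trivial bodies collapse to a one-line "{}"
  ~Counter() {}             // rather than an empty brace pair on three lines.

  int32_t Get() const {
    return value_;
  }

 private:  // Blank line before the specifier, none after it.
  int32_t value_;
};

}  // namespace art

int main() {
  art::Counter counter;
  return counter.Get();
}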
@@ -761,7 +760,6 @@ JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectI JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector& class_ids, std::vector& counts) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - std::vector classes; counts.clear(); for (size_t i = 0; i < class_ids.size(); ++i) { diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h index e915d77e6d..1975e48330 100644 --- a/runtime/dex_method_iterator.h +++ b/runtime/dex_method_iterator.h @@ -120,7 +120,6 @@ class DexMethodIterator { } private: - ClassDataItemIterator& GetIterator() const { CHECK(it_.get() != NULL); return *it_.get(); diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h index 76226041d1..5edea95dc3 100644 --- a/runtime/gc/accounting/heap_bitmap-inl.h +++ b/runtime/gc/accounting/heap_bitmap-inl.h @@ -40,7 +40,6 @@ inline void HeapBitmap::Visit(const Visitor& visitor) { SpaceSetMap* set = *it; set->Visit(visitor); } - } } // namespace accounting diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h index f4b725c4e9..1710579619 100644 --- a/runtime/gc/accounting/heap_bitmap.h +++ b/runtime/gc/accounting/heap_bitmap.h @@ -106,7 +106,6 @@ class HeapBitmap { explicit HeapBitmap(Heap* heap) : heap_(heap) {} private: - const Heap* const heap_; void AddContinuousSpaceBitmap(SpaceBitmap* bitmap); diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 19f1128963..6edc067cc7 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -64,9 +64,7 @@ SpaceBitmap* SpaceBitmap::Create(const std::string& name, byte* heap_begin, size } // Clean up any resources associated with the bitmap. -SpaceBitmap::~SpaceBitmap() { - -} +SpaceBitmap::~SpaceBitmap() {} void SpaceBitmap::SetHeapLimit(uintptr_t new_end) { DCHECK(IsAligned(new_end)); diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h index 5a1bfe3250..bf4c1ed9af 100644 --- a/runtime/gc/accounting/space_bitmap.h +++ b/runtime/gc/accounting/space_bitmap.h @@ -174,6 +174,7 @@ class SpaceBitmap { const size_t index = OffsetToIndex(offset); return &bitmap_begin_[index]; } + private: // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1, // however, we document that this is expected on heap_end_ diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index a22faac43b..1684664eff 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -79,7 +79,6 @@ class GarbageCollector { void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); protected: - // The initial phase. Done without mutators paused. virtual void InitializePhase() = 0; diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index fde2b419ac..bdda9fa4b1 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -78,7 +78,6 @@ class ImageSpace : public MemMapSpace { void Dump(std::ostream& os) const; private: - // Tries to initialize an ImageSpace from the given image path, // returning NULL on error. 
// diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index f7d776fbfb..6aedd9cf2b 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -49,9 +49,7 @@ void LargeObjectSpace::CopyLiveToMarked() { LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name) : LargeObjectSpace(name), - lock_("large object map space lock", kAllocSpaceLock) { - -} + lock_("large object map space lock", kAllocSpaceLock) {} LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) { return new LargeObjectMapSpace(name); @@ -147,9 +145,7 @@ FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* beg AddFreeChunk(begin_, end_ - begin_, NULL); } -FreeListSpace::~FreeListSpace() { - -} +FreeListSpace::~FreeListSpace() {} void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) { Chunk* chunk = ChunkFromAddr(address); diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h index db845db4e6..20a48673b6 100644 --- a/runtime/gc/space/large_object_space.h +++ b/runtime/gc/space/large_object_space.h @@ -60,7 +60,6 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs); protected: - explicit LargeObjectSpace(const std::string& name); // Approximate number of bytes which have been allocated into the space. @@ -165,6 +164,7 @@ class FreeListSpace : public LargeObjectSpace { DCHECK(m_previous == NULL || (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this)); } + private: size_t m_size; Chunk* m_previous; diff --git a/runtime/image_test.cc b/runtime/image_test.cc index 9ab1d7475b..ee50118b06 100644 --- a/runtime/image_test.cc +++ b/runtime/image_test.cc @@ -31,7 +31,6 @@ namespace art { class ImageTest : public CommonTest { - protected: virtual void SetUp() { ReserveImageSpace(); diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 2fb272cef4..45314c231b 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -366,7 +366,6 @@ static void InterpreterJni(Thread* self, AbstractMethod* method, StringPiece sho { ScopedThreadStateChange tsc(self, kNative); jresult = fn(soa.Env(), rcvr.get(), arg0.get()); - } result->SetL(soa.Decode(jresult)); ScopedThreadStateChange tsc(self, kNative); diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc index 8ef146c096..e141496c3b 100644 --- a/runtime/jdwp/jdwp_handler.cc +++ b/runtime/jdwp/jdwp_handler.cc @@ -361,7 +361,6 @@ static JdwpError VM_Capabilities(JdwpState*, Request&, ExpandBuf* reply) static JdwpError VM_CapabilitiesNew(JdwpState*, Request& request, ExpandBuf* reply) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // The first few capabilities are the same as those reported by the older call. 
VM_Capabilities(NULL, request, reply); diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h index d909058e0d..bbebecebb4 100644 --- a/runtime/mirror/abstract_method.h +++ b/runtime/mirror/abstract_method.h @@ -497,13 +497,9 @@ class MANAGED AbstractMethod : public Object { DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod); }; -class MANAGED Method : public AbstractMethod { +class MANAGED Method : public AbstractMethod {}; -}; - -class MANAGED Constructor : public AbstractMethod { - -}; +class MANAGED Constructor : public AbstractMethod {}; class MANAGED AbstractMethodClass : public Class { private: diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 2d2130c39e..e490d97f80 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -320,13 +320,11 @@ bool Class::IsFieldClass() const { Class* java_lang_Class = GetClass(); Class* java_lang_reflect_Field = java_lang_Class->GetInstanceField(0)->GetClass(); return this == java_lang_reflect_Field; - } bool Class::IsMethodClass() const { return (this == AbstractMethod::GetMethodClass()) || - (this == AbstractMethod::GetConstructorClass()); - + (this == AbstractMethod::GetConstructorClass()); } void Class::SetClassLoader(ClassLoader* new_class_loader) { diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h index d92ff19d13..aaf93f7db7 100644 --- a/runtime/oat/runtime/argument_visitor.h +++ b/runtime/oat/runtime/argument_visitor.h @@ -199,7 +199,6 @@ class QuickArgumentVisitor { uint64_t low_half = *reinterpret_cast(GetParamAddress()); uint64_t high_half = *reinterpret_cast(stack_args_); return (low_half & 0xffffffffULL) | (high_half << 32); - } void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -244,6 +243,6 @@ class QuickArgumentVisitor { bool is_split_long_or_double_; }; -} +} // namespace art #endif // ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_ diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index bb8341ee9f..6562633bc3 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -122,7 +122,6 @@ OatFile::~OatFile() { } bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base) { - char* absolute_path = realpath(elf_filename.c_str(), NULL); if (absolute_path == NULL) { return false; diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc index cbdefe8a03..d703db29d5 100644 --- a/runtime/runtime_support_llvm.cc +++ b/runtime/runtime_support_llvm.cc @@ -50,7 +50,6 @@ using namespace art; extern "C" { - class ShadowFrameCopyVisitor : public StackVisitor { public: explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), @@ -844,5 +843,4 @@ void art_portable_proxy_invoke_handler_from_code(mirror::AbstractMethod* proxy_m void art_portable_constructor_barrier() { LOG(FATAL) << "Implemented by IRBuilder."; } - } // extern "C" diff --git a/runtime/runtime_support_llvm.h b/runtime/runtime_support_llvm.h index 566f7bcb16..43ea953a96 100644 --- a/runtime/runtime_support_llvm.h +++ b/runtime/runtime_support_llvm.h @@ -18,13 +18,10 @@ #define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ extern "C" { - //---------------------------------------------------------------------------- // Runtime Support Function Lookup Callback //---------------------------------------------------------------------------- - void* art_portable_find_runtime_support_func(void* context, const char* name); - } // extern "C" #endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ diff --git a/runtime/stack.h 
b/runtime/stack.h index 0e2c4c5b86..99ba898362 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -554,7 +554,6 @@ class StackVisitor { static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - instrumentation::InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const; void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -567,6 +566,7 @@ class StackVisitor { size_t num_frames_; // Depth of the frame we're currently at. size_t cur_depth_; + protected: Context* const context_; }; @@ -638,6 +638,7 @@ class VmapTable { spill_shifts--; // wind back one as we want the last match return spill_shifts; } + private: const uint16_t* table_; }; diff --git a/runtime/thread.cc b/runtime/thread.cc index dd55195c15..a1fb862a17 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -2104,9 +2104,7 @@ class ReferenceMapVisitor : public StackVisitor { class RootCallbackVisitor { public: - RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) { - - } + RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {} void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const { visitor_(obj, arg_); diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc index 784a7caadf..067ef2d5d8 100644 --- a/runtime/thread_pool.cc +++ b/runtime/thread_pool.cc @@ -180,10 +180,7 @@ size_t ThreadPool::GetTaskCount(Thread* self) { WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size) - : ThreadPoolWorker(thread_pool, name, stack_size), - task_(NULL) { - -} + : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {} void WorkStealingWorker::Run() { Thread* self = Thread::Current(); @@ -254,9 +251,7 @@ void WorkStealingWorker::Run() { } } -WorkStealingWorker::~WorkStealingWorker() { - -} +WorkStealingWorker::~WorkStealingWorker() {} WorkStealingThreadPool::WorkStealingThreadPool(size_t num_threads) : ThreadPool(0), @@ -288,8 +283,6 @@ WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) { return NULL; } -WorkStealingThreadPool::~WorkStealingThreadPool() { - -} +WorkStealingThreadPool::~WorkStealingThreadPool() {} } // namespace art diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h index b9f185d5f9..7b626fbbe1 100644 --- a/runtime/thread_pool.h +++ b/runtime/thread_pool.h @@ -124,9 +124,7 @@ class ThreadPool { class WorkStealingTask : public Task { public: - WorkStealingTask() : ref_count_(0) { - - } + WorkStealingTask() : ref_count_(0) {} size_t GetRefCount() const { return ref_count_; diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc index 9b66318d8d..98178bc40c 100644 --- a/runtime/thread_pool_test.cc +++ b/runtime/thread_pool_test.cc @@ -105,9 +105,7 @@ class TreeTask : public Task { TreeTask(ThreadPool* const thread_pool, AtomicInteger* count, int depth) : thread_pool_(thread_pool), count_(count), - depth_(depth) { - - } + depth_(depth) {} void Run(Thread* self) { if (depth_ > 1) { diff --git a/runtime/trace.h b/runtime/trace.h index 5bd6a8d5ca..bd9c140d26 100644 --- a/runtime/trace.h +++ b/runtime/trace.h @@ -78,6 +78,7 @@ class Trace : public instrumentation::InstrumentationListener { mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc, mirror::Throwable* exception_object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: explicit Trace(File* trace_file, int buffer_size, int flags); diff --git a/runtime/verifier/method_verifier.cc 
b/runtime/verifier/method_verifier.cc index 5a70f2a696..ff7f594501 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -3749,7 +3749,6 @@ MethodVerifier::MethodSafeCastSet* MethodVerifier::GenerateSafeCastSet() { } MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() { - // It is risky to rely on reg_types for sharpening in cases of soft // verification, we might end up sharpening to a wrong implementation. Just abort. if (!failure_messages_.empty()) { diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index c66e7cb514..5b806c47e5 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -309,6 +309,7 @@ class ConflictType : public RegType { // Destroy the singleton instance. static void Destroy(); + private: ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) @@ -338,6 +339,7 @@ class UndefinedType : public RegType { // Destroy the singleton instance. static void Destroy(); + private: UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) @@ -875,6 +877,7 @@ class UnresolvedSuperClass : public UnresolvedType { } std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -909,6 +912,7 @@ class UnresolvedMergedType : public UnresolvedType { } std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc index f37edff6ac..d2c9dd6ba7 100644 --- a/runtime/verifier/reg_type_test.cc +++ b/runtime/verifier/reg_type_test.cc @@ -414,7 +414,6 @@ TEST_F(RegTypeReferenceTest, Dump) { EXPECT_EQ(expected, unresolved_merged.Dump()); } - TEST_F(RegTypeReferenceTest, JavalangString) { // Add a class to the cache then look for the same class and make sure it is a // Hit the second time. Then check for the same effect when using @@ -433,8 +432,8 @@ TEST_F(RegTypeReferenceTest, JavalangString) { const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull); EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference()); EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference()); - } + TEST_F(RegTypeReferenceTest, JavalangObject) { // Add a class to the cache then look for the same class and make sure it is a // Hit the second time. Then I am checking for the same effect when using @@ -474,7 +473,6 @@ TEST_F(RegTypeReferenceTest, Merging) { TEST_F(RegTypeTest, ConstPrecision) { - // Tests creating primitive types types. 
ScopedObjectAccess soa(Thread::Current()); RegTypeCache cache_new(true); diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc index 3a2145b9bb..d2abaac6f7 100644 --- a/runtime/verifier/register_line.cc +++ b/runtime/verifier/register_line.cc @@ -254,7 +254,6 @@ void RegisterLine::CopyResultRegister2(uint32_t vdst) { SetRegisterTypeWide(vdst, type_l, type_h); // also sets the high result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId(); result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId(); - } } diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index 9ef4a59dc1..492916ed90 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -280,4 +280,4 @@ extern "C" JNIEXPORT jint JNICALL Java_ReferenceMap_refmap(JNIEnv*, jobject, jin return count + 1; } -} +} // namespace art diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index 4b472daa5e..fc156b15d1 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -127,4 +127,4 @@ extern "C" JNIEXPORT jint JNICALL Java_StackWalk2_refmap2(JNIEnv*, jobject, jint return count + 1; } -} +} // namespace art -- cgit v1.2.3-59-g8ed1b From df62950e7a32031b82360c407d46a37b94188fbb Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Wed, 17 Jul 2013 22:39:56 -0700 Subject: Fix cpplint whitespace/parens issues Change-Id: Ifc678d59a8bed24ffddde5a0e543620b17b0aba9 --- compiler/dex/arena_bit_vector.h | 2 +- compiler/dex/dataflow_iterator.h | 2 +- compiler/dex/frontend.cc | 4 ++-- compiler/dex/mir_dataflow.cc | 4 ++-- compiler/dex/mir_graph.cc | 11 ++++++----- compiler/dex/mir_optimization.cc | 4 ++-- compiler/dex/portable/mir_to_gbc.cc | 8 ++++---- compiler/dex/quick/arm/fp_arm.cc | 2 +- compiler/dex/quick/arm/int_arm.cc | 10 +++++----- compiler/dex/quick/arm/utility_arm.cc | 2 +- compiler/dex/quick/codegen_util.cc | 18 +++++++++--------- compiler/dex/quick/gen_common.cc | 4 ++-- compiler/dex/quick/mips/utility_mips.cc | 6 +++--- compiler/dex/quick/mir_to_lir.h | 2 +- compiler/dex/quick/ralloc_util.cc | 3 +-- compiler/dex/quick/x86/utility_x86.cc | 2 +- compiler/dex/ssa_transformation.cc | 2 +- compiler/dex/vreg_analysis.cc | 2 +- compiler/llvm/llvm_compilation_unit.cc | 2 +- compiler/oat_writer.cc | 3 +-- runtime/atomic_integer.h | 2 +- runtime/base/mutex-inl.h | 4 ++-- runtime/base/mutex.cc | 16 ++++++++-------- runtime/check_jni.cc | 2 +- runtime/class_linker_test.cc | 14 +++++++------- runtime/common_test.h | 5 ++--- runtime/debugger.cc | 4 ++-- runtime/dex_file.h | 2 +- runtime/dex_instruction.cc | 5 ++--- runtime/disassembler_arm.cc | 4 ++-- runtime/elf_file.cc | 2 +- runtime/gc/accounting/atomic_stack.h | 2 +- runtime/gc/accounting/mod_union_table.cc | 30 +++++++++++++++--------------- runtime/gc/accounting/space_bitmap.h | 4 ++-- runtime/gc/accounting/space_bitmap_test.cc | 2 +- runtime/gc/collector/mark_sweep.cc | 16 ++++++++-------- runtime/gc/heap.cc | 20 ++++++++++---------- runtime/gc/heap.h | 2 +- runtime/interpreter/interpreter.cc | 10 +++++----- runtime/oat.h | 3 +-- runtime/reflection.cc | 9 +++------ runtime/stack.cc | 6 +++--- runtime/thread.cc | 2 +- runtime/thread.h | 2 +- runtime/thread_list.cc | 2 +- runtime/trace.cc | 2 +- runtime/utils.h | 6 +++--- runtime/verifier/method_verifier.cc | 16 ++++++++-------- runtime/verifier/reg_type.cc | 4 ++-- runtime/verifier/reg_type_cache.cc | 2 +- runtime/verifier/reg_type_cache.h | 2 +- 
runtime/verifier/reg_type_test.cc | 2 +- test/ReferenceMap/stack_walk_refmap_jni.cc | 4 ++-- test/StackWalk/stack_walk_jni.cc | 4 ++-- tools/cpplint.py | 5 ++++- 55 files changed, 153 insertions(+), 157 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h index de30859bfd..2a05b77092 100644 --- a/compiler/dex/arena_bit_vector.h +++ b/compiler/dex/arena_bit_vector.h @@ -83,7 +83,7 @@ class ArenaBitVector { OatBitMapKind kind = kBitMapMisc); ~ArenaBitVector() {}; - static void* operator new( size_t size, ArenaAllocator* arena) { + static void* operator new(size_t size, ArenaAllocator* arena) { return arena->NewMem(sizeof(ArenaBitVector), true, ArenaAllocator::kAllocGrowableBitMap); } static void operator delete(void* p) {}; // Nop. diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index e427862956..847a614727 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -137,7 +137,7 @@ namespace art { AllNodesIterator(MIRGraph* mir_graph, bool is_iterative) : DataflowIterator(mir_graph, is_iterative, 0, 0, false) { all_nodes_iterator_ = - new (mir_graph->GetArena()) GrowableArray::Iterator (mir_graph->GetBlockList()); + new (mir_graph->GetArena()) GrowableArray::Iterator(mir_graph->GetBlockList()); } void Reset() { diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 08039147a0..ae160d607d 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -53,7 +53,7 @@ LLVMInfo::LLVMInfo() { llvm_module_ = new ::llvm::Module("art", *llvm_context_); ::llvm::StructType::create(*llvm_context_, "JavaObject"); art::llvm::makeLLVMModuleContents(llvm_module_); - intrinsic_helper_.reset( new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_)); + intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_)); ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_)); } @@ -276,7 +276,7 @@ CompiledMethod* CompileOneMethod(CompilerDriver& compiler, #if defined(ART_USE_PORTABLE_COMPILER) , llvm_compilation_unit #endif - ); + ); // NOLINT(whitespace/parens) } } // namespace art diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc index 9632388e19..be19d5a6ae 100644 --- a/compiler/dex/mir_dataflow.cc +++ b/compiler/dex/mir_dataflow.cc @@ -1122,9 +1122,9 @@ void MIRGraph::CompilerInitializeSSAConversion() { size_t num_dalvik_reg = cu_->num_dalvik_registers; ssa_base_vregs_ = new (arena_) GrowableArray(arena_, num_dalvik_reg + GetDefCount() + 128, - kGrowableArraySSAtoDalvikMap); + kGrowableArraySSAtoDalvikMap); ssa_subscripts_ = new (arena_) GrowableArray(arena_, num_dalvik_reg + GetDefCount() + 128, - kGrowableArraySSAtoDalvikMap); + kGrowableArraySSAtoDalvikMap); /* * Initial number of SSA registers is equal to the number of Dalvik * registers. diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 0b3fa46faa..634c576874 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -410,7 +410,7 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? 
kPackedSwitch : kSparseSwitch; cur_block->successor_block_list.blocks = - new (arena_)GrowableArray(arena_, size, kGrowableArraySuccessorBlocks); + new (arena_) GrowableArray(arena_, size, kGrowableArraySuccessorBlocks); for (i = 0; i < size; i++) { BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true, @@ -427,8 +427,8 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset } /* Fall-through case */ - BasicBlock* fallthrough_block = FindBlock( cur_offset + width, /* split */ false, - /* create */ true, /* immed_pred_block_p */ NULL); + BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false, + /* create */ true, /* immed_pred_block_p */ NULL); cur_block->fall_through = fallthrough_block; fallthrough_block->predecessors->Insert(cur_block); } @@ -1146,8 +1146,9 @@ BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) { bb->block_type = block_type; bb->id = block_id; // TUNING: better estimate of the exit block predecessors? - bb->predecessors = new (arena_) - GrowableArray(arena_, (block_type == kExitBlock) ? 2048 : 2, kGrowableArrayPredecessors); + bb->predecessors = new (arena_) GrowableArray(arena_, + (block_type == kExitBlock) ? 2048 : 2, + kGrowableArrayPredecessors); bb->successor_block_list.block_list_type = kNotUsed; block_id_map_.Put(block_id, block_id); return bb; diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 882b81a4a6..f83bbb23c6 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -228,7 +228,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) { MIR* mir_next = mir->next; Instruction::Code br_opcode = mir_next->dalvikInsn.opcode; ConditionCode ccode = kCondNv; - switch(br_opcode) { + switch (br_opcode) { case Instruction::IF_EQZ: ccode = kCondEq; break; @@ -255,7 +255,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) { (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) && (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) { mir_next->dalvikInsn.arg[0] = ccode; - switch(opcode) { + switch (opcode) { case Instruction::CMPL_FLOAT: mir_next->dalvikInsn.opcode = static_cast(kMirOpFusedCmplFloat); diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc index cfd3dafbee..85ffec50e7 100644 --- a/compiler/dex/portable/mir_to_gbc.cc +++ b/compiler/dex/portable/mir_to_gbc.cc @@ -297,7 +297,7 @@ void MirConverter::EmitSuspendCheck() { ::llvm::Value* src1, ::llvm::Value* src2) { ::llvm::Value* res = NULL; DCHECK_EQ(src1->getType(), src2->getType()); - switch(cc) { + switch (cc) { case kCondEq: res = irb_->CreateICmpEQ(src1, src2); break; case kCondNe: res = irb_->CreateICmpNE(src1, src2); break; case kCondLt: res = irb_->CreateICmpSLT(src1, src2); break; @@ -369,7 +369,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb, ::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long, ::llvm::Value* src1, ::llvm::Value* src2) { ::llvm::Value* res = NULL; - switch(op) { + switch (op) { case kOpAdd: res = irb_->CreateAdd(src1, src2); break; case kOpSub: res = irb_->CreateSub(src1, src2); break; case kOpRsub: res = irb_->CreateSub(src2, src1); break; @@ -393,7 +393,7 @@ void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest, ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg); ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg); ::llvm::Value* res = NULL; - switch(op) { + switch (op) { case kOpAdd: res = irb_->CreateFAdd(src1, src2); break; case kOpSub: res = 
irb_->CreateFSub(src1, src2); break; case kOpMul: res = irb_->CreateFMul(src1, src2); break; @@ -1781,7 +1781,7 @@ char RemapShorty(char shorty_type) { * types (which is valid so long as we always do a real expansion of passed * arguments and field loads). */ - switch(shorty_type) { + switch (shorty_type) { case 'Z' : shorty_type = 'I'; break; case 'B' : shorty_type = 'I'; break; case 'S' : shorty_type = 'I'; break; diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc index 2c626a0e8f..8f73f0c2f5 100644 --- a/compiler/dex/quick/arm/fp_arm.cc +++ b/compiler/dex/quick/arm/fp_arm.cc @@ -193,7 +193,7 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, } NewLIR0(kThumb2Fmstat); ConditionCode ccode = static_cast(mir->dalvikInsn.arg[0]); - switch(ccode) { + switch (ccode) { case kCondEq: case kCondNe: break; diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index e12df6c56c..3a367c984d 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -129,7 +129,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int32_t low_reg = rl_src1.low_reg; int32_t high_reg = rl_src1.high_reg; - switch(ccode) { + switch (ccode) { case kCondEq: case kCondNe: LIR* target; @@ -270,7 +270,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg); - switch(ccode) { + switch (ccode) { case kCondEq: OpCondBranch(kCondNe, not_taken); break; @@ -436,7 +436,7 @@ bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode, int r_hi = AllocTemp(); int r_lo = AllocTemp(); NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg); - switch(pattern) { + switch (pattern) { case Divide3: OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi, rl_src.low_reg, EncodeShift(kArmAsr, 31)); @@ -1002,7 +1002,7 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, return; } RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - switch(opcode) { + switch (opcode) { case Instruction::SHL_LONG: case Instruction::SHL_LONG_2ADDR: if (shift_amount == 1) { @@ -1090,7 +1090,7 @@ void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode, int32_t mod_imm_hi = ModifiedImmediate(val_hi); // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit - switch(opcode) { + switch (opcode) { case Instruction::ADD_LONG: case Instruction::ADD_LONG_2ADDR: case Instruction::SUB_LONG: diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index 80f597d640..305a14798a 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -549,7 +549,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) { ArmOpcode opcode = kThumbBkpt; switch (op) { case kOpAdd: - if ( !neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */ + if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */ DCHECK_EQ((value & 0x3), 0); return NewLIR1(kThumbAddSpI7, value >> 2); } else if (short_form) { diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 8698b1f9ed..7a59644273 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -55,7 +55,7 @@ bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volat } /* Convert an instruction to a NOP */ -void 
Mir2Lir::NopLIR( LIR* lir) { +void Mir2Lir::NopLIR(LIR* lir) { lir->flags.is_nop = true; } @@ -190,10 +190,10 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { } if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) { - DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->use_mask, "use")); + DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->use_mask, "use")); } if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) { - DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->def_mask, "def")); + DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->def_mask, "def")); } } @@ -336,10 +336,10 @@ LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) { } static void PushWord(std::vector&buf, int data) { - buf.push_back( data & 0xff); - buf.push_back( (data >> 8) & 0xff); - buf.push_back( (data >> 16) & 0xff); - buf.push_back( (data >> 24) & 0xff); + buf.push_back(data & 0xff); + buf.push_back((data >> 8) & 0xff); + buf.push_back((data >> 16) & 0xff); + buf.push_back((data >> 24) & 0xff); } static void AlignBuffer(std::vector&buf, size_t offset) { @@ -454,8 +454,8 @@ void Mir2Lir::InstallFillArrayData() { if (tab_rec == NULL) break; AlignBuffer(code_buffer_, tab_rec->offset); for (int i = 0; i < (tab_rec->size + 1) / 2; i++) { - code_buffer_.push_back( tab_rec->table[i] & 0xFF); - code_buffer_.push_back( (tab_rec->table[i] >> 8) & 0xFF); + code_buffer_.push_back(tab_rec->table[i] & 0xFF); + code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF); } } } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index a34d2a9e76..d1bfd2d9d9 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -279,7 +279,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { int r_dst = AllocTemp(); int r_idx = AllocTemp(); int r_val = INVALID_REG; - switch(cu_->instruction_set) { + switch (cu_->instruction_set) { case kThumb2: r_val = TargetReg(kLr); break; @@ -1311,7 +1311,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero); } // NOTE: callout here is not a safepoint - CallHelper(r_tgt, func_offset, false /* not a safepoint */ ); + CallHelper(r_tgt, func_offset, false /* not a safepoint */); if (op == kOpDiv) rl_result = GetReturn(false); else diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 8510006051..127d191a5d 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -107,7 +107,7 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) { } LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) { - LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/ ); + LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/); res->target = target; return res; } @@ -642,8 +642,8 @@ LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) { return NULL; } -LIR* MipsMir2Lir::StoreBaseIndexedDisp( int rBase, int r_index, int scale, int displacement, - int r_src, int r_src_hi, OpSize size, int s_reg) { +LIR* MipsMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement, + int r_src, int r_src_hi, OpSize size, int s_reg) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS"; return NULL; } diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index 41e5a2d988..7765eaaeef 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ 
-233,7 +233,7 @@ class Mir2Lir : public Backend { RegisterClass oat_reg_class_by_size(OpSize size) { return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte || - size == kSignedByte ) ? kCoreReg : kAnyReg; + size == kSignedByte) ? kCoreReg : kAnyReg; } size_t CodeBufferSizeInBytes() { diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc index 4c91223687..bc3740a4e1 100644 --- a/compiler/dex/quick/ralloc_util.cc +++ b/compiler/dex/quick/ralloc_util.cc @@ -1021,8 +1021,7 @@ void Mir2Lir::DoPromotion() { if (!(cu_->disable_opt & (1 << kPromoteRegs))) { // Promote FpRegs - for (int i = 0; (i < num_regs) && - (FpRegs[i].count >= promotion_threshold ); i++) { + for (int i = 0; (i < num_regs) && (FpRegs[i].count >= promotion_threshold); i++) { int p_map_idx = SRegToPMap(FpRegs[i].s_reg); if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) { int reg = AllocPreservedFPReg(FpRegs[i].s_reg, diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index 6376e3b87a..75367a340a 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -100,7 +100,7 @@ LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) { } LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) { - LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/ ); + LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/); res->target = target; return res; } diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc index 3a0cbcc67c..7739e2996f 100644 --- a/compiler/dex/ssa_transformation.cc +++ b/compiler/dex/ssa_transformation.cc @@ -266,7 +266,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) { void MIRGraph::InitializeDominationInfo(BasicBlock* bb) { int num_total_blocks = GetBasicBlockListCount(); - if (bb->dominators == NULL ) { + if (bb->dominators == NULL) { bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks, false /* expandable */, kBitMapDominators); bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks, diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc index 10bbd1f8b8..f361dd75e0 100644 --- a/compiler/dex/vreg_analysis.cc +++ b/compiler/dex/vreg_analysis.cc @@ -160,7 +160,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) { if ((mir->dalvikInsn.opcode == Instruction::RETURN) || (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) || (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) { - switch(cu_->shorty[0]) { + switch (cu_->shorty[0]) { case 'I': changed |= SetCore(ssa_rep->uses[0], true); break; diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc index 1f2b977921..592059e6a6 100644 --- a/compiler/llvm/llvm_compilation_unit.cc +++ b/compiler/llvm/llvm_compilation_unit.cc @@ -114,7 +114,7 @@ LlvmCompilationUnit::LlvmCompilationUnit(const CompilerLLVM* compiler_llvm, size irb_.reset(new IRBuilder(*context_, *module_, *intrinsic_helper_)); // We always need a switch case, so just use a normal function. 
- switch(GetInstructionSet()) { + switch (GetInstructionSet()) { default: runtime_support_.reset(new RuntimeSupportBuilder(*context_, *module_, *irb_)); break; diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 4c32506d43..da05c49e0e 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -399,8 +399,7 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, fp_spill_mask, mapping_table_offset, vmap_table_offset, - gc_map_offset - ); + gc_map_offset); if (compiler_driver_->IsImage()) { ClassLinker* linker = Runtime::Current()->GetClassLinker(); diff --git a/runtime/atomic_integer.h b/runtime/atomic_integer.h index 6711722672..05024b4680 100644 --- a/runtime/atomic_integer.h +++ b/runtime/atomic_integer.h @@ -34,7 +34,7 @@ class AtomicInteger { return *this; } - operator int32_t () const { + operator int32_t() const { return value_; } diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h index 07157da7aa..b3f5092c76 100644 --- a/runtime/base/mutex-inl.h +++ b/runtime/base/mutex-inl.h @@ -148,7 +148,7 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) { } android_atomic_dec(&num_pending_readers_); } - } while(!done); + } while (!done); #else CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_)); #endif @@ -176,7 +176,7 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) { } else { LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; } - } while(!done); + } while (!done); #else CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); #endif diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index 25c0b9ea8e..1df0207503 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -178,7 +178,7 @@ void BaseMutex::RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint6 do { slot = cur_content_log_entry_; new_slot = (slot + 1) % kContentionLogSize; - } while(!cur_content_log_entry_.CompareAndSwap(slot, new_slot)); + } while (!cur_content_log_entry_.CompareAndSwap(slot, new_slot)); contention_log_[new_slot].blocked_tid = blocked_tid; contention_log_[new_slot].owner_tid = owner_tid; contention_log_[new_slot].count = 1; @@ -312,7 +312,7 @@ void Mutex::ExclusiveLock(Thread* self) { } android_atomic_dec(&num_contenders_); } - } while(!done); + } while (!done); DCHECK_EQ(state_, 1); exclusive_owner_ = SafeGetTid(self); #else @@ -344,7 +344,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) { } else { return false; } - } while(!done); + } while (!done); DCHECK_EQ(state_, 1); exclusive_owner_ = SafeGetTid(self); #else @@ -404,7 +404,7 @@ void Mutex::ExclusiveUnlock(Thread* self) { _exit(1); } } - } while(!done); + } while (!done); #else CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_)); #endif @@ -513,7 +513,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) { } android_atomic_dec(&num_pending_writers_); } - } while(!done); + } while (!done); DCHECK_EQ(state_, -1); exclusive_owner_ = SafeGetTid(self); #else @@ -545,7 +545,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { } else { LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; } - } while(!done); + } while (!done); #else CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); #endif @@ -583,7 +583,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32 } android_atomic_dec(&num_pending_writers_); } - } while(!done); + } while (!done); exclusive_owner_ = SafeGetTid(self); #else timespec ts; @@ -616,7 +616,7 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) { // Owner holds it 
exclusively. return false; } - } while(!done); + } while (!done); #else int result = pthread_rwlock_tryrdlock(&rwlock_); if (result == EBUSY) { diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index 403a2eb348..7429ab11a9 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -401,7 +401,7 @@ class ScopedCheck { * * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable. */ - void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) { + void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { va_list ap; const mirror::AbstractMethod* traceMethod = NULL; diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index e5844b0038..3c1cd7850e 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -937,14 +937,14 @@ TEST_F(ClassLinkerTest, StaticFields) { // TODO: Remove EXPECT_FALSE when GCC can handle EXPECT_EQ // http://code.google.com/p/googletest/issues/detail?id=322 - EXPECT_FALSE( s0->GetBoolean(statics)); - EXPECT_EQ(6, s1->GetByte(statics)); - EXPECT_EQ('b', s2->GetChar(statics)); - EXPECT_EQ(-535, s3->GetShort(statics)); - EXPECT_EQ(2000000001, s4->GetInt(statics)); + EXPECT_FALSE(s0->GetBoolean(statics)); + EXPECT_EQ(6, s1->GetByte(statics)); + EXPECT_EQ('b', s2->GetChar(statics)); + EXPECT_EQ(-535, s3->GetShort(statics)); + EXPECT_EQ(2000000001, s4->GetInt(statics)); EXPECT_EQ(0x34567890abcdef12LL, s5->GetLong(statics)); - EXPECT_EQ(0.75, s6->GetFloat(statics)); - EXPECT_EQ(16777219, s7->GetDouble(statics)); + EXPECT_EQ(0.75, s6->GetFloat(statics)); + EXPECT_EQ(16777219, s7->GetDouble(statics)); EXPECT_TRUE(s8->GetObject(statics)->AsString()->Equals("robot")); } diff --git a/runtime/common_test.h b/runtime/common_test.h index 73c47b5a8c..e735e279b6 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -178,8 +178,7 @@ class CommonTest : public testing::Test { fp_spill_mask, reinterpret_cast(mapping_table), reinterpret_cast(vmap_table), - reinterpret_cast(gc_map) - ); + reinterpret_cast(gc_map)); } void MakeExecutable(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -308,7 +307,7 @@ class CommonTest : public testing::Test { options.push_back(std::make_pair("-Xcheck:jni", reinterpret_cast(NULL))); options.push_back(std::make_pair(min_heap_string.c_str(), reinterpret_cast(NULL))); options.push_back(std::make_pair(max_heap_string.c_str(), reinterpret_cast(NULL))); - if(!Runtime::Create(options, false)) { + if (!Runtime::Create(options, false)) { LOG(FATAL) << "Failed to create runtime"; return; } diff --git a/runtime/debugger.cc b/runtime/debugger.cc index b502c9ab58..4fbee51045 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -184,7 +184,7 @@ static Dbg::HpsgWhat gDdmNhsgWhat; static ObjectRegistry* gRegistry = NULL; // Recent allocation tracking. 
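// ---------------------------------------------------------------------------
// [Editor's note] Every "} while (!done);" reformatted in the mutex.cc hunks
// above follows the same lock-free shape: read the current state, compute the
// desired state, and retry if another thread changed the state in between.
// A portable sketch with std::atomic; ART itself uses its own AtomicInteger
// and android_atomic_* primitives, so the names below are assumptions:
#include <atomic>
#include <cstdint>

static int32_t IncrementWithCas(std::atomic<int32_t>& state) {
  int32_t cur;
  int32_t next;
  do {
    cur = state.load(std::memory_order_relaxed);       // observe current state
    next = cur + 1;                                    // compute desired state
  } while (!state.compare_exchange_weak(cur, next));   // retry if we raced
  return next;
}
// ---------------------------------------------------------------------------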
-static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER ("AllocTracker lock"); +static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER("AllocTracker lock"); AllocRecord* Dbg::recent_allocation_records_ PT_GUARDED_BY(gAllocTrackerLock) = NULL; // TODO: CircularBuffer static size_t gAllocRecordMax GUARDED_BY(gAllocTrackerLock) = 0; static size_t gAllocRecordHead GUARDED_BY(gAllocTrackerLock) = 0; @@ -2761,7 +2761,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec VLOG(jdwp) << " Control has returned from event thread"; /* wait for thread to re-suspend itself */ - SuspendThread(thread_id, false /* request_suspension */ ); + SuspendThread(thread_id, false /* request_suspension */); self->TransitionFromSuspendedToRunnable(); } diff --git a/runtime/dex_file.h b/runtime/dex_file.h index 28e06cc5b9..8edeb18418 100644 --- a/runtime/dex_file.h +++ b/runtime/dex_file.h @@ -1039,7 +1039,7 @@ class ClassDataItemIterator { } InvokeType GetMethodInvokeType(const DexFile::ClassDef& class_def) const { if (HasNextDirectMethod()) { - if ((GetMemberAccessFlags() & kAccStatic) != 0 ) { + if ((GetMemberAccessFlags() & kAccStatic) != 0) { return kStatic; } else { return kDirect; diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc index 6527f103eb..a36d7c5166 100644 --- a/runtime/dex_instruction.cc +++ b/runtime/dex_instruction.cc @@ -56,12 +56,11 @@ int const Instruction::kInstructionVerifyFlags[] = { int const Instruction::kInstructionSizeInCodeUnits[] = { #define INSTRUCTION_SIZE(opcode, c, p, format, r, i, a, v) \ - (( opcode == NOP ) ? -1 : \ + ((opcode == NOP) ? -1 : \ ((format >= k10x) && (format <= k10t)) ? 1 : \ ((format >= k20t) && (format <= k22c)) ? 2 : \ ((format >= k32x) && (format <= k3rc)) ? 3 : \ - ( format == k51l ) ? 5 : -1 \ - ), + (format == k51l) ? 5 : -1), #include "dex_instruction_list.h" DEX_INSTRUCTION_LIST(INSTRUCTION_SIZE) #undef DEX_INSTRUCTION_LIST diff --git a/runtime/disassembler_arm.cc b/runtime/disassembler_arm.cc index 172bef84d6..3c9cb6ecab 100644 --- a/runtime/disassembler_arm.cc +++ b/runtime/disassembler_arm.cc @@ -1184,7 +1184,7 @@ size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr) ThumbRegister Rm(instr, 6); ThumbRegister Rn(instr, 3); ThumbRegister Rt(instr, 0); - switch(opB) { + switch (opB) { case 0: opcode << "str"; break; case 1: opcode << "strh"; break; case 2: opcode << "strb"; break; @@ -1206,7 +1206,7 @@ size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr) uint16_t opB = (instr >> 11) & 1; ThumbRegister Rn(instr, 3); ThumbRegister Rt(instr, 0); - switch(opA) { + switch (opA) { case 6: imm5 <<= 2; opcode << (opB == 0 ? "str" : "ldr"); diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc index da122e68ee..6ce36e8f6a 100644 --- a/runtime/elf_file.cc +++ b/runtime/elf_file.cc @@ -366,7 +366,7 @@ static unsigned elfhash(const char *_name) { const unsigned char *name = (const unsigned char *) _name; unsigned h = 0, g; - while(*name) { + while (*name) { h = (h << 4) + *name++; g = h & 0xf0000000; h ^= g; diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h index 5310c18ec6..1e8beda0fd 100644 --- a/runtime/gc/accounting/atomic_stack.h +++ b/runtime/gc/accounting/atomic_stack.h @@ -66,7 +66,7 @@ class AtomicStack { // Stack overflow. 
return false; } - } while(!back_index_.CompareAndSwap(index, index + 1)); + } while (!back_index_.CompareAndSwap(index, index + 1)); begin_[index] = value; return true; } diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc index aa02f82679..91c92537b5 100644 --- a/runtime/gc/accounting/mod_union_table.cc +++ b/runtime/gc/accounting/mod_union_table.cc @@ -44,8 +44,8 @@ class MarkIfReachesAllocspaceVisitor { } // Extra parameters are required since we use this same visitor signature for checking objects. - void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, - bool /* is_static */) const { + void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, + bool /* is_static */) const { // TODO: Optimize? // TODO: C++0x auto const std::vector& spaces = heap_->GetContinuousSpaces(); @@ -70,7 +70,7 @@ class ModUnionVisitor { bitmap_(bitmap) { } - void operator ()(const Object* obj) const + void operator()(const Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != NULL); @@ -90,7 +90,7 @@ class ModUnionClearCardSetVisitor { : cleared_cards_(cleared_cards) { } - inline void operator ()(byte* card, byte expected_value, byte new_value) const { + inline void operator()(byte* card, byte expected_value, byte new_value) const { if (expected_value == CardTable::kCardDirty) { cleared_cards_->insert(card); } @@ -106,7 +106,7 @@ class ModUnionClearCardVisitor { : cleared_cards_(cleared_cards) { } - void operator ()(byte* card, byte expected_card, byte new_card) const { + void operator()(byte* card, byte expected_card, byte new_card) const { if (expected_card == CardTable::kCardDirty) { cleared_cards_->push_back(card); } @@ -120,7 +120,7 @@ class ModUnionScanImageRootVisitor { explicit ModUnionScanImageRootVisitor(collector::MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {} - void operator ()(const Object* root) const + void operator()(const Object* root) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(root != NULL); @@ -147,8 +147,8 @@ class AddToReferenceArrayVisitor { } // Extra parameters are required since we use this same visitor signature for checking objects. - void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, - bool /* is_static */) const { + void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, + bool /* is_static */) const { // Only add the reference if it is non null and fits our criteria. if (ref != NULL && mod_union_table_->AddReference(obj, ref)) { references_->push_back(ref); @@ -168,7 +168,7 @@ class ModUnionReferenceVisitor { references_(references) { } - void operator ()(const Object* obj) const + void operator()(const Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != NULL); // We don't have an early exit since we use the visitor pattern, an early @@ -191,8 +191,8 @@ class CheckReferenceVisitor { // Extra parameters are required since we use this same visitor signature for checking objects. // TODO: Fixme when anotatalysis works with visitors. 
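// ---------------------------------------------------------------------------
// [Editor's note] The "operator ()" -> "operator()" fixes in these GC hunks
// all touch the same pattern: a small functor whose call operator is invoked
// generically from a templated traversal. A reduced sketch (VisitAll and
// CountNonZeroVisitor are illustrative names, not ART APIs):
#include <cstddef>

template <typename Visitor>
static void VisitAll(const int* begin, const int* end, const Visitor& visitor) {
  for (const int* it = begin; it != end; ++it) {
    visitor(*it);  // only requires that Visitor defines operator()(int)
  }
}

class CountNonZeroVisitor {
 public:
  explicit CountNonZeroVisitor(size_t* count) : count_(count) {}
  void operator()(int value) const {  // cpplint style: no space before '('
    if (value != 0) {
      ++*count_;
    }
  }
 private:
  size_t* const count_;
};
// ---------------------------------------------------------------------------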
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, - bool /* is_static */) const + void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, + bool /* is_static */) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { Heap* heap = mod_union_table_->GetHeap(); if (ref != NULL && mod_union_table_->AddReference(obj, ref) && @@ -216,13 +216,13 @@ class CheckReferenceVisitor { class ModUnionCheckReferences { public: - explicit ModUnionCheckReferences (ModUnionTableReferenceCache* mod_union_table, - const std::set& references) + explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table, + const std::set& references) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) : mod_union_table_(mod_union_table), references_(references) { } - void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS { + void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS { Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); DCHECK(obj != NULL); CheckReferenceVisitor visitor(mod_union_table_, references_); @@ -333,7 +333,7 @@ void ModUnionTableReferenceCache::MarkReferences(collector::MarkSweep* mark_swee typedef SafeMap >::const_iterator It; for (It it = references_.begin(); it != references_.end(); ++it) { typedef std::vector::const_iterator It2; - for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) { + for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref) { mark_sweep->MarkRoot(*it_ref); ++count; } diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h index bf4c1ed9af..77f93a266c 100644 --- a/runtime/gc/accounting/space_bitmap.h +++ b/runtime/gc/accounting/space_bitmap.h @@ -103,7 +103,7 @@ class SpaceBitmap { : bitmap_(bitmap) { } - void operator ()(mirror::Object* obj) const { + void operator()(mirror::Object* obj) const { bitmap_->Clear(obj); } private: @@ -112,7 +112,7 @@ class SpaceBitmap { template void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const { - for (; visit_begin < visit_end; visit_begin += kAlignment ) { + for (; visit_begin < visit_end; visit_begin += kAlignment) { visitor(reinterpret_cast(visit_begin)); } } diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc index d00d7c2739..516a44991e 100644 --- a/runtime/gc/accounting/space_bitmap_test.cc +++ b/runtime/gc/accounting/space_bitmap_test.cc @@ -46,7 +46,7 @@ class BitmapVerify { begin_(begin), end_(end) {} - void operator ()(const mirror::Object* obj) { + void operator()(const mirror::Object* obj) { EXPECT_TRUE(obj >= begin_); EXPECT_TRUE(obj <= end_); EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast(obj) & 0xF) != 0)); diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index a5bad2f958..2ca5f2dd7a 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -72,7 +72,7 @@ class SetFingerVisitor { public: explicit SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {} - void operator ()(void* finger) const { + void operator()(void* finger) const { mark_sweep_->SetFinger(reinterpret_cast(finger)); } @@ -524,7 +524,7 @@ class CheckObjectVisitor { public: explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {} - void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const + void 
operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const NO_THREAD_SAFETY_ANALYSIS { if (kDebugLocking) { Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); @@ -565,7 +565,7 @@ class ScanObjectVisitor { explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {} // TODO: Fixme when anotatalysis works with visitors. - void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS { + void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS { if (kDebugLocking) { Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); @@ -609,7 +609,7 @@ class CheckBitmapVisitor { public: explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} - void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS { + void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS { if (kDebugLocking) { Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); } @@ -1081,8 +1081,8 @@ class MarkObjectVisitor { explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {} // TODO: Fixme when anotatalysis works with visitors. - void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, - bool /* is_static */) const + void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, + bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS { if (kDebugLocking) { Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); @@ -1148,8 +1148,8 @@ class MarkStackChunk : public Task { public: explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {} - void operator ()(const Object* /* obj */, const Object* ref, - const MemberOffset& /* offset */, bool /* is_static */) const { + void operator()(const Object* /* obj */, const Object* ref, + const MemberOffset& /* offset */, bool /* is_static */) const { if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) { chunk_task_->MarkStackPush(ref); } diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index deb1b8c3e5..7bd7c5d7e2 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -228,7 +228,7 @@ void Heap::DeleteThreadPool() { // Sort spaces based on begin address struct ContinuousSpaceSorter { - bool operator ()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const { + bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const { return a->Begin() < b->Begin(); } }; @@ -894,8 +894,8 @@ class ReferringObjectsFinder { } // For MarkSweep::VisitObjectReferences. - void operator ()(const mirror::Object* referrer, const mirror::Object* object, - const MemberOffset&, bool) const { + void operator()(const mirror::Object* referrer, const mirror::Object* object, + const MemberOffset&, bool) const { if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) { referring_objects_.push_back(const_cast(referrer)); } @@ -1165,7 +1165,7 @@ static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) { class ScanVisitor { public: - void operator ()(const mirror::Object* obj) const { + void operator()(const mirror::Object* obj) const { LOG(INFO) << "Would have rescanned object " << obj; } }; @@ -1183,8 +1183,8 @@ class VerifyReferenceVisitor { // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter // analysis on visitors. 
- void operator ()(const mirror::Object* obj, const mirror::Object* ref, - const MemberOffset& offset, bool /* is_static */) const + void operator()(const mirror::Object* obj, const mirror::Object* ref, + const MemberOffset& offset, bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS { // Verify that the reference is live. if (UNLIKELY(ref != NULL && !IsLive(ref))) { @@ -1264,7 +1264,7 @@ class VerifyObjectVisitor { public: explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {} - void operator ()(const mirror::Object* obj) const + void operator()(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // Note: we are verifying the references in obj but not obj itself, this is because obj must // be live or else how did we find it in the live bitmap? @@ -1311,8 +1311,8 @@ class VerifyReferenceCardVisitor { // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for // annotalysis on visitors. - void operator ()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset, - bool is_static) const NO_THREAD_SAFETY_ANALYSIS { + void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset, + bool is_static) const NO_THREAD_SAFETY_ANALYSIS { // Filter out class references since changing an object's class does not mark the card as dirty. // Also handles large objects, since the only reference they hold is a class reference. if (ref != NULL && !ref->IsClass()) { @@ -1378,7 +1378,7 @@ class VerifyLiveStackReferences { : heap_(heap), failed_(false) {} - void operator ()(const mirror::Object* obj) const + void operator()(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyReferenceCardVisitor visitor(heap_, const_cast(&failed_)); collector::MarkSweep::VisitObjectReferences(obj, visitor); diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 630d063ff0..32e068cdae 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -70,7 +70,7 @@ namespace space { class AgeCardVisitor { public: - byte operator ()(byte card) const { + byte operator()(byte card) const { if (card == accounting::CardTable::kCardDirty) { return card - 1; } else { diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 45314c231b..376d3be0a7 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -969,11 +969,11 @@ static inline const Instruction* FindNextInstructionFollowingException(Thread* s return JValue(); /* Handled in caller. 
*/ \ } \ } else { \ - inst = inst-> next_function (); \ + inst = inst->next_function(); \ } static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh) - __attribute__ ((cold, noreturn, noinline)); + __attribute__((cold, noreturn, noinline)); static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -989,7 +989,7 @@ static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh) template static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register) - NO_THREAD_SAFETY_ANALYSIS __attribute__ ((hot)); + NO_THREAD_SAFETY_ANALYSIS __attribute__((hot)); template static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item, @@ -1254,7 +1254,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte if (UNLIKELY(s == NULL)) { HANDLE_PENDING_EXCEPTION(); } else { - shadow_frame.SetVRegReference( inst->VRegA_21c(), s); + shadow_frame.SetVRegReference(inst->VRegA_21c(), s); inst = inst->Next_2xx(); } break; @@ -1265,7 +1265,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte if (UNLIKELY(s == NULL)) { HANDLE_PENDING_EXCEPTION(); } else { - shadow_frame.SetVRegReference( inst->VRegA_31c(), s); + shadow_frame.SetVRegReference(inst->VRegA_31c(), s); inst = inst->Next_3xx(); } break; diff --git a/runtime/oat.h b/runtime/oat.h index fb28962762..4bd1871a71 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -97,8 +97,7 @@ class PACKED(4) OatMethodOffsets { uint32_t fp_spill_mask, uint32_t mapping_table_offset, uint32_t vmap_table_offset, - uint32_t gc_map_offset - ); + uint32_t gc_map_offset); ~OatMethodOffsets(); diff --git a/runtime/reflection.cc b/runtime/reflection.cc index 467575cdf5..359b539fff 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -206,8 +206,7 @@ bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_r ThrowClassCastException(throw_location, StringPrintf("Couldn't convert result of type %s to %s", PrettyDescriptor(srcType).c_str(), - PrettyDescriptor(dstType).c_str() - ).c_str()); + PrettyDescriptor(dstType).c_str()).c_str()); } return false; } @@ -297,8 +296,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* ThrowClassCastException(throw_location, StringPrintf("Couldn't convert result of type %s to %s", PrettyTypeOf(o).c_str(), - PrettyDescriptor(dst_class).c_str() - ).c_str()); + PrettyDescriptor(dst_class).c_str()).c_str()); } return false; } @@ -359,8 +357,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* StringPrintf("%s has type %s, got %s", UnboxingFailureKind(m, index, f).c_str(), PrettyDescriptor(dst_class).c_str(), - PrettyDescriptor(src_descriptor.c_str()).c_str() - ).c_str()); + PrettyDescriptor(src_descriptor.c_str()).c_str()).c_str()); return false; } diff --git a/runtime/stack.cc b/runtime/stack.cc index fcd0f2dc7e..bf0f78fd37 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -185,12 +185,12 @@ void StackVisitor::SetVReg(mirror::AbstractMethod* m, uint16_t vreg, uint32_t ne } uintptr_t StackVisitor::GetGPR(uint32_t reg) const { - DCHECK (cur_quick_frame_ != NULL) << "This is a quick frame routine"; + DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine"; return context_->GetGPR(reg); } void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) { - DCHECK (cur_quick_frame_ != NULL) << "This is a 
quick frame routine"; + DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine"; context_->SetGPR(reg, value); } @@ -344,7 +344,7 @@ void StackVisitor::WalkStack(bool include_transitions) { } cur_depth_++; cur_shadow_frame_ = cur_shadow_frame_->GetLink(); - } while(cur_shadow_frame_ != NULL); + } while (cur_shadow_frame_ != NULL); } if (include_transitions) { bool should_continue = VisitFrame(); diff --git a/runtime/thread.cc b/runtime/thread.cc index a1fb862a17..fb50ed1ef3 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1816,7 +1816,7 @@ class CatchBlockStackVisitor : public StackVisitor { m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true); verifier.Verify(); std::vector kinds = verifier.DescribeVRegs(dex_pc); - for(uint16_t reg = 0; reg < num_regs; reg++) { + for (uint16_t reg = 0; reg < num_regs; reg++) { VRegKind kind = static_cast(kinds.at(reg * 2)); switch (kind) { case kUndefined: diff --git a/runtime/thread.h b/runtime/thread.h index 64ff7c22fa..5c9e8c3ec7 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -104,7 +104,7 @@ class PACKED(4) Thread { static Thread* Current() { // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious // that we can replace this with a direct %fs access on x86. - if(!is_started_) { + if (!is_started_) { return NULL; } else { void* thread = pthread_getspecific(Thread::pthread_key_self_); diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index 59c38b4345..7aa835a387 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -468,7 +468,7 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() { // Wait for another thread to exit before re-checking. thread_exit_cond_.Wait(self); } - } while(!all_threads_are_daemons); + } while (!all_threads_are_daemons); } void ThreadList::SuspendAllDaemonThreads() { diff --git a/runtime/trace.cc b/runtime/trace.cc index 32932907e5..2227b8dde7 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -230,7 +230,7 @@ void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int // Create Trace object. 
{ MutexLock mu(self, *Locks::trace_lock_); - if(the_trace_ != NULL) { + if (the_trace_ != NULL) { LOG(ERROR) << "Trace already in progress, ignoring this request"; } else { the_trace_ = new Trace(trace_file.release(), buffer_size, flags); diff --git a/runtime/utils.h b/runtime/utils.h index a08e46524b..72597f5ea4 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -352,18 +352,18 @@ bool IsValidOatFilename(const std::string& filename); class VoidFunctor { public: template - inline void operator () (A a) const { + inline void operator() (A a) const { UNUSED(a); } template - inline void operator () (A a, B b) const { + inline void operator() (A a, B b) const { UNUSED(a); UNUSED(b); } template - inline void operator () (A a, B b, C c) const { + inline void operator() (A a, B b, C c) const { UNUSED(a); UNUSED(b); UNUSED(c); diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index ff7f594501..59de9b34aa 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -1014,12 +1014,12 @@ bool MethodVerifier::VerifyCodeFlow() { verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map); MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet(); - if(method_to_safe_casts != NULL ) { + if (method_to_safe_casts != NULL) { SetSafeCastMap(ref, method_to_safe_casts); } MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap(); - if(pc_to_concrete_method != NULL ) { + if (pc_to_concrete_method != NULL) { SetDevirtMap(ref, pc_to_concrete_method); } return true; @@ -1824,7 +1824,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { uint32_t instance_of_idx = 0; if (0 != work_insn_idx_) { instance_of_idx = work_insn_idx_ - 1; - while(0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) { + while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) { instance_of_idx--; } CHECK(insn_flags_[instance_of_idx].IsOpcode()); @@ -1854,7 +1854,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { // which is not done because of the multiple inheritance implications. const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c()); - if(!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) { + if (!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) { RegisterLine* update_line = new RegisterLine(code_item_->registers_size_, this); if (inst->Opcode() == Instruction::IF_EQZ) { fallthrough_line.reset(update_line); @@ -1868,7 +1868,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { // register encoding space of instance-of, and propagate type information to the source // of the move-object. uint32_t move_idx = instance_of_idx - 1; - while(0 != move_idx && !insn_flags_[move_idx].IsOpcode()) { + while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) { move_idx--; } CHECK(insn_flags_[move_idx].IsOpcode()); @@ -3766,7 +3766,7 @@ MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() { bool is_interface = (inst->Opcode() == Instruction::INVOKE_INTERFACE) || (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE); - if(!is_interface && !is_virtual) { + if (!is_interface && !is_virtual) { continue; } // Get reg type for register holding the reference to the object that will be dispatched upon. 
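// ---------------------------------------------------------------------------
// [Editor's note] The VoidFunctor cleaned up in runtime/utils.h above is the
// degenerate visitor: it accepts one, two, or three arguments of any type and
// does nothing, so callers can pass it wherever a visitor is required but no
// per-element work is wanted. A standalone sketch (static_cast<void> stands
// in for ART's UNUSED macro):
class VoidFunctorSketch {
 public:
  template <typename A>
  void operator()(A a) const {
    static_cast<void>(a);  // suppress unused-parameter warnings
  }

  template <typename A, typename B>
  void operator()(A a, B b) const {
    static_cast<void>(a);
    static_cast<void>(b);
  }
};
// ---------------------------------------------------------------------------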
@@ -3792,7 +3792,7 @@ MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() { } mirror::AbstractMethod* abstract_method = dex_cache_->GetResolvedMethod(is_range ? inst->VRegB_3rc() : inst->VRegB_35c()); - if(abstract_method == NULL) { + if (abstract_method == NULL) { // If the method is not found in the cache this means that it was never found // by ResolveMethodAndCheckAccess() called when verifying invoke_*. continue; @@ -3986,7 +3986,7 @@ const MethodReference* MethodVerifier::GetDevirtMap(const MethodReference& ref, // Look up the PC in the map, get the concrete method to execute and return its reference. MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc); - if(pc_to_concrete_method != it->second->end()) { + if (pc_to_concrete_method != it->second->end()) { return &(pc_to_concrete_method->second); } else { return NULL; diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index 1c61a29cee..8418928a83 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -211,7 +211,7 @@ void LongHiType::Destroy() { } LongLoType* LongLoType::GetInstance() { - CHECK (instance_ != NULL); + CHECK(instance_ != NULL); return instance_; } @@ -355,7 +355,7 @@ BooleanType* BooleanType::GetInstance() { } void BooleanType::Destroy() { - if(BooleanType::instance != NULL) { + if (BooleanType::instance != NULL) { delete instance; instance = NULL; } diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index 6013250835..22c585ca29 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -377,7 +377,7 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) { entry = new UnresolvedReferenceType(descriptor.c_str(), entries_.size()); } else { mirror::Class* klass = uninit_type.GetClass(); - if(uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) { + if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) { // For uninitialized "this reference" look for reference types that are not precise. 
for (size_t i = primitive_count_; i < entries_.size(); i++) { RegType* cur_entry = entries_[i]; diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h index 814dff79f6..24117586e3 100644 --- a/runtime/verifier/reg_type_cache.h +++ b/runtime/verifier/reg_type_cache.h @@ -44,7 +44,7 @@ class RegTypeCache { } ~RegTypeCache(); static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if(!RegTypeCache::primitive_initialized_) { + if (!RegTypeCache::primitive_initialized_) { CHECK_EQ(RegTypeCache::primitive_count_, 0); CreatePrimitiveTypes(); CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitives); diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc index d2c9dd6ba7..a24c3c9120 100644 --- a/runtime/verifier/reg_type_test.cc +++ b/runtime/verifier/reg_type_test.cc @@ -405,7 +405,7 @@ TEST_F(RegTypeReferenceTest, Dump) { std::string expected = "Unresolved Reference: java.lang.DoesNotExist"; EXPECT_EQ(expected, unresolved_ref.Dump()); expected = "Precise Reference: java.lang.String"; - EXPECT_EQ( expected, resolved_ref.Dump()); + EXPECT_EQ(expected, resolved_ref.Dump()); expected ="Uninitialized Reference: java.lang.String Allocation PC: 10"; EXPECT_EQ(expected, resolved_unintialiesd.Dump()); expected = "Unresolved And Uninitialized Reference: java.lang.DoesNotExist Allocation PC: 12"; diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index 492916ed90..ccdbffd338 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -33,8 +33,8 @@ namespace art { #define IS_IN_REF_BITMAP(mh, ref_bitmap, reg) \ - ( ((reg) < mh.GetCodeItem()->registers_size_) && \ - (( *((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01) ) + (((reg) < mh.GetCodeItem()->registers_size_) && \ + ((*((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01)) #define CHECK_REGS_CONTAIN_REFS(...) \ do { \ diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index fc156b15d1..d100c1071d 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -31,8 +31,8 @@ namespace art { #define REG(mh, reg_bitmap, reg) \ - ( ((reg) < mh.GetCodeItem()->registers_size_) && \ - (( *((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01) ) + (((reg) < mh.GetCodeItem()->registers_size_) && \ + ((*((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01)) #define CHECK_REGS(...) if (!IsShadowFrame()) { \ int t[] = {__VA_ARGS__}; \ diff --git a/tools/cpplint.py b/tools/cpplint.py index 30c712856e..da5a938fd3 100755 --- a/tools/cpplint.py +++ b/tools/cpplint.py @@ -1526,7 +1526,10 @@ def CheckSpacingForFunctionCall(filename, line, linenum, error): # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. - not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and + # BEGIN android-changed + # not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and + not Search(r'\b(if|for|while|switch|return|delete|new)\b', fncall) and + # END android-changed # Ignore pointers/references to functions. not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. 
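// ---------------------------------------------------------------------------
// [Editor's note] The IS_IN_REF_BITMAP and REG macros whose parentheses were
// tightened above both decode a dense per-register bitmap: byte reg/8 holds
// registers reg..reg+7, one bit each. Written out as a plain function
// (IsBitSet is an illustrative name):
#include <cstddef>
#include <cstdint>

static bool IsBitSet(const uint8_t* bitmap, size_t reg) {
  return ((bitmap[reg / 8] >> (reg % 8)) & 0x01) != 0;
}
// e.g. IsBitSet(ref_bitmap, 11) tests bit 3 of byte 1.
// ---------------------------------------------------------------------------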
-- cgit v1.2.3-59-g8ed1b From 02c8cc6d1312a2b55533f02f6369dc7c94672f90 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Thu, 18 Jul 2013 15:54:44 -0700 Subject: Fixing cpplint whitespace/blank_line, whitespace/end_of_line, whitespace/labels, whitespace/semicolon issues Change-Id: Ide4f8ea608338b3fed528de7582cfeb2011997b6 --- Android.mk | 2 +- compiler/dex/mir_graph.cc | 7 +++--- compiler/dex/mir_optimization.cc | 4 ++-- compiler/dex/quick/codegen_util.cc | 2 +- compiler/dex/quick/ralloc_util.cc | 2 +- compiler/dex/vreg_analysis.cc | 2 +- compiler/llvm/llvm_compilation_unit.cc | 2 +- runtime/base/histogram_test.cc | 2 -- runtime/base/mutex.cc | 4 ++-- runtime/base/timing_logger.cc | 1 - runtime/dex_instruction.h | 2 +- runtime/elf_file.cc | 34 ++++++++++++++--------------- runtime/gc/accounting/mod_union_table-inl.h | 2 +- runtime/gc/accounting/mod_union_table.cc | 4 ++-- runtime/gc/accounting/space_bitmap_test.cc | 2 +- runtime/gc/collector/mark_sweep.cc | 4 ++-- runtime/gc/collector/partial_mark_sweep.h | 2 +- runtime/gc/collector/sticky_mark_sweep.h | 2 +- runtime/gc/space/large_object_space.h | 2 +- runtime/gc/space/space.cc | 4 ++-- runtime/runtime_support.h | 2 +- runtime/runtime_support_llvm.cc | 2 -- runtime/thread_pool.h | 2 +- runtime/verifier/method_verifier.cc | 4 ++-- runtime/verifier/reg_type.h | 2 +- 25 files changed, 47 insertions(+), 51 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/Android.mk b/Android.mk index 4ef4372db0..c1561de07f 100644 --- a/Android.mk +++ b/Android.mk @@ -334,7 +334,7 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens,+build/namespaces,+readability/fn_size,+whitespace/operators,+readability/braces,+whitespace/indent \ + --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens,+build/namespaces,+readability/fn_size,+whitespace/operators,+readability/braces,+whitespace/indent,+whitespace/blank_line,+whitespace/end_of_line,+whitespace/labels,+whitespace/semicolon \ $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) # "mm cpplint-art-aspirational" to see warnings we would like to fix diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index a768a19744..c2ee8e8812 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -453,7 +453,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_ cur_block->successor_block_list.blocks = new (arena_) GrowableArray(arena_, 2, kGrowableArraySuccessorBlocks); - for (;iterator.HasNext(); iterator.Next()) { + for (; iterator.HasNext(); iterator.Next()) { BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/, false /* creat */, NULL /* immed_pred_block_p */); catch_block->catch_entry = true; @@ -994,9 +994,10 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { case Instruction::k22b: str.append(StringPrintf(", #%d", insn.vC)); break; - default: - ; // Nothing left to print + default: { + // Nothing left to print } + } } if (nop) { str.append("]--optimized away"); diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 22c738ae70..d79b26e4b9 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -191,9 +191,9 @@ static SelectInstructionKind SelectKind(MIR* mir) { case 
Instruction::GOTO_16: case Instruction::GOTO_32: return kSelectGoto; - default:; + default: + return kSelectNone; } - return kSelectNone; } int MIRGraph::GetSSAUseCount(int s_reg) { diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 7a59644273..8daa397135 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -461,7 +461,7 @@ void Mir2Lir::InstallFillArrayData() { } static int AssignLiteralOffsetCommon(LIR* lir, int offset) { - for (;lir != NULL; lir = lir->next) { + for (; lir != NULL; lir = lir->next) { lir->offset = offset; offset += 4; } diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc index 92bd94eb78..4542f8f0b0 100644 --- a/compiler/dex/quick/ralloc_util.cc +++ b/compiler/dex/quick/ralloc_util.cc @@ -507,7 +507,7 @@ void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) { if (start && finish) { LIR *p; DCHECK_EQ(s_reg1, s_reg2); - for (p = start; ;p = p->next) { + for (p = start; ; p = p->next) { NopLIR(p); if (p == finish) break; diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc index a97d1ecccf..b50fe71b85 100644 --- a/compiler/dex/vreg_analysis.cc +++ b/compiler/dex/vreg_analysis.cc @@ -282,7 +282,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) { bool defined_ref = rl_temp.defined && rl_temp.ref; bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0); bool is_high = is_phi && rl_temp.wide && rl_temp.high_word; - for (int i = 0; i < ssa_rep->num_uses;i++) { + for (int i = 0; i < ssa_rep->num_uses; i++) { rl_temp = reg_location_[ssa_rep->uses[i]]; defined_fp |= rl_temp.defined && rl_temp.fp; defined_core |= rl_temp.defined && rl_temp.core; diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc index 592059e6a6..d4568b5a4c 100644 --- a/compiler/llvm/llvm_compilation_unit.cc +++ b/compiler/llvm/llvm_compilation_unit.cc @@ -19,7 +19,7 @@ // #define stat64 stat // #define fstat64 fstat // #define lstat64 lstat -// +// // which causes grief. bionic probably should not do that. 
#include diff --git a/runtime/base/histogram_test.cc b/runtime/base/histogram_test.cc index 9f3587aa7f..5592f1cedc 100644 --- a/runtime/base/histogram_test.cc +++ b/runtime/base/histogram_test.cc @@ -129,7 +129,6 @@ TEST(Histtest, UpdateRange) { EXPECT_GE(PerValue, 132); EXPECT_LE(PerValue, 145); } -; TEST(Histtest, Reset) { UniquePtr > hist(new Histogram("Reset")); @@ -173,7 +172,6 @@ TEST(Histtest, Reset) { EXPECT_GE(PerValue, 132); EXPECT_LE(PerValue, 145); } -; TEST(Histtest, MultipleCreateHist) { UniquePtr > hist(new Histogram("MultipleCreateHist")); diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index 1df0207503..af45f3fcd9 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -460,8 +460,8 @@ std::ostream& operator<<(std::ostream& os, const Mutex& mu) { return os; } -ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) : - BaseMutex(name, level) +ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) + : BaseMutex(name, level) #if ART_USE_FUTEXES , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0) #endif diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc index c7cbbe504f..bf6fd17a49 100644 --- a/runtime/base/timing_logger.cc +++ b/runtime/base/timing_logger.cc @@ -46,7 +46,6 @@ void TimingLogger::AddSplit(const std::string &label) { uint64_t TimingLogger::GetTotalNs() const { return times_.back() - times_.front(); } -; void TimingLogger::Dump(std::ostream &os) const { uint64_t largest_time = 0; diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h index c3424dcdff..502f378d8a 100644 --- a/runtime/dex_instruction.h +++ b/runtime/dex_instruction.h @@ -88,7 +88,7 @@ class Instruction { DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM) #undef DEX_INSTRUCTION_LIST #undef INSTRUCTION_ENUM - } ; + }; enum Format { k10x, // op diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc index 6ce36e8f6a..0be274c3e3 100644 --- a/runtime/elf_file.cc +++ b/runtime/elf_file.cc @@ -22,23 +22,23 @@ namespace art { -ElfFile::ElfFile() : - file_(NULL), - writable_(false), - program_header_only_(false), - header_(NULL), - base_address_(NULL), - program_headers_start_(NULL), - section_headers_start_(NULL), - dynamic_program_header_(NULL), - dynamic_section_start_(NULL), - symtab_section_start_(NULL), - dynsym_section_start_(NULL), - strtab_section_start_(NULL), - dynstr_section_start_(NULL), - hash_section_start_(NULL), - symtab_symbol_table_(NULL), - dynsym_symbol_table_(NULL) {} +ElfFile::ElfFile() + : file_(NULL), + writable_(false), + program_header_only_(false), + header_(NULL), + base_address_(NULL), + program_headers_start_(NULL), + section_headers_start_(NULL), + dynamic_program_header_(NULL), + dynamic_section_start_(NULL), + symtab_section_start_(NULL), + dynsym_section_start_(NULL), + strtab_section_start_(NULL), + dynstr_section_start_(NULL), + hash_section_start_(NULL), + symtab_symbol_table_(NULL), + dynsym_symbol_table_(NULL) {} ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only) { UniquePtr elf_file(new ElfFile()); diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h index 9ea74d4d92..29450c1d34 100644 --- a/runtime/gc/accounting/mod_union_table-inl.h +++ b/runtime/gc/accounting/mod_union_table-inl.h @@ -27,7 +27,7 @@ namespace accounting { // A mod-union table to record image references to the Zygote and alloc space. 
class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache { -public: + public: explicit ModUnionTableToZygoteAllocspace(Heap* heap) : ModUnionTableReferenceCache(heap) {} bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) { diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc index 940ed13080..b33cbceba1 100644 --- a/runtime/gc/accounting/mod_union_table.cc +++ b/runtime/gc/accounting/mod_union_table.cc @@ -252,7 +252,7 @@ void ModUnionTableReferenceCache::Verify() { const byte* card = &*it->first; if (*card == CardTable::kCardClean) { std::set reference_set; - for (It2 itr = it->second.begin(); itr != it->second.end();++itr) { + for (It2 itr = it->second.begin(); itr != it->second.end(); ++itr) { reference_set.insert(*itr); } ModUnionCheckReferences visitor(this, reference_set); @@ -284,7 +284,7 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) { uintptr_t end = start + CardTable::kCardSize; os << reinterpret_cast(start) << "-" << reinterpret_cast(end) << "->{"; typedef std::vector::const_iterator It3; - for (It3 itr = it->second.begin(); itr != it->second.end();++itr) { + for (It3 itr = it->second.begin(); itr != it->second.end(); ++itr) { os << reinterpret_cast(*itr) << ","; } os << "},"; diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc index 516a44991e..806f301ec1 100644 --- a/runtime/gc/accounting/space_bitmap_test.cc +++ b/runtime/gc/accounting/space_bitmap_test.cc @@ -66,7 +66,7 @@ TEST_F(SpaceBitmapTest, ScanRange) { EXPECT_TRUE(space_bitmap.get() != NULL); // Set all the odd bits in the first BitsPerWord * 3 to one. - for (size_t j = 0;j < kBitsPerWord * 3; ++j) { + for (size_t j = 0; j < kBitsPerWord * 3; ++j) { const mirror::Object* obj = reinterpret_cast(heap_begin + j * SpaceBitmap::kAlignment); if (reinterpret_cast(obj) & 0xF) { diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 2d07f06269..dd72c0ab28 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -894,7 +894,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma // Empty the allocation stack. Thread* self = Thread::Current(); - for (size_t i = 0;i < count;++i) { + for (size_t i = 0; i < count; ++i) { Object* obj = objects[i]; // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack. if (LIKELY(mark_bitmap->HasAddress(obj))) { @@ -1271,7 +1271,7 @@ void MarkSweep::ProcessMarkStack() { const size_t fifo_size = 4; const size_t fifo_mask = fifo_size - 1; const Object* fifo[fifo_size]; - for (size_t i = 0;i < fifo_size;++i) { + for (size_t i = 0; i < fifo_size; ++i) { fifo[i] = NULL; } size_t fifo_pos = 0; diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h index 718c62e5f0..25304b999a 100644 --- a/runtime/gc/collector/partial_mark_sweep.h +++ b/runtime/gc/collector/partial_mark_sweep.h @@ -33,7 +33,7 @@ class PartialMarkSweep : public MarkSweep { explicit PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); ~PartialMarkSweep() {} -protected: + protected: // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial // collections, ie the Zygote space. Also mark this space is immune. 
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h index 8cd4aedf62..e009b62c4b 100644 --- a/runtime/gc/collector/sticky_mark_sweep.h +++ b/runtime/gc/collector/sticky_mark_sweep.h @@ -34,7 +34,7 @@ class StickyMarkSweep : public PartialMarkSweep { explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); ~StickyMarkSweep() {} -protected: + protected: // Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the // alloc space will be marked as immune. void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h index 20a48673b6..09c55ec566 100644 --- a/runtime/gc/space/large_object_space.h +++ b/runtime/gc/space/large_object_space.h @@ -89,7 +89,7 @@ class LargeObjectMapSpace : public LargeObjectSpace { // TODO: disabling thread safety analysis as this may be called when we already hold lock_. bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS; -private: + private: explicit LargeObjectMapSpace(const std::string& name); virtual ~LargeObjectMapSpace() {} diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc index eae281ad40..de48b743f5 100644 --- a/runtime/gc/space/space.cc +++ b/runtime/gc/space/space.cc @@ -22,8 +22,8 @@ namespace art { namespace gc { namespace space { -Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy) : - name_(name), gc_retention_policy_(gc_retention_policy) { } +Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy) + : name_(name), gc_retention_policy_(gc_retention_policy) { } void Space::Dump(std::ostream& os) const { os << GetName() << ":" << GetGcRetentionPolicy(); diff --git a/runtime/runtime_support.h b/runtime/runtime_support.h index a6c3b38345..43c678428b 100644 --- a/runtime/runtime_support.h +++ b/runtime/runtime_support.h @@ -354,7 +354,7 @@ static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mut JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, jobject rcvr_jobj, jobject interface_method_jobj, std::vector& args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Entry point for deoptimization. static inline uintptr_t GetDeoptimizationEntryPoint() { diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc index 7b4c3ca9c3..9d83f9e49e 100644 --- a/runtime/runtime_support_llvm.cc +++ b/runtime/runtime_support_llvm.cc @@ -105,7 +105,6 @@ class ShadowFrameCopyVisitor : public StackVisitor { } // namespace art extern "C" { - using ::art::CatchHandlerIterator; using ::art::DexFile; using ::art::FindFieldFast; @@ -923,5 +922,4 @@ void art_portable_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, . void art_portable_constructor_barrier() { LOG(FATAL) << "Implemented by IRBuilder."; } - } // extern "C" diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h index 7b626fbbe1..c26926cf74 100644 --- a/runtime/thread_pool.h +++ b/runtime/thread_pool.h @@ -30,7 +30,7 @@ namespace art { class ThreadPool; class Task : public Closure { -public: + public: // Called when references reaches 0. 
virtual void Finalize() { } }; diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index f414b791ea..ca4dce4b13 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -58,7 +58,7 @@ void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* fl interesting = flags[i].IsOpcode(); break; case kTrackCompilerInterestPoints: - interesting = flags[i].IsCompileTimeInfoPoint() || flags[i].IsBranchTarget() ; + interesting = flags[i].IsCompileTimeInfoPoint() || flags[i].IsBranchTarget(); break; case kTrackRegsBranches: interesting = flags[i].IsBranchTarget(); @@ -3756,7 +3756,7 @@ MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() { } UniquePtr pc_to_concrete_method_map; - const uint16_t* insns = code_item_->insns_ ; + const uint16_t* insns = code_item_->insns_; const Instruction* inst = Instruction::At(insns); const Instruction* end = Instruction::At(insns + code_item_->insns_size_in_code_units_); diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index fa9fc8c62b..33f4195885 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -200,7 +200,7 @@ class RegType { bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Primitive::Type GetPrimitiveType() const ; + Primitive::Type GetPrimitiveType() const; bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsInstantiableTypes() const; const std::string& GetDescriptor() const { -- cgit v1.2.3-59-g8ed1b From 2d88862f0752a7a0e65145b088f49dabd49d4284 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Thu, 18 Jul 2013 17:02:00 -0700 Subject: Fixing cpplint readability/casting issues Change-Id: I6821da0e23737995a9b884a04e9b63fac640cd05 --- Android.mk | 2 +- compiler/dex/quick/codegen_util.cc | 4 ++-- compiler/dex/quick/gen_invoke.cc | 2 +- compiler/llvm/runtime_support_builder.cc | 2 +- runtime/atomic.cc | 2 +- runtime/base/histogram-inl.h | 2 +- runtime/common_test.h | 3 ++- runtime/debugger.cc | 4 ++-- runtime/debugger.h | 2 +- runtime/gc/heap.cc | 2 +- runtime/hprof/hprof.cc | 12 ++++++------ runtime/instrumentation.cc | 5 +++-- runtime/jdwp/jdwp_adb.cc | 4 ++-- runtime/mem_map.cc | 3 ++- runtime/native/java_lang_System.cc | 2 +- runtime/thread_x86.cc | 2 +- 16 files changed, 28 insertions(+), 25 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/Android.mk b/Android.mk index e660827eab..8331b69c2a 100644 --- a/Android.mk +++ b/Android.mk @@ -334,7 +334,7 @@ endif .PHONY: cpplint-art cpplint-art: ./art/tools/cpplint.py \ - --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens,+build/namespaces,+readability/fn_size,+whitespace/operators,+readability/braces,+whitespace/indent,+whitespace/blank_line,+whitespace/end_of_line,+whitespace/labels,+whitespace/semicolon,+legal/copyright \ + --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens,+build/namespaces,+readability/fn_size,+whitespace/operators,+readability/braces,+whitespace/indent,+whitespace/blank_line,+whitespace/end_of_line,+whitespace/labels,+whitespace/semicolon,+legal/copyright,+readability/casting \ $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v 
art/compiler/llvm/generated/) # "mm cpplint-art-aspirational" to see warnings we would like to fix diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 8daa397135..630e294b37 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -190,10 +190,10 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { } if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) { - DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->use_mask, "use")); + DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->use_mask, "use")); } if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) { - DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->def_mask, "def")); + DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->def_mask, "def")); } } diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 8840526ec9..13a59bf69a 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -1113,7 +1113,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg); } else { CHECK(cu_->instruction_set == kX86); - ((X86Mir2Lir*)this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset); + reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset); } StoreValue(rl_dest, rl_result); return true; } diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc index 19ccc3603c..e6479e0fd8 100644 --- a/compiler/llvm/runtime_support_builder.cc +++ b/compiler/llvm/runtime_support_builder.cc @@ -43,7 +43,7 @@ RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context, #define GET_RUNTIME_SUPPORT_FUNC_DECL(ID, NAME) \ do { \ ::llvm::Function* fn = module_.getFunction(#NAME); \ - DCHECK_NE(fn, (void*)NULL) << "Function not found: " << #NAME; \ + DCHECK(fn != NULL) << "Function not found: " << #NAME; \ runtime_support_func_decls_[runtime_support::ID] = fn; \ } while (0); diff --git a/runtime/atomic.cc b/runtime/atomic.cc index f2a998289c..c91db793ba 100644 --- a/runtime/atomic.cc +++ b/runtime/atomic.cc @@ -34,7 +34,7 @@ static const size_t kSwapMutexCount = 32; static std::vector<Mutex*>* gSwapMutexes; static Mutex& GetSwapMutex(const volatile int64_t* addr) { - return *(*gSwapMutexes)[((unsigned)(void*)(addr) >> 3U) % kSwapMutexCount]; + return *(*gSwapMutexes)[(reinterpret_cast<uintptr_t>(addr) >> 3U) % kSwapMutexCount]; } #endif diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h index d572cf9cba..1a63cf477f 100644 --- a/runtime/base/histogram-inl.h +++ b/runtime/base/histogram-inl.h @@ -66,7 +66,7 @@ template <class Value> inline size_t Histogram<Value>::FindBucket(Value val) { // dividing the value by the bucket width. DCHECK_GE(val, min_); DCHECK_LE(val, max_); - size_t bucket_idx = static_cast<size_t>((double)(val - min_) / bucket_width_); + size_t bucket_idx = static_cast<size_t>(static_cast<double>(val - min_) / bucket_width_); DCHECK_GE(bucket_idx, 0ul); DCHECK_LE(bucket_idx, bucket_count_); return bucket_idx; } diff --git a/runtime/common_test.h b/runtime/common_test.h index 778ca63826..03a45aa20b 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -508,7 +508,8 @@ class CommonTest : public testing::Test { void ReserveImageSpace() { // Reserve where the image will be loaded up front so that other parts of test set up don't // accidentally end up colliding with the fixed memory address when we need to load the image. 
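// ---------------------------------------------------------------------------
// [Editor's note] GetSwapMutex in the runtime/atomic.cc hunk above is classic
// lock striping: on targets without native 64-bit atomics, each 64-bit swap
// takes one of kSwapMutexCount mutexes chosen by hashing the address.
// Shifting right by 3 drops the low bits of an 8-byte-aligned pointer so that
// neighboring addresses land on different stripes. A portable sketch, with
// std::mutex standing in for art::Mutex and illustrative names throughout:
#include <cstddef>
#include <cstdint>
#include <mutex>

static const size_t kStripeCount = 32;  // assumption mirroring kSwapMutexCount
static std::mutex gStripes[kStripeCount];

static std::mutex& StripeFor(const volatile int64_t* addr) {
  return gStripes[(reinterpret_cast<uintptr_t>(addr) >> 3U) % kStripeCount];
}
// ---------------------------------------------------------------------------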
- image_reservation_.reset(MemMap::MapAnonymous("image reservation", (byte*)ART_BASE_ADDRESS, + image_reservation_.reset(MemMap::MapAnonymous("image reservation", + reinterpret_cast<byte*>(ART_BASE_ADDRESS), (size_t)100 * 1024 * 1024, // 100MB PROT_NONE)); } diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 4fbee51045..9e9dd8736c 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -3281,7 +3281,7 @@ class HeapChunkContext { const size_t kMaxFreeLen = 2 * kPageSize; void* freeStart = startOfNextMemoryChunk_; void* freeEnd = start; - size_t freeLen = (char*)freeEnd - (char*)freeStart; + size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart); if (!native || freeLen < kMaxFreeLen) { AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen); flush = false; @@ -3302,7 +3302,7 @@ class HeapChunkContext { // allocation then the first sizeof(size_t) may belong to it. const size_t dlMallocOverhead = sizeof(size_t); AppendChunk(state, start, used_bytes + dlMallocOverhead); - startOfNextMemoryChunk_ = (char*)start + used_bytes + dlMallocOverhead; + startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead; } void AppendChunk(uint8_t state, void* ptr, size_t length) diff --git a/runtime/debugger.h b/runtime/debugger.h index 28a2c60f8c..9005fda392 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -417,7 +417,7 @@ class Dbg { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: - static void DdmBroadcast(bool) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStartOrStop(Thread*, uint32_t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 170915d8fc..341b62f48a 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -315,7 +315,7 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) { size_t total_objects_allocated = GetObjectsAllocatedEver(); size_t total_bytes_allocated = GetBytesAllocatedEver(); if (total_duration != 0) { - const double total_seconds = double(total_duration / 1000) / 1000000.0; + const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0; os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n"; os << "Mean GC size throughput: " << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n"; diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index d66ec7933b..3c8099a917 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -72,7 +72,7 @@ namespace hprof { #define U2_TO_BUF_BE(buf, offset, value) \ do { \ unsigned char* buf_ = (unsigned char*)(buf); \ - int offset_ = (int)(offset); \ + int offset_ = static_cast<int>(offset); \ uint16_t value_ = (uint16_t)(value); \ buf_[offset_ + 0] = (unsigned char)(value_ >> 8); \ buf_[offset_ + 1] = (unsigned char)(value_ ); \ @@ -81,7 +81,7 @@ namespace hprof { #define U4_TO_BUF_BE(buf, offset, value) \ do { \ unsigned char* buf_ = (unsigned char*)(buf); \ - int offset_ = (int)(offset); \ + int offset_ = static_cast<int>(offset); \ uint32_t value_ = (uint32_t)(value); \ buf_[offset_ + 0] = (unsigned char)(value_ >> 24); \ buf_[offset_ + 1] = (unsigned char)(value_ >> 16); \ @@ -92,7 +92,7 @@ namespace hprof { #define U8_TO_BUF_BE(buf, offset, value) \ do { \ unsigned char* buf_ = (unsigned char*)(buf); \ - int offset_ = (int)(offset); \ + int offset_ = static_cast<int>(offset); \ uint64_t value_ = (uint64_t)(value); \ buf_[offset_ + 0] = (unsigned char)(value_ >> 56); \
buf_[offset_ + 1] = (unsigned char)(value_ >> 48); \ @@ -222,7 +222,7 @@ class HprofRecord { return UNIQUE_ERROR; } nb = fwrite(body_, 1, length_, fp_); - if (nb != (int)length_) { + if (nb != static_cast<int>(length_)) { return UNIQUE_ERROR; } @@ -984,9 +984,9 @@ int Hprof::DumpHeapObject(mirror::Object* obj) { if (size == 1) { rec->AddU1List((const uint8_t*)aobj->GetRawData(sizeof(uint8_t)), length); } else if (size == 2) { - rec->AddU2List((const uint16_t*)(void*)aobj->GetRawData(sizeof(uint16_t)), length); + rec->AddU2List((const uint16_t*)aobj->GetRawData(sizeof(uint16_t)), length); } else if (size == 4) { - rec->AddU4List((const uint32_t*)(void*)aobj->GetRawData(sizeof(uint32_t)), length); + rec->AddU4List((const uint32_t*)aobj->GetRawData(sizeof(uint32_t)), length); } else if (size == 8) { rec->AddU8List((const uint64_t*)aobj->GetRawData(sizeof(uint64_t)), length); } diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 8598d6d4f8..bbd205227d 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -473,7 +473,7 @@ void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object size_t frame_id = StackVisitor::ComputeNumFrames(self); std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack(); if (kVerboseInstrumentation) { - LOG(INFO) << "Entering " << PrettyMethod(method) << " from PC " << (void*)lr; + LOG(INFO) << "Entering " << PrettyMethod(method) << " from PC " << reinterpret_cast<void*>(lr); } instrumentation::InstrumentationStackFrame instrumentation_frame(this_object, method, lr, frame_id, interpreter_entry); @@ -530,7 +530,8 @@ uint64_t Instrumentation::PopInstrumentationStackFrame(Thread* self, uintptr_t* (static_cast<uint64_t>(*return_pc) << 32); } else { if (kVerboseInstrumentation) { - LOG(INFO) << "Returning from " << PrettyMethod(method) << " to PC " << (void*)(*return_pc); + LOG(INFO) << "Returning from " << PrettyMethod(method) + << " to PC " << reinterpret_cast<void*>(*return_pc); } return *return_pc; } diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc index 9b9fe4c02b..2bfe63e3d4 100644 --- a/runtime/jdwp/jdwp_adb.cc +++ b/runtime/jdwp/jdwp_adb.cc @@ -157,7 +157,7 @@ int JdwpAdbState::ReceiveClientFd() { cmsg->cmsg_len = msg.msg_controllen; cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; - ((int*)(void*)CMSG_DATA(cmsg))[0] = -1; + (reinterpret_cast<int*>(CMSG_DATA(cmsg)))[0] = -1; int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0)); @@ -170,7 +170,7 @@ int JdwpAdbState::ReceiveClientFd() { return -1; } - return ((int*)(void*)CMSG_DATA(cmsg))[0]; + return (reinterpret_cast<int*>(CMSG_DATA(cmsg)))[0]; } /* diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index c75dffa63c..a0f389cb1e 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -192,7 +192,8 @@ bool MemMap::ProtectRegion(uint8_t* addr, size_t length, int prot) { * (The address must be page-aligned, the length doesn't need to be, * but we do need to ensure we cover the same range.) */ - uint8_t* alignAddr = (uint8_t*) ((uintptr_t) addr & ~(kPageSize-1)); + uint8_t* alignAddr = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(addr), + kPageSize)); size_t alignLength = length + (addr - alignAddr); if (mprotect(alignAddr, alignLength, prot) == 0) { diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc index 2462f2fd8e..30b4dc7ef5 100644 --- a/runtime/native/java_lang_System.cc +++ b/runtime/native/java_lang_System.cc @@ -123,7 +123,7 @@ void MemmoveWords(void* dst, const void* src, size_t n) { // Check for leftovers.
Either we finished exactly, or we have one remaining 16-bit chunk. if ((n & 0x02) != 0) { - *(uint16_t*)d = *(uint16_t*)s; + *reinterpret_cast<uint16_t*>(d) = *reinterpret_cast<const uint16_t*>(s); } else { // Copy backward, starting at the end. diff --git a/runtime/thread_x86.cc b/runtime/thread_x86.cc index 959f317471..c398b2877a 100644 --- a/runtime/thread_x86.cc +++ b/runtime/thread_x86.cc @@ -73,7 +73,7 @@ void Thread::InitCpu() { entry.d = seg_32bit; entry.g = limit_in_pages; - entry_number = i386_set_ldt(LDT_AUTO_ALLOC, (ldt_entry*)(void*)(&entry), 1); + entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast<ldt_entry*>(&entry), 1); if (entry_number == -1) { PLOG(FATAL) << "i386_set_ldt failed"; } -- cgit v1.2.3-59-g8ed1b From 479f83c196d5a95e36196eac548dc6019e70a5be Mon Sep 17 00:00:00 2001 From: buzbee Date: Fri, 19 Jul 2013 10:58:21 -0700 Subject: Dex compiler: re-enable method pattern matching The dex compiler's mechanism to detect simple methods and emit streamlined code was disabled during the last big restructuring (there was a question of how to make it useful for Portable as well as Quick). This CL does not address the Portable question, but turns the optimization back on for Quick. See b/9428200 Change-Id: I9f25b41219d7a243ec64efb18278e5a874766f4d --- compiler/dex/mir_graph.cc | 8 +++----- compiler/dex/mir_graph.h | 9 +++++++++ compiler/dex/quick/codegen_util.cc | 7 ++----- 3 files changed, 14 insertions(+), 10 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 90e68abad8..264604c355 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -107,6 +107,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena) method_sreg_(0), attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke. checkstats_(NULL), + special_case_(kNoHandler), arena_(arena) { try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */); } @@ -590,9 +591,6 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_ bool* dead_pattern = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_patterns, true, ArenaAllocator::kAllocMisc)); - SpecialCaseHandler special_case = kNoHandler; - // FIXME - wire this up - (void)special_case; int pattern_pos = 0; /* Parse all instructions and put them into containing basic blocks */ @@ -614,12 +612,12 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_ /* Possible simple method?
*/ if (live_pattern) { live_pattern = false; - special_case = kNoHandler; + special_case_ = kNoHandler; for (int i = 0; i < num_patterns; i++) { if (!dead_pattern[i]) { if (special_patterns[i].opcodes[pattern_pos] == opcode) { live_pattern = true; - special_case = special_patterns[i].handler_code; + special_case_ = special_patterns[i].handler_code; } else { dead_pattern[i] = true; } diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index 9c63d9c5ed..342d2a296a 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -509,6 +509,14 @@ class MIRGraph { return reg_location_[method_sreg_]; } + bool IsSpecialCase() { + return special_case_ != kNoHandler; + } + + SpecialCaseHandler GetSpecialCase() { + return special_case_; + } + void BasicBlockCombine(); void CodeLayout(); void DumpCheckStats(); @@ -655,6 +663,7 @@ class MIRGraph { int method_sreg_; unsigned int attributes_; Checkstats* checkstats_; + SpecialCaseHandler special_case_; ArenaAllocator* arena_; }; diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 630e294b37..9e9b39e3ef 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -950,16 +950,13 @@ void Mir2Lir::Materialize() { /* Allocate Registers using simple local allocation scheme */ SimpleRegAlloc(); - //FIXME: re-enable by retrieving from mir_graph - SpecialCaseHandler special_case = kNoHandler; - - if (special_case != kNoHandler) { + if (mir_graph_->IsSpecialCase()) { /* * Custom codegen for special cases. If for any reason the * special codegen doesn't succeed, first_lir_insn_ will * set to NULL; */ - SpecialMIR2LIR(special_case); + SpecialMIR2LIR(mir_graph_->GetSpecialCase()); } /* Convert MIR to LIR, etc. */ -- cgit v1.2.3-59-g8ed1b From 7934ac288acfb2552bb0b06ec1f61e5820d924a4 Mon Sep 17 00:00:00 2001 From: Brian Carlstrom Date: Fri, 26 Jul 2013 10:54:15 -0700 Subject: Fix cpplint whitespace/comments issues Change-Id: Iae286862c85fb8fd8901eae1204cd6d271d69496 --- build/Android.cpplint.mk | 2 +- compiler/dex/arena_allocator.h | 2 +- compiler/dex/compiler_enums.h | 20 +- compiler/dex/dataflow_iterator.h | 2 +- compiler/dex/dex_to_dex_compiler.cc | 4 +- compiler/dex/frontend.cc | 56 +- compiler/dex/frontend.h | 2 +- compiler/dex/local_value_numbering.h | 2 +- compiler/dex/mir_graph.cc | 12 +- compiler/dex/mir_graph.h | 6 +- compiler/dex/mir_optimization.cc | 2 +- compiler/dex/portable/mir_to_gbc.cc | 6 +- compiler/dex/quick/arm/arm_lir.h | 28 +- compiler/dex/quick/arm/call_arm.cc | 6 +- compiler/dex/quick/arm/fp_arm.cc | 2 +- compiler/dex/quick/arm/int_arm.cc | 16 +- compiler/dex/quick/arm/target_arm.cc | 2 +- compiler/dex/quick/arm/utility_arm.cc | 6 +- compiler/dex/quick/codegen_util.cc | 2 +- compiler/dex/quick/gen_common.cc | 10 +- compiler/dex/quick/gen_invoke.cc | 14 +- compiler/dex/quick/mips/call_mips.cc | 2 +- compiler/dex/quick/mips/fp_mips.cc | 4 +- compiler/dex/quick/mips/int_mips.cc | 6 +- compiler/dex/quick/mips/mips_lir.h | 58 +- compiler/dex/quick/mips/target_mips.cc | 4 +- compiler/dex/quick/mips/utility_mips.cc | 2 +- compiler/dex/quick/mir_to_lir.h | 4 +- compiler/dex/quick/ralloc_util.cc | 10 +- compiler/dex/quick/x86/assemble_x86.cc | 4 +- compiler/dex/quick/x86/call_x86.cc | 2 +- compiler/dex/quick/x86/fp_x86.cc | 2 +- compiler/dex/quick/x86/target_x86.cc | 4 +- compiler/dex/quick/x86/utility_x86.cc | 10 +- compiler/dex/quick/x86/x86_lir.h | 36 +- compiler/dex/ssa_transformation.cc | 8 +- compiler/dex/vreg_analysis.cc | 2 +- 
compiler/driver/compiler_driver.cc | 808 ++++++++++----------- compiler/driver/dex_compilation_unit.cc | 2 +- compiler/driver/dex_compilation_unit.h | 2 +- compiler/elf_fixup.cc | 32 +- compiler/elf_writer_mclinker.h | 2 +- compiler/elf_writer_quick.cc | 4 +- compiler/image_writer.cc | 6 +- compiler/jni/portable/jni_compiler.cc | 14 +- compiler/llvm/backend_types.h | 4 +- compiler/llvm/compiler_llvm.cc | 8 +- compiler/llvm/compiler_llvm.h | 4 +- compiler/llvm/gbc_expander.cc | 46 +- compiler/llvm/generated/art_module.cc | 132 ++-- compiler/llvm/intrinsic_helper.cc | 4 +- compiler/llvm/intrinsic_helper.h | 4 +- compiler/llvm/ir_builder.cc | 4 +- compiler/llvm/ir_builder.h | 4 +- compiler/llvm/llvm_compilation_unit.cc | 12 +- compiler/llvm/llvm_compilation_unit.h | 6 +- compiler/llvm/md_builder.cc | 4 +- compiler/llvm/md_builder.h | 4 +- compiler/llvm/runtime_support_builder.cc | 4 +- compiler/llvm/runtime_support_builder.h | 4 +- compiler/llvm/runtime_support_builder_arm.cc | 6 +- compiler/llvm/runtime_support_builder_arm.h | 4 +- compiler/llvm/runtime_support_builder_thumb2.cc | 6 +- compiler/llvm/runtime_support_builder_thumb2.h | 4 +- compiler/llvm/runtime_support_builder_x86.cc | 4 +- compiler/llvm/runtime_support_builder_x86.h | 4 +- compiler/llvm/runtime_support_llvm_func.h | 6 +- compiler/sea_ir/code_gen.cc | 4 +- compiler/sea_ir/code_gen.h | 8 +- compiler/sea_ir/frontend.cc | 4 +- compiler/sea_ir/instruction_nodes.h | 6 +- compiler/sea_ir/instruction_tools.cc | 2 +- compiler/sea_ir/instruction_tools.h | 2 +- compiler/sea_ir/sea.cc | 28 +- compiler/sea_ir/sea.h | 4 +- compiler/sea_ir/sea_node.h | 2 +- compiler/sea_ir/visitor.h | 4 +- compiler/stubs/portable/stubs.cc | 14 +- compiler/stubs/quick/stubs.cc | 12 +- compiler/utils/scoped_hashtable.h | 2 +- compiler/utils/scoped_hashtable_test.cc | 2 +- dalvikvm/dalvikvm.cc | 2 +- dex2oat/dex2oat.cc | 6 +- jdwpspy/Net.cpp | 2 +- oatdump/oatdump.cc | 2 +- runtime/base/histogram_test.cc | 28 +- runtime/base/logging.cc | 2 +- runtime/base/logging.h | 4 +- runtime/base/macros.h | 2 +- runtime/base/mutex-inl.h | 2 +- runtime/base/mutex.cc | 10 +- runtime/base/unix_file/mapped_file_test.cc | 2 +- runtime/check_jni.cc | 56 +- runtime/class_linker.cc | 6 +- runtime/class_linker.h | 4 +- runtime/common_test.h | 8 +- runtime/compiled_method.cc | 2 +- runtime/debugger.cc | 44 +- runtime/debugger.h | 2 +- runtime/dex_file.cc | 2 +- runtime/dex_file.h | 2 +- runtime/dex_file_verifier.cc | 2 +- runtime/dex_instruction-inl.h | 8 +- runtime/dex_instruction.cc | 8 +- runtime/dex_instruction.h | 4 +- runtime/dex_instruction_list.h | 2 +- runtime/disassembler_arm.cc | 52 +- runtime/disassembler_mips.cc | 42 +- runtime/disassembler_x86.cc | 2 +- runtime/exception_test.cc | 2 +- runtime/gc/accounting/card_table.cc | 2 +- runtime/gc/accounting/space_bitmap-inl.h | 2 +- runtime/gc/collector/mark_sweep.cc | 4 +- runtime/gc/collector/mark_sweep.h | 2 +- runtime/gc/heap.cc | 2 +- runtime/gc/space/dlmalloc_space.cc | 4 +- runtime/gc/space/space.h | 2 +- runtime/hprof/hprof.cc | 18 +- runtime/indirect_reference_table.cc | 2 +- runtime/indirect_reference_table.h | 8 +- runtime/instrumentation.cc | 14 +- runtime/intern_table.cc | 8 +- runtime/intern_table_test.cc | 2 +- runtime/interpreter/interpreter.cc | 6 +- runtime/invoke_type.h | 10 +- runtime/jdwp/jdwp.h | 8 +- runtime/jdwp/jdwp_constants.h | 8 +- runtime/jdwp/jdwp_event.cc | 6 +- runtime/jdwp/jdwp_handler.cc | 6 +- runtime/jdwp/jdwp_priv.h | 4 +- runtime/jdwp/jdwp_request.cc | 2 +- 
runtime/jdwp/object_registry.cc | 6 +- runtime/jni_internal.cc | 26 +- runtime/jvalue.h | 4 +- runtime/locks.h | 2 +- runtime/log_severity.h | 2 +- runtime/mem_map.cc | 6 +- runtime/mirror/dex_cache.h | 2 +- runtime/mirror/field.cc | 2 +- runtime/mirror/throwable.h | 2 +- runtime/monitor.cc | 6 +- runtime/native/dalvik_system_VMDebug.cc | 4 +- runtime/native/dalvik_system_Zygote.cc | 2 +- runtime/native/java_lang_Thread.cc | 2 +- runtime/oat/runtime/arm/context_arm.cc | 2 +- runtime/oat/runtime/mips/context_mips.cc | 2 +- runtime/oat/runtime/support_interpreter.cc | 2 +- runtime/oat/runtime/support_jni.cc | 2 +- runtime/oat/runtime/support_stubs.cc | 4 +- .../oat/runtime/x86/oat_support_entrypoints_x86.cc | 12 +- runtime/oat_test.cc | 2 +- runtime/reference_table.cc | 2 +- runtime/reference_table.h | 2 +- runtime/reflection.cc | 2 +- runtime/runtime.cc | 32 +- runtime/runtime_linux.cc | 2 +- runtime/runtime_support.cc | 8 +- runtime/runtime_support_llvm_func_list.h | 2 +- runtime/safe_map.h | 2 +- runtime/scoped_thread_state_change.h | 2 +- runtime/signal_catcher.cc | 2 +- runtime/stack.cc | 8 +- runtime/stack.h | 6 +- runtime/thread.cc | 20 +- runtime/thread.h | 6 +- runtime/thread_linux.cc | 2 +- runtime/thread_list.cc | 6 +- runtime/thread_list.h | 4 +- runtime/thread_state.h | 40 +- runtime/trace.cc | 14 +- runtime/utils.cc | 34 +- runtime/verifier/dex_gc_map.h | 6 +- runtime/verifier/method_verifier.cc | 16 +- runtime/verifier/method_verifier.h | 22 +- runtime/verifier/reg_type_cache.cc | 2 +- runtime/verifier/reg_type_test.cc | 2 +- runtime/zip_archive.cc | 2 +- test/ReferenceMap/stack_walk_refmap_jni.cc | 44 +- test/StackWalk/stack_walk_jni.cc | 2 +- 179 files changed, 1208 insertions(+), 1208 deletions(-) (limited to 'compiler/dex/quick/codegen_util.cc') diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk index eabaf31cca..adb87cb4e9 100644 --- a/build/Android.cpplint.mk +++ b/build/Android.cpplint.mk @@ -15,7 +15,7 @@ # ART_CPPLINT := art/tools/cpplint.py -ART_CPPLINT_FILTER := --filter=-whitespace/comments,-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf +ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf ART_CPPLINT_SRC := $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/) # "mm cpplint-art" to verify we aren't regressing diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h index 3bd733e753..e8e2c027d0 100644 --- a/compiler/dex/arena_allocator.h +++ b/compiler/dex/arena_allocator.h @@ -86,7 +86,7 @@ struct MemStats { explicit MemStats(const ArenaAllocator &arena) : arena_(arena) {} private: const ArenaAllocator &arena_; -}; // MemStats +};  // MemStats } // namespace art diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h index 88240e8c40..97a682f2aa 100644 --- a/compiler/dex/compiler_enums.h +++ b/compiler/dex/compiler_enums.h @@ -48,7 +48,7 @@ enum SpecialTargetRegister { }; enum RegLocationType { - kLocDalvikFrame = 0, // Normal Dalvik register + kLocDalvikFrame = 0,  // Normal Dalvik register kLocPhysReg, kLocCompilerTemp, kLocInvalid @@ -249,20 +249,20 @@ enum X86ConditionCode { kX86CondC = kX86CondB, // carry kX86CondNb = 0x3, // not-below - kX86CondAe = kX86CondNb, // above-equal - kX86CondNc
= kX86CondNb, // not-carry + kX86CondAe = kX86CondNb,  // above-equal + kX86CondNc = kX86CondNb,  // not-carry kX86CondZ = 0x4, // zero kX86CondEq = kX86CondZ, // equal kX86CondNz = 0x5, // not-zero - kX86CondNe = kX86CondNz, // not-equal + kX86CondNe = kX86CondNz,  // not-equal kX86CondBe = 0x6, // below-equal - kX86CondNa = kX86CondBe, // not-above + kX86CondNa = kX86CondBe,  // not-above kX86CondNbe = 0x7, // not-below-equal - kX86CondA = kX86CondNbe,// above + kX86CondA = kX86CondNbe,  // above kX86CondS = 0x8, // sign kX86CondNs = 0x9, // not-sign @@ -277,13 +277,13 @@ enum X86ConditionCode { kX86CondNge = kX86CondL, // not-greater-equal kX86CondNl = 0xd, // not-less-than - kX86CondGe = kX86CondNl, // not-greater-equal + kX86CondGe = kX86CondNl,  // not-greater-equal kX86CondLe = 0xe, // less-than-equal - kX86CondNg = kX86CondLe, // not-greater + kX86CondNg = kX86CondLe,  // not-greater kX86CondNle = 0xf, // not-less-than - kX86CondG = kX86CondNle,// greater + kX86CondG = kX86CondNle,  // greater }; std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind); @@ -349,7 +349,7 @@ enum OpFeatureFlags { kIsIT, kMemLoad, kMemStore, - kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes. + kPCRelFixup,  // x86 FIXME: add NEEDS_FIXUP to instruction attributes. kRegDef0, kRegDef1, kRegDefA, diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index 847a614727..da44ffd99c 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -80,7 +80,7 @@ namespace art { GrowableArray<int>* block_id_list_; int idx_; bool changed_; - }; // DataflowIterator + };  // DataflowIterator class ReachableNodesIterator : public DataflowIterator { public: diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 28c325726e..3c491ce20f 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -240,12 +240,12 @@ Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) { // We are modifying 4 consecutive bytes. ScopedDexWriteAccess sdwa(GetModifiableDexFile(), inst, 4u); inst->SetOpcode(Instruction::NOP); - inst->SetVRegA_10x(0u); // keep compliant with verifier. + inst->SetVRegA_10x(0u);  // keep compliant with verifier. // Get to next instruction which is the second half of check-cast and replace // it by a NOP. inst = const_cast<Instruction*>(inst->Next()); inst->SetOpcode(Instruction::NOP); - inst->SetVRegA_10x(0u); // keep compliant with verifier. + inst->SetVRegA_10x(0u);  // keep compliant with verifier. return inst; } diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 113a80a96c..9cc4d18d37 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -72,37 +72,37 @@ extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& compiler) { } /* Default optimizer/debug setting for the compiler.
*/ -static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations +static uint32_t kCompilerOptimizerDisableFlags = 0 |  // Disable specific optimizations (1 << kLoadStoreElimination) | - //(1 << kLoadHoisting) | - //(1 << kSuppressLoads) | - //(1 << kNullCheckElimination) | - //(1 << kPromoteRegs) | - //(1 << kTrackLiveTemps) | - //(1 << kSafeOptimizations) | - //(1 << kBBOpt) | - //(1 << kMatch) | - //(1 << kPromoteCompilerTemps) | + // (1 << kLoadHoisting) | + // (1 << kSuppressLoads) | + // (1 << kNullCheckElimination) | + // (1 << kPromoteRegs) | + // (1 << kTrackLiveTemps) | + // (1 << kSafeOptimizations) | + // (1 << kBBOpt) | + // (1 << kMatch) | + // (1 << kPromoteCompilerTemps) | 0; static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes - //(1 << kDebugDisplayMissingTargets) | - //(1 << kDebugVerbose) | - //(1 << kDebugDumpCFG) | - //(1 << kDebugSlowFieldPath) | - //(1 << kDebugSlowInvokePath) | - //(1 << kDebugSlowStringPath) | - //(1 << kDebugSlowestFieldPath) | - //(1 << kDebugSlowestStringPath) | - //(1 << kDebugExerciseResolveMethod) | - //(1 << kDebugVerifyDataflow) | - //(1 << kDebugShowMemoryUsage) | - //(1 << kDebugShowNops) | - //(1 << kDebugCountOpcodes) | - //(1 << kDebugDumpCheckStats) | - //(1 << kDebugDumpBitcodeFile) | - //(1 << kDebugVerifyBitcode) | - //(1 << kDebugShowSummaryMemoryUsage) | + // (1 << kDebugDisplayMissingTargets) | + // (1 << kDebugVerbose) | + // (1 << kDebugDumpCFG) | + // (1 << kDebugSlowFieldPath) | + // (1 << kDebugSlowInvokePath) | + // (1 << kDebugSlowStringPath) | + // (1 << kDebugSlowestFieldPath) | + // (1 << kDebugSlowestStringPath) | + // (1 << kDebugExerciseResolveMethod) | + // (1 << kDebugVerifyDataflow) | + // (1 << kDebugShowMemoryUsage) | + // (1 << kDebugShowNops) | + // (1 << kDebugCountOpcodes) | + // (1 << kDebugDumpCheckStats) | + // (1 << kDebugDumpBitcodeFile) | + // (1 << kDebugVerifyBitcode) | + // (1 << kDebugShowSummaryMemoryUsage) | 0; static CompiledMethod* CompileMethod(CompilerDriver& compiler, @@ -277,7 +277,7 @@ CompiledMethod* CompileOneMethod(CompilerDriver& compiler, #if defined(ART_USE_PORTABLE_COMPILER) , llvm_compilation_unit #endif - ); // NOLINT(whitespace/parens) + );  // NOLINT(whitespace/parens) } } // namespace art diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h index a86338950c..5c68ab4244 100644 --- a/compiler/dex/frontend.h +++ b/compiler/dex/frontend.h @@ -102,7 +102,7 @@ class LLVMInfo { private: UniquePtr< ::llvm::LLVMContext> llvm_context_; - ::llvm::Module* llvm_module_; // Managed by context_. + ::llvm::Module* llvm_module_;  // Managed by context_.
UniquePtr<art::llvm::IntrinsicHelper> intrinsic_helper_; UniquePtr<art::llvm::IRBuilder> ir_builder_; }; diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h index e3fd7ad2da..33ca8f1ad8 100644 --- a/compiler/dex/local_value_numbering.h +++ b/compiler/dex/local_value_numbering.h @@ -137,6 +137,6 @@ class LocalValueNumbering { std::set<uint16_t> null_checked_; }; -} // namespace art +}  // namespace art #endif // ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_ diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 264604c355..6b010ed9b3 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -972,23 +972,23 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { } } switch (dalvik_format) { - case Instruction::k11n: // Add one immediate from vB + case Instruction::k11n:  // Add one immediate from vB case Instruction::k21s: case Instruction::k31i: case Instruction::k21h: str.append(StringPrintf(", #%d", insn.vB)); break; - case Instruction::k51l: // Add one wide immediate + case Instruction::k51l:  // Add one wide immediate str.append(StringPrintf(", #%lld", insn.vB_wide)); break; - case Instruction::k21c: // One register, one string/type/method index + case Instruction::k21c:  // One register, one string/type/method index case Instruction::k31c: str.append(StringPrintf(", index #%d", insn.vB)); break; - case Instruction::k22c: // Two registers, one string/type/method index + case Instruction::k22c:  // Two registers, one string/type/method index str.append(StringPrintf(", index #%d", insn.vC)); break; - case Instruction::k22s: // Add one immediate from vC + case Instruction::k22s:  // Add one immediate from vC case Instruction::k22b: str.append(StringPrintf(", #%d", insn.vC)); break; @@ -1154,4 +1154,4 @@ BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) { return bb; } -} // namespace art +}  // namespace art diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index 342d2a296a..e9ec949f23 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -273,7 +273,7 @@ struct RegLocation { unsigned fp:1; // Floating point? unsigned core:1; // Non-floating point? unsigned ref:1; // Something GC cares about. - unsigned high_word:1; // High word of pair? + unsigned high_word:1;  // High word of pair? unsigned home:1; // Does this represent the home location? uint8_t low_reg; // First physical register. uint8_t high_reg; // 2nd physical register (if wide). @@ -650,7 +650,7 @@ class MIRGraph { BasicBlock* cur_block_; int num_blocks_; const DexFile::CodeItem* current_code_item_; - SafeMap<unsigned int, BasicBlock*> block_map_; // FindBlock lookup cache. + SafeMap<unsigned int, BasicBlock*> block_map_;  // FindBlock lookup cache. std::vector<DexCompilationUnit*> m_units_; // List of methods included in this graph typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset) std::vector<MIRLocation> method_stack_; // Include stack @@ -659,7 +659,7 @@ class MIRGraph { int def_count_; // Used to estimate size of ssa name storage. int* opcode_count_; // Dex opcode coverage stats. int num_ssa_regs_; // Number of names following SSA transformation. - std::vector<BasicBlock*> extended_basic_blocks_; // Heads of block "traces". + std::vector<BasicBlock*> extended_basic_blocks_;  // Heads of block "traces".
int method_sreg_; unsigned int attributes_; Checkstats* checkstats_; diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index d79b26e4b9..a6314f4cab 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -845,7 +845,7 @@ bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) { bb = NextDominatedBlock(bb); } } - return false; // Not iterative - return value will be ignored + return false; // Not iterative - return value will be ignored } diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc index 6fc01bdff2..7831cf6f7a 100644 --- a/compiler/dex/portable/mir_to_gbc.cc +++ b/compiler/dex/portable/mir_to_gbc.cc @@ -1648,7 +1648,7 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { if (bb->block_type == kEntryBlock) { SetMethodInfo(); - { // Allocate shadowframe. + { // Allocate shadowframe. art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::AllocaShadowFrame; ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id); @@ -1656,7 +1656,7 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { irb_->CreateCall(func, entries); } - { // Store arguments to vregs. + { // Store arguments to vregs. uint16_t arg_reg = cu_->num_regs; ::llvm::Function::arg_iterator arg_iter(func_->arg_begin()); @@ -1666,7 +1666,7 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) { uint32_t shorty_size = strlen(shorty); CHECK_GE(shorty_size, 1u); - ++arg_iter; // skip method object + ++arg_iter; // skip method object if ((cu_->access_flags & kAccStatic) == 0) { SetVregOnValue(arg_iter, arg_reg); diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h index 93fee05e4e..2f54190ae7 100644 --- a/compiler/dex/quick/arm/arm_lir.h +++ b/compiler/dex/quick/arm/arm_lir.h @@ -239,7 +239,7 @@ enum ArmShiftEncodings { */ enum ArmOpcode { kArmFirst = 0, - kArm16BitData = kArmFirst, // DATA [0] rd[15..0]. + kArm16BitData = kArmFirst, // DATA [0] rd[15..0]. kThumbAdcRR, // adc [0100000101] rm[5..3] rd[2..0]. kThumbAddRRI3, // add(1) [0001110] imm_3[8..6] rn[5..3] rd[2..0]*/ kThumbAddRI8, // add(2) [00110] rd[10..8] imm_8[7..0]. @@ -332,12 +332,12 @@ enum ArmOpcode { kThumb2VcvtDF, // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0]. kThumb2Vsqrts, // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0]. kThumb2Vsqrtd, // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0]. - kThumb2MovImmShift,// mov(T2) rd, # [11110] i [00001001111] imm3 rd[11..8] imm8. + kThumb2MovImmShift, // mov(T2) rd, # [11110] i [00001001111] imm3 rd[11..8] imm8. kThumb2MovImm16, // mov(T3) rd, # [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8. kThumb2StrRRI12, // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0]. kThumb2LdrRRI12, // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0]. - kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0]*/ - kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0]*/ + kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0]*/ + kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0]*/ kThumb2Cbnz, // cbnz rd,
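Two techniques recur across the patches above and may be worth unpacking for readers following along. Neither sketch below is ART code; every type, function, and variable name in them is invented for illustration.

First, the readability/casting conversions. cpplint flags C-style casts because one syntax hides several distinct operations. Each rewrite in the casting CL substitutes the narrowest C++ named cast that expresses the intent: static_cast for value conversions, reinterpret_cast for pointer reinterpretation, and reinterpret_cast to uintptr_t for pointer arithmetic (as in GetSwapMutex and MemMap::ProtectRegion). A minimal standalone sketch of the three patterns:

// Illustrative only; not part of any patch above. All names are made up.
#include <cstdint>
#include <cstdio>

struct EntryLike { uint32_t bits[2]; };  // Stand-in for a struct handed to an untyped API.

int main() {
  // 1. Value conversion: "(int)(offset)" becomes static_cast<int>(offset),
  //    mirroring the hprof U*_TO_BUF_BE macros.
  double offset = 96.0;
  int offset_ = static_cast<int>(offset);

  // 2. Pointer reinterpretation: "(ldt_entry*)(void*)(&entry)" collapses to a
  //    single reinterpret_cast, mirroring Thread::InitCpu; the (void*) hop adds nothing.
  EntryLike entry = {{0x12345678u, 0x9abcdef0u}};
  uint8_t* raw = reinterpret_cast<uint8_t*>(&entry);

  // 3. Pointer-to-integer: "(unsigned)(void*)(addr)" becomes
  //    reinterpret_cast<uintptr_t>(addr), mirroring GetSwapMutex; uintptr_t also
  //    avoids truncation on 64-bit targets, where unsigned is narrower than a pointer.
  uintptr_t addr = reinterpret_cast<uintptr_t>(raw);

  std::printf("%d %u %u\n", offset_, static_cast<unsigned>(raw[0]),
              static_cast<unsigned>(addr % 32u));
  return 0;
}

The same recipe explains the DumpResourceMask call sites, where the cast is simply dropped: lir already has type LIR*, so no cast of any kind is needed.

Second, the pattern-matching CL is, structurally, a cache-the-analysis-result refactor: the match result moves from a function-local (which the deleted FIXME admitted was never wired up) onto MIRGraph, so Mir2Lir::Materialize() can query it later. A compressed sketch of that shape, again with invented names rather than the real ART classes:

// Illustrative only. The real matcher walks opcode patterns in InlineMethod().
enum SpecialCaseHandlerLike { kNoHandlerLike, kConstFunctionLike };

class MIRGraphLike {
 public:
  // Called during basic-block construction; records which pattern (if any) matched.
  void RecordMatch(bool looks_like_const_function) {
    special_case_ = looks_like_const_function ? kConstFunctionLike : kNoHandlerLike;
  }
  // Queried later, at codegen time, without re-running the match.
  bool IsSpecialCase() const { return special_case_ != kNoHandlerLike; }
  SpecialCaseHandlerLike GetSpecialCase() const { return special_case_; }

 private:
  SpecialCaseHandlerLike special_case_ = kNoHandlerLike;
};

int main() {
  MIRGraphLike graph;
  graph.RecordMatch(/* looks_like_const_function= */ true);
  return graph.IsSpecialCase() ? static_cast<int>(graph.GetSpecialCase()) : 0;
}

The fallback path is what makes the optimization safe to re-enable: per the comment in Materialize(), if SpecialMIR2LIR() cannot handle the method it leaves first_lir_insn_ NULL, and compilation simply proceeds through the ordinary MIR-to-LIR conversion that follows.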