summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/Android.common_build.mk16
-rw-r--r--compiler/compilers.cc159
-rw-r--r--compiler/dex/compiler_enums.h31
-rw-r--r--compiler/dex/quick/arm/int_arm.cc8
-rw-r--r--compiler/dex/quick/arm64/arm64_lir.h3
-rw-r--r--compiler/dex/quick/arm64/assemble_arm64.cc44
-rw-r--r--compiler/dex/quick/arm64/int_arm64.cc13
-rw-r--r--compiler/dex/quick/mir_to_lir.cc10
-rwxr-xr-xcompiler/dex/quick/x86/target_x86.cc8
-rw-r--r--compiler/dex/quick/x86/utility_x86.cc1
-rw-r--r--compiler/image_test.cc5
-rw-r--r--compiler/image_writer.cc43
-rw-r--r--compiler/llvm/ir_builder.h2
-rw-r--r--compiler/optimizing/code_generator.cc4
-rw-r--r--compiler/optimizing/code_generator_arm.cc6
-rw-r--r--compiler/optimizing/code_generator_arm.h3
-rw-r--r--compiler/optimizing/code_generator_arm64.cc6
-rw-r--r--compiler/optimizing/code_generator_arm64.h4
-rw-r--r--compiler/optimizing/code_generator_x86.cc8
-rw-r--r--compiler/optimizing/code_generator_x86.h3
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc6
-rw-r--r--compiler/optimizing/code_generator_x86_64.h3
-rw-r--r--compiler/optimizing/constant_folding.h6
-rw-r--r--compiler/optimizing/constant_folding_test.cc5
-rw-r--r--compiler/optimizing/dead_code_elimination.h6
-rw-r--r--compiler/optimizing/dead_code_elimination_test.cc3
-rw-r--r--compiler/optimizing/gvn.h10
-rw-r--r--compiler/optimizing/instruction_simplifier.cc19
-rw-r--r--compiler/optimizing/instruction_simplifier.h13
-rw-r--r--compiler/optimizing/locations.h8
-rw-r--r--compiler/optimizing/optimization.cc6
-rw-r--r--compiler/optimizing/optimization.h13
-rw-r--r--compiler/optimizing/optimizing_compiler.cc32
-rw-r--r--compiler/optimizing/register_allocator.cc34
-rw-r--r--compiler/optimizing/ssa_phi_elimination.h17
-rw-r--r--compiler/utils/arm/assembler_arm32.cc2
-rw-r--r--compiler/utils/arm/assembler_thumb2.cc2
-rw-r--r--compiler/utils/arm64/assembler_arm64.cc2
-rw-r--r--compiler/utils/x86/assembler_x86.cc2
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc2
-rw-r--r--dex2oat/dex2oat.cc198
-rw-r--r--disassembler/disassembler.h10
-rw-r--r--disassembler/disassembler_arm64.cc80
-rw-r--r--disassembler/disassembler_arm64.h30
-rw-r--r--oatdump/oatdump.cc7
-rw-r--r--patchoat/patchoat.cc26
-rw-r--r--runtime/Android.mk1
-rw-r--r--runtime/arch/arm/instruction_set_features_arm.cc10
-rw-r--r--runtime/arch/arm64/instruction_set_features_arm64.cc5
-rw-r--r--runtime/arch/mips/instruction_set_features_mips.cc4
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.cc4
-rw-r--r--runtime/base/scoped_flock.cc8
-rw-r--r--runtime/base/unix_file/fd_file.cc105
-rw-r--r--runtime/base/unix_file/fd_file.h59
-rw-r--r--runtime/base/unix_file/fd_file_test.cc5
-rw-r--r--runtime/base/unix_file/random_access_file_test.h9
-rw-r--r--runtime/base/unix_file/random_access_file_utils_test.cc4
-rw-r--r--runtime/class_linker.cc24
-rw-r--r--runtime/class_linker.h4
-rw-r--r--runtime/common_runtime_test.cc9
-rw-r--r--runtime/debugger.cc145
-rw-r--r--runtime/debugger.h3
-rw-r--r--runtime/dex_file_test.cc3
-rw-r--r--runtime/dex_file_verifier_test.cc6
-rw-r--r--runtime/gc/heap.cc11
-rw-r--r--runtime/gc/space/image_space.cc48
-rw-r--r--runtime/hprof/hprof.cc9
-rw-r--r--runtime/jdwp/jdwp_event.cc4
-rw-r--r--runtime/jni_internal_test.cc115
-rw-r--r--runtime/signal_catcher.cc12
-rw-r--r--runtime/trace.cc9
-rw-r--r--runtime/utils.cc11
-rw-r--r--runtime/zip_archive_test.cc2
-rw-r--r--test/083-compiler-regressions/expected.txt1
-rw-r--r--test/083-compiler-regressions/src/Main.java26
-rw-r--r--test/430-live-register-slow-path/expected.txt0
-rw-r--r--test/430-live-register-slow-path/info.txt2
-rw-r--r--test/430-live-register-slow-path/src/Main.java39
-rw-r--r--test/800-smali/expected.txt1
-rw-r--r--test/800-smali/smali/BadCaseInOpRegRegReg.smali13
-rw-r--r--test/800-smali/src/Main.java1
-rw-r--r--test/Android.run-test.mk11
82 files changed, 1088 insertions, 554 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 7cb23f8f19..7b38e5ed4c 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -244,7 +244,7 @@ art_target_non_debug_cflags := $(art_non_debug_cflags)
ifeq ($(HOST_OS),linux)
# Larger frame-size for host clang builds today
ifndef SANITIZE_HOST
- art_host_non_debug_cflags += -Wframe-larger-than=2600
+ art_host_non_debug_cflags += -Wframe-larger-than=2700
endif
art_target_non_debug_cflags += -Wframe-larger-than=1728
endif
@@ -252,7 +252,7 @@ endif
ifndef LIBART_IMG_HOST_BASE_ADDRESS
$(error LIBART_IMG_HOST_BASE_ADDRESS unset)
endif
-ART_HOST_CFLAGS += $(art_cflags) -DANDROID_SMP=1 -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
+ART_HOST_CFLAGS += $(art_cflags) -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
@@ -283,18 +283,6 @@ endif
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA)
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
-ifeq ($(TARGET_CPU_SMP),true)
- ART_TARGET_CFLAGS += -DANDROID_SMP=1
-else
- ifeq ($(TARGET_CPU_SMP),false)
- ART_TARGET_CFLAGS += -DANDROID_SMP=0
- else
- $(warning TARGET_CPU_SMP should be (true|false), found $(TARGET_CPU_SMP))
- # Make sure we emit barriers for the worst case.
- ART_TARGET_CFLAGS += -DANDROID_SMP=1
- endif
-endif
-
# To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16"
# ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
deleted file mode 100644
index 2481128e4d..0000000000
--- a/compiler/compilers.cc
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compilers.h"
-
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir.h"
-#include "elf_writer_quick.h"
-#include "mirror/art_method-inl.h"
-
-namespace art {
-
-extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver* driver);
-extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver* driver);
-extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver* driver,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
-
-extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver* driver,
- uint32_t access_flags, uint32_t method_idx,
- const art::DexFile& dex_file);
-
-// Hack for CFI CIE initialization
-extern std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64);
-
-void QuickCompiler::Init() const {
- ArtInitQuickCompilerContext(GetCompilerDriver());
-}
-
-void QuickCompiler::UnInit() const {
- ArtUnInitQuickCompilerContext(GetCompilerDriver());
-}
-
-CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
- CompiledMethod* method = TryCompileWithSeaIR(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- if (method != nullptr) {
- return method;
- }
-
- return ArtQuickCompileMethod(GetCompilerDriver(),
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
-}
-
-CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) const {
- return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
-}
-
-uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- size_t pointer_size = InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet());
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
- pointer_size));
-}
-
-bool QuickCompiler::WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host) const {
- return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host,
- *GetCompilerDriver());
-}
-
-Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
- Mir2Lir* mir_to_lir = nullptr;
- switch (cu->instruction_set) {
- case kThumb2:
- mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- case kArm64:
- mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- case kMips:
- mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- case kX86:
- // Fall-through.
- case kX86_64:
- mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- default:
- LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
- }
-
- /* The number of compiler temporaries depends on backend so set it up now if possible */
- if (mir_to_lir) {
- size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
- bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
- CHECK(set_max);
- }
- return mir_to_lir;
-}
-
-std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
- const CompilerDriver& driver) const {
- if (driver.GetInstructionSet() == kX86) {
- return X86CFIInitialization(false);
- }
- if (driver.GetInstructionSet() == kX86_64) {
- return X86CFIInitialization(true);
- }
- return nullptr;
-}
-
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
- CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
- method_idx, class_loader, dex_file);
- if (method != nullptr) {
- return method;
- }
-
- return QuickCompiler::Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
- class_loader, dex_file);
-}
-
-} // namespace art
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 5d877fdf80..b56fd6f5c7 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -610,21 +610,22 @@ std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
// LIR fixup kinds for Arm
enum FixupKind {
kFixupNone,
- kFixupLabel, // For labels we just adjust the offset.
- kFixupLoad, // Mostly for immediates.
- kFixupVLoad, // FP load which *may* be pc-relative.
- kFixupCBxZ, // Cbz, Cbnz.
- kFixupTBxZ, // Tbz, Tbnz.
- kFixupPushPop, // Not really pc relative, but changes size based on args.
- kFixupCondBranch, // Conditional branch
- kFixupT1Branch, // Thumb1 Unconditional branch
- kFixupT2Branch, // Thumb2 Unconditional branch
- kFixupBlx1, // Blx1 (start of Blx1/Blx2 pair).
- kFixupBl1, // Bl1 (start of Bl1/Bl2 pair).
- kFixupAdr, // Adr.
- kFixupMovImmLST, // kThumb2MovImm16LST.
- kFixupMovImmHST, // kThumb2MovImm16HST.
- kFixupAlign4, // Align to 4-byte boundary.
+ kFixupLabel, // For labels we just adjust the offset.
+ kFixupLoad, // Mostly for immediates.
+ kFixupVLoad, // FP load which *may* be pc-relative.
+ kFixupCBxZ, // Cbz, Cbnz.
+ kFixupTBxZ, // Tbz, Tbnz.
+ kFixupPushPop, // Not really pc relative, but changes size based on args.
+ kFixupCondBranch, // Conditional branch
+ kFixupT1Branch, // Thumb1 Unconditional branch
+ kFixupT2Branch, // Thumb2 Unconditional branch
+ kFixupBlx1, // Blx1 (start of Blx1/Blx2 pair).
+ kFixupBl1, // Bl1 (start of Bl1/Bl2 pair).
+ kFixupAdr, // Adr.
+ kFixupMovImmLST, // kThumb2MovImm16LST.
+ kFixupMovImmHST, // kThumb2MovImm16HST.
+ kFixupAlign4, // Align to 4-byte boundary.
+ kFixupA53Erratum835769, // Cortex A53 Erratum 835769.
};
std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 57544b5187..cab039bfd4 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -16,6 +16,7 @@
/* This file contains codegen for the Thumb2 ISA. */
+#include "arch/instruction_set_features.h"
#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
@@ -1119,7 +1120,9 @@ LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targe
}
bool ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP != 0
+ if (!cu_->GetInstructionSetFeatures()->IsSmp()) {
+ return false;
+ }
// Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
LIR* barrier = last_lir_insn_;
@@ -1149,9 +1152,6 @@ bool ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
DCHECK(!barrier->flags.use_def_invalid);
barrier->u.m.def_mask = &kEncodeAll;
return ret;
-#else
- return false;
-#endif
}
void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 973279e8b7..f8a7310c20 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -320,6 +320,7 @@ enum A64Opcode {
kA64Mul3rrr, // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0].
kA64Msub4rrrr, // msub[s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0].
kA64Neg3rro, // neg alias of "sub arg0, rzr, arg1, arg2".
+ kA64Nop0, // nop alias of "hint #0" [11010101000000110010000000011111].
kA64Orr3Rrl, // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Orr4rrro, // orr [s0101010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
kA64Ret, // ret [11010110010111110000001111000000].
@@ -332,7 +333,7 @@ enum A64Opcode {
kA64Scvtf2fw, // scvtf [000111100s100010000000] rn[9-5] rd[4-0].
kA64Scvtf2fx, // scvtf [100111100s100010000000] rn[9-5] rd[4-0].
kA64Sdiv3rrr, // sdiv[s0011010110] rm[20-16] [000011] rn[9-5] rd[4-0].
- kA64Smaddl4xwwx, // smaddl [10011011001] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0].
+ kA64Smull3xww, // smull [10011011001] rm[20-16] [011111] rn[9-5] rd[4-0].
kA64Smulh3xxx, // smulh [10011011010] rm[20-16] [011111] rn[9-5] rd[4-0].
kA64Stp4ffXD, // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64Stp4rrXD, // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 9cdabf18f0..cab11cc4a5 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -14,8 +14,10 @@
* limitations under the License.
*/
-#include "arm64_lir.h"
#include "codegen_arm64.h"
+
+#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arm64_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
namespace art {
@@ -468,13 +470,17 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"mul", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000),
- kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 14, 10,
- kFmtRegR, 20, 16, IS_QUAD_OP | REG_DEF0_USE123,
- "msub", "!0r, !1r, !3r, !2r", kFixupNone),
+ kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
+ kFmtRegR, 14, 10, IS_QUAD_OP | REG_DEF0_USE123 | NEEDS_FIXUP,
+ "msub", "!0r, !1r, !2r, !3r", kFixupA53Erratum835769),
ENCODING_MAP(WIDE(kA64Neg3rro), SF_VARIANTS(0x4b0003e0),
kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtShift, -1, -1,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
"neg", "!0r, !1r!2o", kFixupNone),
+ ENCODING_MAP(kA64Nop0, NO_VARIANTS(0xd503201f),
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "nop", "", kFixupNone),
ENCODING_MAP(WIDE(kA64Orr3Rrl), SF_VARIANTS(0x32000000),
kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
@@ -523,10 +529,10 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"sdiv", "!0r, !1r, !2r", kFixupNone),
- ENCODING_MAP(WIDE(kA64Smaddl4xwwx), NO_VARIANTS(0x9b200000),
+ ENCODING_MAP(kA64Smull3xww, NO_VARIANTS(0x9b207c00),
kFmtRegX, 4, 0, kFmtRegW, 9, 5, kFmtRegW, 20, 16,
- kFmtRegX, 14, 10, IS_QUAD_OP | REG_DEF0_USE123,
- "smaddl", "!0x, !1w, !2w, !3x", kFixupNone),
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "smull", "!0x, !1w, !2w", kFixupNone),
ENCODING_MAP(kA64Smulh3xxx, NO_VARIANTS(0x9b407c00),
kFmtRegX, 4, 0, kFmtRegX, 9, 5, kFmtRegX, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -988,6 +994,30 @@ void Arm64Mir2Lir::AssembleLIR() {
lir->operands[1] = delta;
break;
}
+ case kFixupA53Erratum835769:
+ // Avoid emitting code that could trigger Cortex A53's erratum 835769.
+ // This fixup should be carried out for all multiply-accumulate instructions: madd, msub,
+ // smaddl, smsubl, umaddl and umsubl.
+ if (cu_->GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()
+ ->NeedFixCortexA53_835769()) {
+ // Check that this is a 64-bit multiply-accumulate.
+ if (IS_WIDE(lir->opcode)) {
+ uint64_t prev_insn_flags = EncodingMap[UNWIDE(lir->prev->opcode)].flags;
+ // Check that the instruction preceding the multiply-accumulate is a load or store.
+ if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
+ // insert a NOP between the load/store and the multiply-accumulate.
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+ new_lir->offset = lir->offset;
+ new_lir->flags.fixup = kFixupNone;
+ new_lir->flags.size = EncodingMap[kA64Nop0].size;
+ InsertLIRBefore(lir, new_lir);
+ lir->offset += new_lir->flags.size;
+ offset_adjustment += new_lir->flags.size;
+ res = kRetryAll;
+ }
+ }
+ }
+ break;
default:
LOG(FATAL) << "Unexpected case " << lir->flags.fixup;
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 8a5a58c949..0e00698388 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -16,6 +16,7 @@
/* This file contains codegen for the Thumb2 ISA. */
+#include "arch/instruction_set_features.h"
#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
@@ -427,8 +428,7 @@ bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_d
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
RegStorage r_long_mul = AllocTemp();
- NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
- r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
+ NewLIR3(kA64Smull3xww, As64BitReg(r_long_mul).GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
switch (pattern) {
case Divide3:
OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
@@ -648,7 +648,7 @@ RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegS
}
OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
- r_src1.GetReg(), r_src2.GetReg());
+ r_src2.GetReg(), r_src1.GetReg());
FreeTemp(temp);
}
return rl_result;
@@ -979,7 +979,9 @@ LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* tar
}
bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP != 0
+ if (!cu_->GetInstructionSetFeatures()->IsSmp()) {
+ return false;
+ }
// Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
LIR* barrier = last_lir_insn_;
@@ -1015,9 +1017,6 @@ bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
DCHECK(!barrier->flags.use_def_invalid);
barrier->u.m.def_mask = &kEncodeAll;
return ret;
-#else
- return false;
-#endif
}
void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c945f7f891..70ef991dee 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -201,6 +201,16 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
RegStorage reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
+ if (cu_->instruction_set == kX86) {
+ // Can't handle double split between reg & memory. Flush reg half to memory.
+ if (rl_dest.reg.IsDouble() && (reg_arg_low.Valid() != reg_arg_high.Valid())) {
+ DCHECK(reg_arg_low.Valid());
+ DCHECK(!reg_arg_high.Valid());
+ Store32Disp(TargetPtrReg(kSp), offset, reg_arg_low);
+ reg_arg_low = RegStorage::InvalidReg();
+ }
+ }
+
if (reg_arg_low.Valid() && reg_arg_high.Valid()) {
OpRegCopyWide(rl_dest.reg, RegStorage::MakeRegPair(reg_arg_low, reg_arg_high));
} else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index f5f71132e4..ead31b37b6 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -18,6 +18,7 @@
#include <inttypes.h>
#include <string>
+#include "arch/instruction_set_features.h"
#include "backend_x86.h"
#include "codegen_x86.h"
#include "dex/compiler_internals.h"
@@ -594,7 +595,9 @@ bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
}
bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP != 0
+ if (!cu_->GetInstructionSetFeatures()->IsSmp()) {
+ return false;
+ }
// Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
LIR* mem_barrier = last_lir_insn_;
@@ -630,9 +633,6 @@ bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
mem_barrier->u.m.def_mask = &kEncodeAll;
}
return ret;
-#else
- return false;
-#endif
}
void X86Mir2Lir::CompilerInitializeRegAlloc() {
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index c1c79caa19..ad3222cd84 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -488,6 +488,7 @@ LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
case kOpAdc:
case kOpAnd:
case kOpXor:
+ case kOpMul:
break;
default:
LOG(FATAL) << "Bad case in OpRegRegReg " << op;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 7e2be3ee0c..dac1ef4c06 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -105,13 +105,16 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(success_image);
bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin());
ASSERT_TRUE(success_fixup);
+
+ ASSERT_EQ(dup_oat->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
+ << oat_file.GetFilename();
}
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
ASSERT_TRUE(file.get() != NULL);
ImageHeader image_header;
- file->ReadFully(&image_header, sizeof(image_header));
+ ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
ASSERT_TRUE(image_header.IsValid());
ASSERT_GE(image_header.GetImageBitmapOffset(), sizeof(image_header));
ASSERT_NE(0U, image_header.GetImageBitmapSize());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 64d2de1b45..b03727b068 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -149,6 +149,11 @@ bool ImageWriter::Write(const std::string& image_filename,
SetOatChecksumFromElfFile(oat_file.get());
+ if (oat_file->FlushCloseOrErase() != 0) {
+ LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location;
+ return false;
+ }
+
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
if (image_file.get() == NULL) {
@@ -157,6 +162,7 @@ bool ImageWriter::Write(const std::string& image_filename,
}
if (fchmod(image_file->Fd(), 0644) != 0) {
PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
+ image_file->Erase();
return EXIT_FAILURE;
}
@@ -164,6 +170,7 @@ bool ImageWriter::Write(const std::string& image_filename,
CHECK_EQ(image_end_, image_header->GetImageSize());
if (!image_file->WriteFully(image_->Begin(), image_end_)) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
+ image_file->Erase();
return false;
}
@@ -173,9 +180,14 @@ bool ImageWriter::Write(const std::string& image_filename,
image_header->GetImageBitmapSize(),
image_header->GetImageBitmapOffset())) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
+ image_file->Erase();
return false;
}
+ if (image_file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
+ return false;
+ }
return true;
}
@@ -315,23 +327,16 @@ class StringLengthComparator {
Handle<mirror::ObjectArray<mirror::String>> strings_;
};
-// If string a is a prefix of b or b is a prefix of a then they are considered equal. This
-// enables us to find prefixes instead of exact matches. Otherwise we do a normal string
-// comparison. The strings compared of the form <position, length> inside of the chars_ array.
+// Normal string < comparison through the chars_ array.
class SubstringComparator {
public:
explicit SubstringComparator(const std::vector<uint16_t>* const chars) : chars_(chars) {
}
bool operator()(const std::pair<size_t, size_t>& a, const std::pair<size_t, size_t>& b) {
- size_t compare_length = std::min(a.second, b.second);
- const uint16_t* ptr_a = &chars_->at(a.first);
- const uint16_t* ptr_b = &chars_->at(b.first);
- for (size_t i = 0; i < compare_length; ++i) {
- if (ptr_a[i] != ptr_b[i]) {
- return ptr_a[i] < ptr_b[i];
- }
- }
- return false;
+ return std::lexicographical_compare(chars_->begin() + a.first,
+ chars_->begin() + a.first + a.second,
+ chars_->begin() + b.first,
+ chars_->begin() + b.first + b.second);
}
private:
@@ -387,11 +392,15 @@ void ImageWriter::ProcessStrings() {
// Try to see if the string exists as a prefix of an existing string.
size_t new_offset = 0;
std::pair<size_t, size_t> new_string(num_chars - length, length);
- auto it = existing_strings.find(new_string);
+ auto it = existing_strings.lower_bound(new_string);
+ bool is_prefix = false;
if (it != existing_strings.end()) {
- for (size_t j = 0; j < length; ++j) {
- DCHECK_EQ(combined_chars[it->first + j], s->CharAt(j));
- }
+ CHECK_LE(length, it->second);
+ is_prefix = std::equal(combined_chars.begin() + it->first,
+ combined_chars.begin() + it->first + it->second,
+ combined_chars.begin() + new_string.first);
+ }
+ if (is_prefix) {
// Shares a prefix, set the offset to where the new offset will be.
new_offset = it->first;
// Remove the added chars.
@@ -413,7 +422,7 @@ void ImageWriter::ProcessStrings() {
for (size_t i = 0; i < total_strings; ++i) {
strings->GetWithoutChecks(i)->SetArray(array);
}
- VLOG(compiler) << "Total # image strings=" << total_strings << " combined length="
+ LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
<< total_length << " prefix saved chars=" << prefix_saved_chars;
ComputeEagerResolvedStrings();
}
diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h
index 03498efcbb..990ba02d3d 100644
--- a/compiler/llvm/ir_builder.h
+++ b/compiler/llvm/ir_builder.h
@@ -101,10 +101,8 @@ class IRBuilder : public LLVMIRBuilder {
// Extend memory barrier
//--------------------------------------------------------------------------
void CreateMemoryBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP
// TODO: select atomic ordering according to given barrier kind.
CreateFence(::llvm::SequentiallyConsistent);
-#endif
}
//--------------------------------------------------------------------------
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 4d71cb780a..0b593275c7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -589,12 +589,14 @@ void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
if (locations->RegisterContainsObject(i)) {
locations->SetStackBit(stack_offset / kVRegSize);
}
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
stack_offset += SaveCoreRegister(stack_offset, i);
}
}
for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
if (register_set->ContainsFloatingPointRegister(i)) {
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
stack_offset += SaveFloatingPointRegister(stack_offset, i);
}
}
@@ -605,12 +607,14 @@ void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
size_t stack_offset = first_register_slot_in_slow_path_;
for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
if (register_set->ContainsCoreRegister(i)) {
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
stack_offset += RestoreCoreRegister(stack_offset, i);
}
}
for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
if (register_set->ContainsFloatingPointRegister(i)) {
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
stack_offset += RestoreFloatingPointRegister(stack_offset, i);
}
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index dc0a829f65..1701ef5f0a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1189,7 +1189,7 @@ void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmPointerSize).Int32Value());
+ kArmWordSize).Int32Value());
// LR()
__ blx(LR);
@@ -1231,7 +1231,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmPointerSize).Int32Value();
+ kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
@@ -1268,7 +1268,7 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
}
// temp = temp->GetImtEntryAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmPointerSize).Int32Value();
+ kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index acc3fd6a25..c00fac1a37 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -28,7 +28,8 @@ namespace arm {
class CodeGeneratorARM;
class SlowPathCodeARM;
-static constexpr size_t kArmWordSize = 4;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kArmWordSize = kArmPointerSize;
static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
static constexpr RegisterPair kParameterCorePairRegisters[] = { R1_R2, R2_R3 };
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 2c586a15f6..82dced5e4f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1393,7 +1393,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
(invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
Location receiver = invoke->GetLocations()->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
// The register ip1 is required to be used for the hidden argument in
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -1450,7 +1450,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
// lr = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(temp.X(),
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64PointerSize).SizeValue()));
+ kArm64WordSize).SizeValue()));
// lr();
__ Blr(lr);
@@ -1465,7 +1465,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 6b71b94532..a40f27fafa 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -31,7 +31,9 @@ namespace arm64 {
class CodeGeneratorARM64;
class SlowPathCodeARM64;
-static constexpr size_t kArm64WordSize = 8;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kArm64WordSize = kArm64PointerSize;
+
static const vixl::Register kParameterCoreRegisters[] = {
vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index b0f36ce0b4..3c53cea0bf 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1120,7 +1120,8 @@ void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = temp[index_in_cache]
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()));
+ __ call(Address(
+ temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1184,7 +1185,8 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()));
+ __ call(Address(
+ temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1220,7 +1222,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86PointerSize).Int32Value()));
+ kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 8252f81c7f..0aff6cc493 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -25,7 +25,8 @@
namespace art {
namespace x86 {
-static constexpr size_t kX86WordSize = 4;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kX86WordSize = kX86PointerSize;
class CodeGeneratorX86;
class SlowPathCodeX86;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e9c67e3e6d..97f5e5c7ac 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1113,7 +1113,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64PointerSize).SizeValue()));
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1173,7 +1173,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64PointerSize).SizeValue()));
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1209,7 +1209,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64PointerSize).SizeValue()));
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 86f3b4ebf7..29c679d8f1 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -25,7 +25,8 @@
namespace art {
namespace x86_64 {
-static constexpr size_t kX86_64WordSize = 8;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index d2acfa6973..ac00824e33 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -32,10 +32,10 @@ namespace art {
*/
class HConstantFolding : public HOptimization {
public:
- HConstantFolding(HGraph* graph, const HGraphVisualizer& visualizer)
- : HOptimization(graph, true, kConstantFoldingPassName, visualizer) {}
+ explicit HConstantFolding(HGraph* graph)
+ : HOptimization(graph, true, kConstantFoldingPassName) {}
- virtual void Run() OVERRIDE;
+ void Run() OVERRIDE;
static constexpr const char* kConstantFoldingPassName = "constant_folding";
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 856c5165a3..a56b9d9a12 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -47,8 +47,7 @@ static void TestCode(const uint16_t* data,
ASSERT_EQ(expected_before, actual_before);
x86::CodeGeneratorX86 codegen(graph);
- HGraphVisualizer visualizer(nullptr, graph, codegen, "");
- HConstantFolding(graph, visualizer).Run();
+ HConstantFolding(graph).Run();
SSAChecker ssa_checker(&allocator, graph);
ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
@@ -60,7 +59,7 @@ static void TestCode(const uint16_t* data,
check_after_cf(graph);
- HDeadCodeElimination(graph, visualizer).Run();
+ HDeadCodeElimination(graph).Run();
ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index a4446ae04d..3db2c3ff3f 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -28,10 +28,10 @@ namespace art {
*/
class HDeadCodeElimination : public HOptimization {
public:
- HDeadCodeElimination(HGraph* graph, const HGraphVisualizer& visualizer)
- : HOptimization(graph, true, kDeadCodeEliminationPassName, visualizer) {}
+ explicit HDeadCodeElimination(HGraph* graph)
+ : HOptimization(graph, true, kDeadCodeEliminationPassName) {}
- virtual void Run() OVERRIDE;
+ void Run() OVERRIDE;
static constexpr const char* kDeadCodeEliminationPassName =
"dead_code_elimination";
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 0c6807482a..5d4b9cb024 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -41,8 +41,7 @@ static void TestCode(const uint16_t* data,
ASSERT_EQ(actual_before, expected_before);
x86::CodeGeneratorX86 codegen(graph);
- HGraphVisualizer visualizer(nullptr, graph, codegen, "");
- HDeadCodeElimination(graph, visualizer).Run();
+ HDeadCodeElimination(graph).Run();
SSAChecker ssa_checker(&allocator, graph);
ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 8d2c77475c..a841d5f65a 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_GVN_H_
#include "nodes.h"
+#include "optimization.h"
namespace art {
@@ -165,11 +166,11 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
/**
* Optimization phase that removes redundant instruction.
*/
-class GlobalValueNumberer : public ValueObject {
+class GlobalValueNumberer : public HOptimization {
public:
GlobalValueNumberer(ArenaAllocator* allocator, HGraph* graph)
- : allocator_(allocator),
- graph_(graph),
+ : HOptimization(graph, true, "GVN"),
+ allocator_(allocator),
block_effects_(allocator, graph->GetBlocks().Size()),
loop_effects_(allocator, graph->GetBlocks().Size()),
sets_(allocator, graph->GetBlocks().Size()),
@@ -186,7 +187,7 @@ class GlobalValueNumberer : public ValueObject {
}
}
- void Run();
+ void Run() OVERRIDE;
private:
// Per-block GVN. Will also update the ValueSet of the dominated and
@@ -202,7 +203,6 @@ class GlobalValueNumberer : public ValueObject {
SideEffects GetBlockEffects(HBasicBlock* block) const;
ArenaAllocator* const allocator_;
- HGraph* const graph_;
// Side effects of individual blocks, that is the union of the side effects
// of the instructions in the block.
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3e8361eca1..3d65e9a0a4 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -18,11 +18,22 @@
namespace art {
+class InstructionSimplifierVisitor : public HGraphVisitor {
+ public:
+ explicit InstructionSimplifierVisitor(HGraph* graph) : HGraphVisitor(graph) {}
+
+ private:
+ void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
+ void VisitEqual(HEqual* equal) OVERRIDE;
+ void VisitArraySet(HArraySet* equal) OVERRIDE;
+};
+
void InstructionSimplifier::Run() {
- VisitInsertionOrder();
+ InstructionSimplifierVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
}
-void InstructionSimplifier::VisitSuspendCheck(HSuspendCheck* check) {
+void InstructionSimplifierVisitor::VisitSuspendCheck(HSuspendCheck* check) {
HBasicBlock* block = check->GetBlock();
// Currently always keep the suspend check at entry.
if (block->IsEntryBlock()) return;
@@ -38,7 +49,7 @@ void InstructionSimplifier::VisitSuspendCheck(HSuspendCheck* check) {
block->RemoveInstruction(check);
}
-void InstructionSimplifier::VisitEqual(HEqual* equal) {
+void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) {
HInstruction* input1 = equal->InputAt(0);
HInstruction* input2 = equal->InputAt(1);
if (input1->GetType() == Primitive::kPrimBoolean && input2->IsIntConstant()) {
@@ -55,7 +66,7 @@ void InstructionSimplifier::VisitEqual(HEqual* equal) {
}
}
-void InstructionSimplifier::VisitArraySet(HArraySet* instruction) {
+void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) {
HInstruction* value = instruction->GetValue();
if (value->GetType() != Primitive::kPrimNot) return;
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 3844d57439..7068c7fc10 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -18,22 +18,19 @@
#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_H_
#include "nodes.h"
+#include "optimization.h"
namespace art {
/**
* Implements optimizations specific to each instruction.
*/
-class InstructionSimplifier : public HGraphVisitor {
+class InstructionSimplifier : public HOptimization {
public:
- explicit InstructionSimplifier(HGraph* graph) : HGraphVisitor(graph) {}
+ explicit InstructionSimplifier(HGraph* graph)
+ : HOptimization(graph, true, "instruction_simplifier") {}
- void Run();
-
- private:
- virtual void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
- virtual void VisitEqual(HEqual* equal) OVERRIDE;
- virtual void VisitArraySet(HArraySet* equal) OVERRIDE;
+ void Run() OVERRIDE;
};
} // namespace art
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index d1555d4e11..e1c8e8ed6e 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -391,6 +391,10 @@ class RegisterSet : public ValueObject {
return (register_set & (1 << reg)) != 0;
}
+ size_t GetNumberOfRegisters() const {
+ return __builtin_popcount(core_registers_) + __builtin_popcount(floating_point_registers_);
+ }
+
private:
uint32_t core_registers_;
uint32_t floating_point_registers_;
@@ -503,6 +507,10 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
return &live_registers_;
}
+ size_t GetNumberOfLiveRegisters() const {
+ return live_registers_.GetNumberOfRegisters();
+ }
+
bool InputOverlapsWithOutputOrTemp(uint32_t input_index, bool is_environment) const {
if (is_environment) return true;
if ((input_index == 0)
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index ea98186d11..d1178d5798 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -21,12 +21,6 @@
namespace art {
-void HOptimization::Execute() {
- Run();
- visualizer_.DumpGraph(pass_name_);
- Check();
-}
-
void HOptimization::Check() {
if (kIsDebugBuild) {
if (is_in_ssa_form_) {
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 59683e2075..d281248f4a 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -29,25 +29,19 @@ class HOptimization : public ValueObject {
public:
HOptimization(HGraph* graph,
bool is_in_ssa_form,
- const char* pass_name,
- const HGraphVisualizer& visualizer)
+ const char* pass_name)
: graph_(graph),
is_in_ssa_form_(is_in_ssa_form),
- pass_name_(pass_name),
- visualizer_(visualizer) {}
+ pass_name_(pass_name) {}
virtual ~HOptimization() {}
- // Execute the optimization pass.
- void Execute();
-
// Return the name of the pass.
const char* GetPassName() const { return pass_name_; }
  // Perform the analysis itself.
virtual void Run() = 0;
- private:
// Verify the graph; abort if it is not valid.
void Check();
@@ -59,9 +53,6 @@ class HOptimization : public ValueObject {
const bool is_in_ssa_form_;
// Optimization pass name.
const char* pass_name_;
- // A graph visualiser invoked after the execution of the optimization
- // pass if enabled.
- const HGraphVisualizer& visualizer_;
DISALLOW_COPY_AND_ASSIGN(HOptimization);
};
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0de0907520..42ac77d1d8 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -167,7 +167,8 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
}
uintptr_t OptimizingCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
bool OptimizingCompiler::WriteElf(art::File* file, OatWriter* oat_writer,
@@ -189,6 +190,25 @@ static bool CanOptimize(const DexFile::CodeItem& code_item) {
return code_item.tries_size_ == 0;
}
+static void RunOptimizations(HGraph* graph, const HGraphVisualizer& visualizer) {
+ HDeadCodeElimination opt1(graph);
+ HConstantFolding opt2(graph);
+ SsaRedundantPhiElimination opt3(graph);
+ SsaDeadPhiElimination opt4(graph);
+ InstructionSimplifier opt5(graph);
+ GlobalValueNumberer opt6(graph->GetArena(), graph);
+ InstructionSimplifier opt7(graph);
+
+ HOptimization* optimizations[] = { &opt1, &opt2, &opt3, &opt4, &opt5, &opt6, &opt7 };
+
+ for (size_t i = 0; i < arraysize(optimizations); ++i) {
+ HOptimization* optimization = optimizations[i];
+ optimization->Run();
+ optimization->Check();
+ visualizer.DumpGraph(optimization->GetPassName());
+ }
+}
+
CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -256,17 +276,9 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
visualizer.DumpGraph("ssa");
graph->FindNaturalLoops();
- HDeadCodeElimination(graph, visualizer).Execute();
- HConstantFolding(graph, visualizer).Execute();
+ RunOptimizations(graph, visualizer);
- SsaRedundantPhiElimination(graph).Run();
- SsaDeadPhiElimination(graph).Run();
- InstructionSimplifier(graph).Run();
- GlobalValueNumberer(graph->GetArena(), graph).Run();
- visualizer.DumpGraph(kGVNPassName);
- InstructionSimplifier(graph).Run();
PrepareForRegisterAllocation(graph).Run();
-
SsaLivenessAnalysis liveness(*graph, codegen);
liveness.Analyze();
visualizer.DumpGraph(kLivenessPassName);
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 4d6e66413d..2948496e15 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -215,9 +215,16 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
// By adding the following interval in the algorithm, we can compute this
// maximum before updating locations.
LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
- interval->AddRange(position, position + 1);
- unhandled_core_intervals_.Add(interval);
- unhandled_fp_intervals_.Add(interval);
+ // The start of the interval must be after the position of the safepoint, so that
+ // we can just check the number of active registers at that position. Note that this
+ // will include the current interval in the computation of
+ // `maximum_number_of_live_registers`, so we need a better strategy if this becomes
+ // a problem.
+ // TODO: We could put the logic in AddSorted, to ensure the safepoint range is
+ // after all other intervals starting at that same position.
+ interval->AddRange(position + 1, position + 2);
+ AddSorted(&unhandled_core_intervals_, interval);
+ AddSorted(&unhandled_fp_intervals_, interval);
}
}
@@ -250,6 +257,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
: unhandled_fp_intervals_;
DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek()));
+
// Some instructions define their output in fixed register/stack slot. We need
// to ensure we know these locations before doing register allocation. For a
// given register, we create an interval that covers these locations. The register
@@ -475,6 +483,17 @@ void RegisterAllocator::LinearScan() {
LiveInterval* current = unhandled_->Pop();
DCHECK(!current->IsFixed() && !current->HasSpillSlot());
DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
+
+ if (current->IsSlowPathSafepoint()) {
+ // Synthesized interval to record the maximum number of live registers
+ // at safepoints. No need to allocate a register for it.
+ // We know that current actives are all live at the safepoint (modulo
+ // the one created by the safepoint).
+ maximum_number_of_live_registers_ =
+ std::max(maximum_number_of_live_registers_, active_.Size());
+ continue;
+ }
+
size_t position = current->GetStart();
// Remember the inactive_ size here since the ones moved to inactive_ from
@@ -515,14 +534,6 @@ void RegisterAllocator::LinearScan() {
}
}
- if (current->IsSlowPathSafepoint()) {
- // Synthesized interval to record the maximum number of live registers
- // at safepoints. No need to allocate a register for it.
- maximum_number_of_live_registers_ =
- std::max(maximum_number_of_live_registers_, active_.Size());
- continue;
- }
-
// (4) Try to find an available register.
bool success = TryAllocateFreeReg(current);
@@ -1062,6 +1073,7 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
switch (source.GetKind()) {
case Location::kRegister: {
locations->AddLiveRegister(source);
+ DCHECK_LE(locations->GetNumberOfLiveRegisters(), maximum_number_of_live_registers_);
if (current->GetType() == Primitive::kPrimNot) {
locations->SetRegisterBit(source.reg());
}
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index 5274f09f3f..b7899712d6 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
#include "nodes.h"
+#include "optimization.h"
namespace art {
@@ -25,15 +26,15 @@ namespace art {
* Optimization phase that removes dead phis from the graph. Dead phis are unused
* phis, or phis only used by other phis.
*/
-class SsaDeadPhiElimination : public ValueObject {
+class SsaDeadPhiElimination : public HOptimization {
public:
explicit SsaDeadPhiElimination(HGraph* graph)
- : graph_(graph), worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+ : HOptimization(graph, true, "dead_phi_elimination"),
+ worklist_(graph->GetArena(), kDefaultWorklistSize) {}
- void Run();
+ void Run() OVERRIDE;
private:
- HGraph* const graph_;
GrowableArray<HPhi*> worklist_;
static constexpr size_t kDefaultWorklistSize = 8;
@@ -47,15 +48,15 @@ class SsaDeadPhiElimination : public ValueObject {
* registers might be updated with the same value, or not updated at all. We can just
* replace the phi with the value when entering the loop.
*/
-class SsaRedundantPhiElimination : public ValueObject {
+class SsaRedundantPhiElimination : public HOptimization {
public:
explicit SsaRedundantPhiElimination(HGraph* graph)
- : graph_(graph), worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+ : HOptimization(graph, true, "redundant_phi_elimination"),
+ worklist_(graph->GetArena(), kDefaultWorklistSize) {}
- void Run();
+ void Run() OVERRIDE;
private:
- HGraph* const graph_;
GrowableArray<HPhi*> worklist_;
static constexpr size_t kDefaultWorklistSize = 8;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 39ebf6803c..a1594b02ac 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1513,10 +1513,8 @@ void Arm32Assembler::MemoryBarrier(ManagedRegister mscratch) {
void Arm32Assembler::dmb(DmbOptions flavor) {
-#if ANDROID_SMP != 0
int32_t encoding = 0xf57ff05f; // dmb
Emit(encoding | flavor);
-#endif
}
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 3ab9b2ba03..a34920999e 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2599,10 +2599,8 @@ void Thumb2Assembler::MemoryBarrier(ManagedRegister mscratch) {
void Thumb2Assembler::dmb(DmbOptions flavor) {
-#if ANDROID_SMP != 0
int32_t encoding = 0xf3bf8f50; // dmb in T1 encoding.
Emit32(encoding | flavor);
-#endif
}
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 02011b87a0..390f2ea449 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -476,9 +476,7 @@ void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
// TODO: Should we check that m_scratch is IP? - see arm.
-#if ANDROID_SMP != 0
___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
-#endif
}
void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 8ebb40e338..afa4a3b958 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1830,9 +1830,7 @@ void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, O
}
void X86Assembler::MemoryBarrier(ManagedRegister) {
-#if ANDROID_SMP != 0
mfence();
-#endif
}
void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 2bb2ed8c9c..8c428f455e 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2371,9 +2371,7 @@ void X86_64Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src
}
void X86_64Assembler::MemoryBarrier(ManagedRegister) {
-#if ANDROID_SMP != 0
mfence();
-#endif
}
void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 2d2a82e985..7d4b726292 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -942,9 +942,11 @@ class Dex2Oat FINAL {
oat_location_ = oat_filename_;
}
} else {
- oat_file_.reset(new File(oat_fd_, oat_location_));
+ oat_file_.reset(new File(oat_fd_, oat_location_, true));
oat_file_->DisableAutoClose();
- oat_file_->SetLength(0);
+ if (oat_file_->SetLength(0) != 0) {
+ PLOG(WARNING) << "Truncating oat file " << oat_location_ << " failed.";
+ }
}
if (oat_file_.get() == nullptr) {
PLOG(ERROR) << "Failed to create oat file: " << oat_location_;
@@ -952,6 +954,7 @@ class Dex2Oat FINAL {
}
if (create_file && fchmod(oat_file_->Fd(), 0644) != 0) {
PLOG(ERROR) << "Failed to make oat file world readable: " << oat_location_;
+ oat_file_->Erase();
return false;
}
return true;
@@ -1075,7 +1078,10 @@ class Dex2Oat FINAL {
<< ". Try: adb shell chmod 777 /data/local/tmp";
continue;
}
- tmp_file->WriteFully(dex_file->Begin(), dex_file->Size());
+ // This is just dumping files for debugging. Ignore errors, and leave remnants.
+ UNUSED(tmp_file->WriteFully(dex_file->Begin(), dex_file->Size()));
+ UNUSED(tmp_file->Flush());
+ UNUSED(tmp_file->Close());
LOG(INFO) << "Wrote input to " << tmp_file_name;
}
}
@@ -1214,6 +1220,8 @@ class Dex2Oat FINAL {
// Write out the generated code part. Calls the OatWriter and ElfBuilder. Also prepares the
// ImageWriter, if necessary.
+ // Note: Flushing (and closing) the file is the caller's responsibility, except for the failure
+ // case (when the file will be explicitly erased).
bool CreateOatFile() {
CHECK(key_value_store_.get() != nullptr);
@@ -1266,15 +1274,7 @@ class Dex2Oat FINAL {
if (!driver_->WriteElf(android_root_, is_host_, dex_files_, oat_writer.get(),
oat_file_.get())) {
LOG(ERROR) << "Failed to write ELF file " << oat_file_->GetPath();
- return false;
- }
- }
-
- // Flush result to disk.
- {
- TimingLogger::ScopedTiming t2("dex2oat Flush ELF", timings_);
- if (oat_file_->Flush() != 0) {
- LOG(ERROR) << "Failed to flush ELF file " << oat_file_->GetPath();
+ oat_file_->Erase();
return false;
}
}
@@ -1295,14 +1295,19 @@ class Dex2Oat FINAL {
return true;
}
- // Strip the oat file, if requested. This first creates a copy from unstripped to stripped, and
- // then runs the ElfStripper. Currently only relevant for the portable compiler.
- bool Strip() {
+ // Create a copy from unstripped to stripped.
+ bool CopyUnstrippedToStripped() {
// If we don't want to strip in place, copy from unstripped location to stripped location.
// We need to strip after image creation because FixupElf needs to use .strtab.
if (oat_unstripped_ != oat_stripped_) {
+ // If the oat file is still open, flush it.
+ if (oat_file_.get() != nullptr && oat_file_->IsOpened()) {
+ if (!FlushCloseOatFile()) {
+ return false;
+ }
+ }
+
TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
- oat_file_.reset();
std::unique_ptr<File> in(OS::OpenFileForReading(oat_unstripped_.c_str()));
std::unique_ptr<File> out(OS::CreateEmptyFile(oat_stripped_.c_str()));
size_t buffer_size = 8192;
@@ -1315,14 +1320,27 @@ class Dex2Oat FINAL {
bool write_ok = out->WriteFully(buffer.get(), bytes_read);
CHECK(write_ok);
}
- oat_file_.reset(out.release());
+ if (kUsePortableCompiler) {
+ oat_file_.reset(out.release());
+ } else {
+ if (out->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_stripped_;
+ return false;
+ }
+ }
VLOG(compiler) << "Oat file copied successfully (stripped): " << oat_stripped_;
}
+ return true;
+ }
+ // Run the ElfStripper. Currently only relevant for the portable compiler.
+ bool Strip() {
if (kUsePortableCompiler) {
// Portable includes debug symbols unconditionally. If we are not supposed to create them,
// strip them now. Quick generates debug symbols only when the flag(s) are set.
if (!compiler_options_->GetIncludeDebugSymbols()) {
+ CHECK(oat_file_.get() != nullptr && oat_file_->IsOpened());
+
TimingLogger::ScopedTiming t("dex2oat ElfStripper", timings_);
// Strip unneeded sections for target
off_t seek_actual = lseek(oat_file_->Fd(), 0, SEEK_SET);
@@ -1330,6 +1348,11 @@ class Dex2Oat FINAL {
std::string error_msg;
if (!ElfFile::Strip(oat_file_.get(), &error_msg)) {
LOG(ERROR) << "Failed to strip elf file: " << error_msg;
+ oat_file_->Erase();
+ return false;
+ }
+
+ if (!FlushCloseOatFile()) {
return false;
}
@@ -1343,6 +1366,31 @@ class Dex2Oat FINAL {
return true;
}
+ bool FlushOatFile() {
+ if (oat_file_.get() != nullptr) {
+ TimingLogger::ScopedTiming t2("dex2oat Flush ELF", timings_);
+ if (oat_file_->Flush() != 0) {
+ PLOG(ERROR) << "Failed to flush oat file: " << oat_location_ << " / "
+ << oat_filename_;
+ oat_file_->Erase();
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool FlushCloseOatFile() {
+ if (oat_file_.get() != nullptr) {
+ std::unique_ptr<File> tmp(oat_file_.release());
+ if (tmp->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close oat file: " << oat_location_ << " / "
+ << oat_filename_;
+ return false;
+ }
+ }
+ return true;
+ }
+
void DumpTiming() {
if (dump_timing_ || (dump_slow_timing_ && timings_->GetTotalNs() > MsToNs(1000))) {
LOG(INFO) << Dumpable<TimingLogger>(*timings_);
@@ -1356,6 +1404,10 @@ class Dex2Oat FINAL {
return compiler_options_.get();
}
+ bool IsImage() const {
+ return image_;
+ }
+
bool IsHost() const {
return is_host_;
}
@@ -1451,18 +1503,24 @@ class Dex2Oat FINAL {
// Destroy ImageWriter before doing FixupElf.
image_writer_.reset();
- std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str()));
- if (oat_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_;
- return false;
- }
-
// Do not fix up the ELF file if we are --compile-pic
if (!compiler_options_->GetCompilePic()) {
+ std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str()));
+ if (oat_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_;
+ return false;
+ }
+
if (!ElfWriter::Fixup(oat_file.get(), oat_data_begin)) {
+ oat_file->Erase();
LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
return false;
}
+
+ if (oat_file->FlushCloseOrErase()) {
+ PLOG(ERROR) << "Failed to flush and close fixed ELF file " << oat_file->GetPath();
+ return false;
+ }
}
return true;
@@ -1609,49 +1667,121 @@ static void b13564922() {
#endif
}
-static int dex2oat(int argc, char** argv) {
- b13564922();
+static int CompileImage(Dex2Oat& dex2oat) {
+ dex2oat.Compile();
- TimingLogger timings("compiler", false, false);
+ // Create the boot.oat.
+ if (!dex2oat.CreateOatFile()) {
+ return EXIT_FAILURE;
+ }
- Dex2Oat dex2oat(&timings);
+ // Flush and close the boot.oat. We always expect the output file by name, and it will be
+ // re-opened from the unstripped name.
+ if (!dex2oat.FlushCloseOatFile()) {
+ return EXIT_FAILURE;
+ }
- // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
- dex2oat.ParseArgs(argc, argv);
+ // Creates the boot.art and patches the boot.oat.
+ if (!dex2oat.HandleImage()) {
+ return EXIT_FAILURE;
+ }
- // Check early that the result of compilation can be written
- if (!dex2oat.OpenFile()) {
+ // When given --host, finish early without stripping.
+ if (dex2oat.IsHost()) {
+ dex2oat.DumpTiming();
+ return EXIT_SUCCESS;
+ }
+
+ // Copy unstripped to stripped location, if necessary.
+ if (!dex2oat.CopyUnstrippedToStripped()) {
return EXIT_FAILURE;
}
- LOG(INFO) << CommandLine();
+ // Strip, if necessary.
+ if (!dex2oat.Strip()) {
+ return EXIT_FAILURE;
+ }
- if (!dex2oat.Setup()) {
+ // FlushClose again, as stripping might have re-opened the oat file.
+ if (!dex2oat.FlushCloseOatFile()) {
return EXIT_FAILURE;
}
+ dex2oat.DumpTiming();
+ return EXIT_SUCCESS;
+}
+
+static int CompileApp(Dex2Oat& dex2oat) {
dex2oat.Compile();
+ // Create the app oat.
if (!dex2oat.CreateOatFile()) {
return EXIT_FAILURE;
}
- if (!dex2oat.HandleImage()) {
+ // Do not close the oat file here. We might have gotten the output file by file descriptor,
+ // which we would lose.
+ if (!dex2oat.FlushOatFile()) {
return EXIT_FAILURE;
}
+ // When given --host, finish early without stripping.
if (dex2oat.IsHost()) {
+ if (!dex2oat.FlushCloseOatFile()) {
+ return EXIT_FAILURE;
+ }
+
dex2oat.DumpTiming();
return EXIT_SUCCESS;
}
+ // Copy unstripped to stripped location, if necessary. This will implicitly flush & close the
+ // unstripped version. If this is given, we expect to be able to open writable files by name.
+ if (!dex2oat.CopyUnstrippedToStripped()) {
+ return EXIT_FAILURE;
+ }
+
+ // Strip, if necessary.
if (!dex2oat.Strip()) {
return EXIT_FAILURE;
}
+ // Flush and close the file.
+ if (!dex2oat.FlushCloseOatFile()) {
+ return EXIT_FAILURE;
+ }
+
dex2oat.DumpTiming();
return EXIT_SUCCESS;
}
+
+static int dex2oat(int argc, char** argv) {
+ b13564922();
+
+ TimingLogger timings("compiler", false, false);
+
+ Dex2Oat dex2oat(&timings);
+
+ // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
+ dex2oat.ParseArgs(argc, argv);
+
+ // Check early that the result of compilation can be written
+ if (!dex2oat.OpenFile()) {
+ return EXIT_FAILURE;
+ }
+
+ LOG(INFO) << CommandLine();
+
+ if (!dex2oat.Setup()) {
+ return EXIT_FAILURE;
+ }
+
+ if (dex2oat.IsImage()) {
+ return CompileImage(dex2oat);
+ } else {
+ return CompileApp(dex2oat);
+ }
+}
} // namespace art
int main(int argc, char** argv) {
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index 9cd631c984..966ee3ac89 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -34,8 +34,14 @@ class DisassemblerOptions {
// Base address for calculating relative code offsets when absolute_addresses_ is false.
const uint8_t* const base_address_;
- DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address)
- : absolute_addresses_(absolute_addresses), base_address_(base_address) {}
+ // If set, the disassembler is allowed to look at load targets in literal
+ // pools.
+ const bool can_read_literals_;
+
+ DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address,
+ bool can_read_literals)
+ : absolute_addresses_(absolute_addresses), base_address_(base_address),
+ can_read_literals_(can_read_literals) {}
private:
DISALLOW_COPY_AND_ASSIGN(DisassemblerOptions);
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 229ac9755f..fe50421ed4 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -27,10 +27,88 @@
namespace art {
namespace arm64 {
+void CustomDisassembler::AppendRegisterNameToOutput(
+ const vixl::Instruction* instr,
+ const vixl::CPURegister& reg) {
+ USE(instr);
+ if (reg.IsRegister()) {
+ // This enumeration should mirror the declarations in
+ // runtime/arch/arm64/registers_arm64.h. We do not include that file to
+ // avoid a dependency on libart.
+ enum {
+ TR = 18,
+ ETR = 21,
+ IP0 = 16,
+ IP1 = 17,
+ FP = 29,
+ LR = 30
+ };
+ switch (reg.code()) {
+ case IP0: AppendToOutput(reg.Is64Bits() ? "ip0" : "wip0"); return;
+ case IP1: AppendToOutput(reg.Is64Bits() ? "ip1" : "wip1"); return;
+ case TR: AppendToOutput(reg.Is64Bits() ? "tr" : "w18"); return;
+ case ETR: AppendToOutput(reg.Is64Bits() ? "etr" : "w21"); return;
+ case FP: AppendToOutput(reg.Is64Bits() ? "fp" : "w29"); return;
+ case LR: AppendToOutput(reg.Is64Bits() ? "lr" : "w30"); return;
+ default:
+ // Fall through.
+ break;
+ }
+ }
+ // Print other register names as usual.
+ Disassembler::AppendRegisterNameToOutput(instr, reg);
+}
+
+void CustomDisassembler::VisitLoadLiteral(const vixl::Instruction* instr) {
+ Disassembler::VisitLoadLiteral(instr);
+
+ if (!read_literals_) {
+ return;
+ }
+
+ char* buffer = buffer_;
+ char* buffer_end = buffer_ + buffer_size_;
+
+ // Find the end position in the buffer.
+ while ((*buffer != 0) && (buffer < buffer_end)) {
+ ++buffer;
+ }
+
+ void* data_address = instr->LiteralAddress();
+ ptrdiff_t buf_size_remaining = buffer_end - buffer;
+ vixl::Instr op = instr->Mask(vixl::LoadLiteralMask);
+
+ switch (op) {
+ case vixl::LDR_w_lit:
+ case vixl::LDR_x_lit:
+ case vixl::LDRSW_x_lit: {
+ int64_t data = op == vixl::LDR_x_lit ? *reinterpret_cast<int64_t*>(data_address)
+ : *reinterpret_cast<int32_t*>(data_address);
+ snprintf(buffer, buf_size_remaining, " (0x%" PRIx64 " / %" PRId64 ")", data, data);
+ break;
+ }
+ case vixl::LDR_s_lit:
+ case vixl::LDR_d_lit: {
+ double data = (op == vixl::LDR_s_lit) ? *reinterpret_cast<float*>(data_address)
+ : *reinterpret_cast<double*>(data_address);
+ snprintf(buffer, buf_size_remaining, " (%g)", data);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
const vixl::Instruction* instr = reinterpret_cast<const vixl::Instruction*>(begin);
decoder.Decode(instr);
- os << FormatInstructionPointer(begin)
+ // TODO: Use FormatInstructionPointer() once VIXL provides the appropriate
+ // features.
+ // VIXL does not yet allow remapping addresses disassembled. Using
+ // FormatInstructionPointer() would show incoherences between the instruction
+ // location addresses and the target addresses disassembled by VIXL (eg. for
+ // branch instructions).
+ os << StringPrintf("%p", instr)
<< StringPrintf(": %08x\t%s\n", instr->InstructionBits(), disasm.GetOutput());
return vixl::kInstructionSize;
}
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index e56fe4f08a..a370b8ded9 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -28,9 +28,35 @@
namespace art {
namespace arm64 {
+class CustomDisassembler FINAL : public vixl::Disassembler {
+ public:
+ explicit CustomDisassembler(bool read_literals) :
+ vixl::Disassembler(), read_literals_(read_literals) {}
+
+ // Use register aliases in the disassembly.
+ virtual void AppendRegisterNameToOutput(const vixl::Instruction* instr,
+ const vixl::CPURegister& reg) OVERRIDE;
+
+ // Improve the disassembly of literal load instructions.
+ virtual void VisitLoadLiteral(const vixl::Instruction* instr) OVERRIDE;
+
+ private:
+ // Indicate if the disassembler should read data loaded from literal pools.
+ // This should only be enabled if reading the target of literal loads is safe.
+ // Here are possible outputs when the option is on or off:
+ // read_literals_ | disassembly
+ // true | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0) (3.40282e+38)
+ // false | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0)
+ const bool read_literals_;
+};
+
class DisassemblerArm64 FINAL : public Disassembler {
public:
- explicit DisassemblerArm64(DisassemblerOptions* options) : Disassembler(options) {
+ // TODO: Update this code once VIXL provides the ability to map code addresses
+ // to disassemble as a different address (the way FormatInstructionPointer()
+ // does).
+ explicit DisassemblerArm64(DisassemblerOptions* options) :
+ Disassembler(options), disasm(options->can_read_literals_) {
decoder.AppendVisitor(&disasm);
}
@@ -39,7 +65,7 @@ class DisassemblerArm64 FINAL : public Disassembler {
private:
vixl::Decoder decoder;
- vixl::Disassembler disasm;
+ CustomDisassembler disasm;
DISALLOW_COPY_AND_ASSIGN(DisassemblerArm64);
};
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d6309f7be3..feee598c94 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -182,8 +182,8 @@ class OatSymbolizer FINAL : public CodeOutput {
bool result = builder_->Write();
- elf_output_->Flush();
- elf_output_->Close();
+ // Ignore I/O errors.
+ UNUSED(elf_output_->FlushClose());
return result;
}
@@ -388,7 +388,8 @@ class OatDumper {
options_(options),
disassembler_(Disassembler::Create(oat_file_.GetOatHeader().GetInstructionSet(),
new DisassemblerOptions(options_->absolute_addresses_,
- oat_file.Begin()))) {
+ oat_file.Begin(),
+ true /* can_read_literals_ */))) {
CHECK(options_->class_loader_ != nullptr);
AddAllOffsets();
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 281649e071..b15c7127ee 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -904,6 +904,20 @@ static File* CreateOrOpen(const char* name, bool* created) {
}
}
+// Either try to close the file (close=true), or erase it.
+static bool FinishFile(File* file, bool close) {
+ if (close) {
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close file.";
+ return false;
+ }
+ return true;
+ } else {
+ file->Erase();
+ return false;
+ }
+}
+
static int patchoat(int argc, char **argv) {
InitLogging(argv);
MemMap::Init();
@@ -1175,7 +1189,7 @@ static int patchoat(int argc, char **argv) {
if (output_image_filename.empty()) {
output_image_filename = "output-image-file";
}
- output_image.reset(new File(output_image_fd, output_image_filename));
+ output_image.reset(new File(output_image_fd, output_image_filename, true));
} else {
CHECK(!output_image_filename.empty());
output_image.reset(CreateOrOpen(output_image_filename.c_str(), &new_image_out));
@@ -1189,7 +1203,7 @@ static int patchoat(int argc, char **argv) {
if (input_oat_filename.empty()) {
input_oat_filename = "input-oat-file";
}
- input_oat.reset(new File(input_oat_fd, input_oat_filename));
+ input_oat.reset(new File(input_oat_fd, input_oat_filename, false));
if (input_oat == nullptr) {
// Unlikely, but ensure exhaustive logging in non-0 exit code case
LOG(ERROR) << "Failed to open input oat file by its FD" << input_oat_fd;
@@ -1208,7 +1222,7 @@ static int patchoat(int argc, char **argv) {
if (output_oat_filename.empty()) {
output_oat_filename = "output-oat-file";
}
- output_oat.reset(new File(output_oat_fd, output_oat_filename));
+ output_oat.reset(new File(output_oat_fd, output_oat_filename, true));
if (output_oat == nullptr) {
// Unlikely, but ensure exhaustive logging in non-0 exit code case
LOG(ERROR) << "Failed to open output oat file by its FD" << output_oat_fd;
@@ -1281,14 +1295,20 @@ static int patchoat(int argc, char **argv) {
output_oat.get(), output_image.get(), isa, &timings,
output_oat_fd >= 0, // was it opened from FD?
new_oat_out);
+ // The order here doesn't matter. If the first one is successfully saved and the second one
+ // erased, ImageSpace will still detect a problem and not use the files.
+ ret = ret && FinishFile(output_image.get(), ret);
+ ret = ret && FinishFile(output_oat.get(), ret);
} else if (have_oat_files) {
TimingLogger::ScopedTiming pt("patch oat", &timings);
ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings,
output_oat_fd >= 0, // was it opened from FD?
new_oat_out);
+ ret = ret && FinishFile(output_oat.get(), ret);
} else if (have_image_files) {
TimingLogger::ScopedTiming pt("patch image", &timings);
ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings);
+ ret = ret && FinishFile(output_image.get(), ret);
} else {
CHECK(false);
ret = true;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 25fe45f5cd..58f7940eda 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -302,6 +302,7 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
base/allocator.h \
base/mutex.h \
debugger.h \
+ base/unix_file/fd_file.h \
dex_file.h \
dex_instruction.h \
gc/allocator/rosalloc.h \
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index f49c0371e0..f8590d3bd2 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -108,12 +108,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t
}
const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
-#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
- const bool smp = false;
-#else
const bool smp = true;
-#endif
-
#if defined(__ARM_ARCH_EXT_IDIV__)
const bool has_div = true;
#else
@@ -204,11 +199,8 @@ static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATT
}
const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
-#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
- const bool smp = false;
-#else
const bool smp = true;
-#endif
+
// See if have a sdiv instruction. Register a signal handler and try to execute an sdiv
// instruction. If we get a SIGILL then it's not supported.
struct sigaction sa, osa;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 5bc943c93f..a1270dcde9 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -58,12 +58,7 @@ const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromBitmap(uint3
}
const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCppDefines() {
-#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
- const bool smp = false;
-#else
const bool smp = true;
-#endif
-
const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
return new Arm64InstructionSetFeatures(smp, is_a53);
}
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index efec993340..11be2a8bc2 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -44,11 +44,7 @@ const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromBitmap(uint32_
}
const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
-#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
- const bool smp = false;
-#else
const bool smp = true;
-#endif
// TODO: here we assume the FPU is always 32-bit.
const bool fpu_32bit = true;
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 32cf909fd1..a12773dc9c 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -70,11 +70,7 @@ const X86InstructionSetFeatures* X86InstructionSetFeatures::FromBitmap(uint32_t
}
const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
-#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
- const bool smp = false;
-#else
const bool smp = true;
-#endif
#ifndef __SSSE3__
const bool has_SSSE3 = false;
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index bf091d00d2..0e93eee627 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -27,6 +27,9 @@ namespace art {
bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
while (true) {
+ if (file_.get() != nullptr) {
+ UNUSED(file_->FlushCloseOrErase()); // Ignore result.
+ }
file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
if (file_.get() == NULL) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
@@ -59,7 +62,7 @@ bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
}
bool ScopedFlock::Init(File* file, std::string* error_msg) {
- file_.reset(new File(dup(file->Fd())));
+ file_.reset(new File(dup(file->Fd()), true));
if (file_->Fd() == -1) {
file_.reset();
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
@@ -89,6 +92,9 @@ ScopedFlock::~ScopedFlock() {
if (file_.get() != NULL) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
CHECK_EQ(0, flock_result);
+ if (file_->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Could not close scoped file lock file.";
+ }
}
}
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index f29a7ec974..6e5e7a1582 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -14,28 +14,68 @@
* limitations under the License.
*/
-#include "base/logging.h"
#include "base/unix_file/fd_file.h"
+
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include "base/logging.h"
+
namespace unix_file {
-FdFile::FdFile() : fd_(-1), auto_close_(true) {
+FdFile::FdFile() : guard_state_(GuardState::kClosed), fd_(-1), auto_close_(true) {
}
-FdFile::FdFile(int fd) : fd_(fd), auto_close_(true) {
+FdFile::FdFile(int fd, bool check_usage)
+ : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
+ fd_(fd), auto_close_(true) {
}
-FdFile::FdFile(int fd, const std::string& path) : fd_(fd), file_path_(path), auto_close_(true) {
+FdFile::FdFile(int fd, const std::string& path, bool check_usage)
+ : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
+ fd_(fd), file_path_(path), auto_close_(true) {
CHECK_NE(0U, path.size());
}
FdFile::~FdFile() {
+ if (kCheckSafeUsage && (guard_state_ < GuardState::kNoCheck)) {
+ if (guard_state_ < GuardState::kFlushed) {
+ LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly flushed before destruction.";
+ }
+ if (guard_state_ < GuardState::kClosed) {
+ LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly closed before destruction.";
+ }
+ CHECK_GE(guard_state_, GuardState::kClosed);
+ }
if (auto_close_ && fd_ != -1) {
- Close();
+ if (Close() != 0) {
+ PLOG(::art::WARNING) << "Failed to close file " << file_path_;
+ }
+ }
+}
+
+void FdFile::moveTo(GuardState target, GuardState warn_threshold, const char* warning) {
+ if (kCheckSafeUsage) {
+ if (guard_state_ < GuardState::kNoCheck) {
+ if (warn_threshold < GuardState::kNoCheck && guard_state_ >= warn_threshold) {
+ LOG(::art::ERROR) << warning;
+ }
+ guard_state_ = target;
+ }
+ }
+}
+
+void FdFile::moveUp(GuardState target, const char* warning) {
+ if (kCheckSafeUsage) {
+ if (guard_state_ < GuardState::kNoCheck) {
+ if (guard_state_ < target) {
+ guard_state_ = target;
+ } else if (target < guard_state_) {
+ LOG(::art::ERROR) << warning;
+ }
+ }
}
}
@@ -54,11 +94,28 @@ bool FdFile::Open(const std::string& path, int flags, mode_t mode) {
return false;
}
file_path_ = path;
+ static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
+ if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
+ // Start in the base state (not flushed, not closed).
+ guard_state_ = GuardState::kBase;
+ } else {
+ // We are not concerned with read-only files. In that case, proper flushing and closing is
+ // not important.
+ guard_state_ = GuardState::kNoCheck;
+ }
return true;
}
int FdFile::Close() {
int result = TEMP_FAILURE_RETRY(close(fd_));
+
+ // Test here, so the file is closed and not leaked.
+ if (kCheckSafeUsage) {
+ CHECK_GE(guard_state_, GuardState::kFlushed) << "File " << file_path_
+ << " has not been flushed before closing.";
+ moveUp(GuardState::kClosed, nullptr);
+ }
+
if (result == -1) {
return -errno;
} else {
@@ -74,6 +131,7 @@ int FdFile::Flush() {
#else
int rc = TEMP_FAILURE_RETRY(fsync(fd_));
#endif
+ moveUp(GuardState::kFlushed, "Flushing closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -92,6 +150,7 @@ int FdFile::SetLength(int64_t new_length) {
#else
int rc = TEMP_FAILURE_RETRY(ftruncate(fd_, new_length));
#endif
+ moveTo(GuardState::kBase, GuardState::kClosed, "Truncating closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -107,6 +166,7 @@ int64_t FdFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
#else
int rc = TEMP_FAILURE_RETRY(pwrite(fd_, buf, byte_count, offset));
#endif
+ moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -135,6 +195,7 @@ bool FdFile::ReadFully(void* buffer, size_t byte_count) {
bool FdFile::WriteFully(const void* buffer, size_t byte_count) {
const char* ptr = static_cast<const char*>(buffer);
+ moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
while (byte_count > 0) {
ssize_t bytes_written = TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count));
if (bytes_written == -1) {
@@ -146,4 +207,38 @@ bool FdFile::WriteFully(const void* buffer, size_t byte_count) {
return true;
}
+void FdFile::Erase() {
+ TEMP_FAILURE_RETRY(SetLength(0));
+ TEMP_FAILURE_RETRY(Flush());
+ TEMP_FAILURE_RETRY(Close());
+}
+
+int FdFile::FlushCloseOrErase() {
+ int flush_result = TEMP_FAILURE_RETRY(Flush());
+ if (flush_result != 0) {
+ LOG(::art::ERROR) << "CloseOrErase failed while flushing a file.";
+ Erase();
+ return flush_result;
+ }
+ int close_result = TEMP_FAILURE_RETRY(Close());
+ if (close_result != 0) {
+ LOG(::art::ERROR) << "CloseOrErase failed while closing a file.";
+ Erase();
+ return close_result;
+ }
+ return 0;
+}
+
+int FdFile::FlushClose() {
+ int flush_result = TEMP_FAILURE_RETRY(Flush());
+ if (flush_result != 0) {
+ LOG(::art::ERROR) << "FlushClose failed while flushing a file.";
+ }
+ int close_result = TEMP_FAILURE_RETRY(Close());
+ if (close_result != 0) {
+ LOG(::art::ERROR) << "FlushClose failed while closing a file.";
+ }
+ return (flush_result != 0) ? flush_result : close_result;
+}
+
} // namespace unix_file
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index 01f4ca2819..8db2ee4a81 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -24,6 +24,9 @@
namespace unix_file {
+// If true, check whether Flush and Close are called before destruction.
+static constexpr bool kCheckSafeUsage = true;
+
// A RandomAccessFile implementation backed by a file descriptor.
//
// Not thread safe.
@@ -32,8 +35,8 @@ class FdFile : public RandomAccessFile {
FdFile();
// Creates an FdFile using the given file descriptor. Takes ownership of the
// file descriptor. (Use DisableAutoClose to retain ownership.)
- explicit FdFile(int fd);
- explicit FdFile(int fd, const std::string& path);
+ explicit FdFile(int fd, bool checkUsage);
+ explicit FdFile(int fd, const std::string& path, bool checkUsage);
// Destroys an FdFile, closing the file descriptor if Close hasn't already
// been called. (If you care about the return value of Close, call it
@@ -47,12 +50,21 @@ class FdFile : public RandomAccessFile {
bool Open(const std::string& file_path, int flags, mode_t mode);
// RandomAccessFile API.
- virtual int Close();
- virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
- virtual int SetLength(int64_t new_length);
+ virtual int Close() WARN_UNUSED;
+ virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const WARN_UNUSED;
+ virtual int SetLength(int64_t new_length) WARN_UNUSED;
virtual int64_t GetLength() const;
- virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
- virtual int Flush();
+ virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset) WARN_UNUSED;
+ virtual int Flush() WARN_UNUSED;
+
+ // Short for SetLength(0); Flush(); Close();
+ void Erase();
+
+ // Try to Flush(), then try to Close(); If either fails, call Erase().
+ int FlushCloseOrErase() WARN_UNUSED;
+
+ // Try to Flush and Close(). Attempts both, but returns the first error.
+ int FlushClose() WARN_UNUSED;
// Bonus API.
int Fd() const;
@@ -61,8 +73,35 @@ class FdFile : public RandomAccessFile {
return file_path_;
}
void DisableAutoClose();
- bool ReadFully(void* buffer, size_t byte_count);
- bool WriteFully(const void* buffer, size_t byte_count);
+ bool ReadFully(void* buffer, size_t byte_count) WARN_UNUSED;
+ bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED;
+
+ // This enum is public so that we can define the << operator over it.
+ enum class GuardState {
+ kBase, // Base, file has not been flushed or closed.
+ kFlushed, // File has been flushed, but not closed.
+ kClosed, // File has been flushed and closed.
+ kNoCheck // Do not check for the current file instance.
+ };
+
+ protected:
+ // If the guard state indicates checking (!=kNoCheck), go to the target state "target". Print the
+ // given warning if the current state is or exceeds warn_threshold.
+ void moveTo(GuardState target, GuardState warn_threshold, const char* warning);
+
+ // If the guard state indicates checking (<kNoCheck), and is below the target state "target", go
+ // to "target." If the current state is higher (excluding kNoCheck) than the target state, print the
+ // warning.
+ void moveUp(GuardState target, const char* warning);
+
+ // Forcefully sets the state to the given one. This can overwrite kNoCheck.
+ void resetGuard(GuardState new_state) {
+ if (kCheckSafeUsage) {
+ guard_state_ = new_state;
+ }
+ }
+
+ GuardState guard_state_;
private:
int fd_;
@@ -72,6 +111,8 @@ class FdFile : public RandomAccessFile {
DISALLOW_COPY_AND_ASSIGN(FdFile);
};
+std::ostream& operator<<(std::ostream& os, const FdFile::GuardState& kind);
+
} // namespace unix_file
#endif // ART_RUNTIME_BASE_UNIX_FILE_FD_FILE_H_
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 3481f2ff9f..a7e5b96460 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -24,7 +24,7 @@ namespace unix_file {
class FdFileTest : public RandomAccessFileTest {
protected:
virtual RandomAccessFile* MakeTestFile() {
- return new FdFile(fileno(tmpfile()));
+ return new FdFile(fileno(tmpfile()), false);
}
};
@@ -53,6 +53,7 @@ TEST_F(FdFileTest, OpenClose) {
ASSERT_TRUE(file.Open(good_path, O_CREAT | O_WRONLY));
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
+ EXPECT_EQ(0, file.Flush());
EXPECT_EQ(0, file.Close());
EXPECT_EQ(-1, file.Fd());
EXPECT_FALSE(file.IsOpened());
@@ -60,7 +61,7 @@ TEST_F(FdFileTest, OpenClose) {
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
- file.Close();
+ ASSERT_EQ(file.Close(), 0);
ASSERT_EQ(unlink(good_path.c_str()), 0);
}
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 0002433628..e7ace4caaa 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -76,6 +76,8 @@ class RandomAccessFileTest : public testing::Test {
ASSERT_EQ(content.size(), static_cast<uint64_t>(file->Write(content.data(), content.size(), 0)));
TestReadContent(content, file.get());
+
+ CleanUp(file.get());
}
void TestReadContent(const std::string& content, RandomAccessFile* file) {
@@ -131,6 +133,8 @@ class RandomAccessFileTest : public testing::Test {
ASSERT_EQ(new_length, file->GetLength());
ASSERT_TRUE(ReadString(file.get(), &new_content));
ASSERT_EQ('\0', new_content[new_length - 1]);
+
+ CleanUp(file.get());
}
void TestWrite() {
@@ -163,6 +167,11 @@ class RandomAccessFileTest : public testing::Test {
ASSERT_EQ(file->GetLength(), new_length);
ASSERT_TRUE(ReadString(file.get(), &new_content));
ASSERT_EQ(std::string("hello\0hello", new_length), new_content);
+
+ CleanUp(file.get());
+ }
+
+ virtual void CleanUp(RandomAccessFile* file ATTRIBUTE_UNUSED) {
}
protected:
diff --git a/runtime/base/unix_file/random_access_file_utils_test.cc b/runtime/base/unix_file/random_access_file_utils_test.cc
index 63179220a2..9457d22424 100644
--- a/runtime/base/unix_file/random_access_file_utils_test.cc
+++ b/runtime/base/unix_file/random_access_file_utils_test.cc
@@ -37,14 +37,14 @@ TEST_F(RandomAccessFileUtilsTest, CopyFile) {
}
TEST_F(RandomAccessFileUtilsTest, BadSrc) {
- FdFile src(-1);
+ FdFile src(-1, false);
StringFile dst;
ASSERT_FALSE(CopyFile(src, &dst));
}
TEST_F(RandomAccessFileUtilsTest, BadDst) {
StringFile src;
- FdFile dst(-1);
+ FdFile dst(-1, false);
// We need some source content to trigger a write.
// Copying an empty file is a no-op.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 68e20f2763..e1b79c9f87 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2060,6 +2060,7 @@ mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlready
Thread* self, const char* descriptor,
size_t hash,
Handle<mirror::ClassLoader> class_loader) {
+ // Can we special case for a well understood PathClassLoader with the BootClassLoader as parent?
if (class_loader->GetClass() !=
soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader) ||
class_loader->GetParent()->GetClass() !=
@@ -2071,17 +2072,21 @@ mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlready
if (pair.second != nullptr) {
mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
if (klass != nullptr) {
- return EnsureResolved(self, descriptor, klass);
+ // May return null if resolution on another thread fails.
+ klass = EnsureResolved(self, descriptor, klass);
+ } else {
+ // May OOME.
+ klass = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
+ *pair.second);
}
- klass = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
- *pair.second);
- if (klass != nullptr) {
- return klass;
+ if (klass == nullptr) {
+ CHECK(self->IsExceptionPending()) << descriptor;
+ self->ClearException();
}
- CHECK(self->IsExceptionPending()) << descriptor;
- self->ClearException();
+ return klass;
} else {
- // RegisterDexFile may allocate dex caches (and cause thread suspension).
+ // Handle as if this is the child PathClassLoader.
+ // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
StackHandleScope<3> hs(self);
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
@@ -2138,8 +2143,9 @@ mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlready
}
}
}
+ self->AssertNoPendingException();
+ return nullptr;
}
- return nullptr;
}
mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 006354f948..b78d0b5bc3 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -117,8 +117,8 @@ class ClassLinker {
Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Find a class in the path class loader, loading it if necessary. Hash function is supposed to
- // be ComputeModifiedUtf8Hash(descriptor).
+ // Find a class in the path class loader, loading it if necessary without using JNI. Hash
+ // function is supposed to be ComputeModifiedUtf8Hash(descriptor).
mirror::Class* FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader)
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 6e3ebc24d8..03b33e962a 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -59,7 +59,7 @@ ScratchFile::ScratchFile() {
filename_ += "/TmpFile-XXXXXX";
int fd = mkstemp(&filename_[0]);
CHECK_NE(-1, fd);
- file_.reset(new File(fd, GetFilename()));
+ file_.reset(new File(fd, GetFilename(), true));
}
ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
@@ -67,7 +67,7 @@ ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
filename_ += suffix;
int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
CHECK_NE(-1, fd);
- file_.reset(new File(fd, GetFilename()));
+ file_.reset(new File(fd, GetFilename(), true));
}
ScratchFile::ScratchFile(File* file) {
@@ -88,6 +88,11 @@ void ScratchFile::Unlink() {
if (!OS::FileExists(filename_.c_str())) {
return;
}
+ if (file_.get() != nullptr) {
+ if (file_->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Error closing scratch file.";
+ }
+ }
int unlink_result = unlink(filename_.c_str());
CHECK_EQ(0, unlink_result);
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e2f6085ae2..ef5db2d5ea 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -183,16 +183,20 @@ class AllocRecord {
class Breakpoint {
public:
- Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
+ Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc,
+ DeoptimizationRequest::Kind deoptimization_kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
+ : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
+ CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
+ deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
+ deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(method);
}
Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: method_(nullptr), dex_pc_(other.dex_pc_),
- need_full_deoptimization_(other.need_full_deoptimization_) {
+ deoptimization_kind_(other.deoptimization_kind_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(other.Method());
}
@@ -206,8 +210,8 @@ class Breakpoint {
return dex_pc_;
}
- bool NeedFullDeoptimization() const {
- return need_full_deoptimization_;
+ DeoptimizationRequest::Kind GetDeoptimizationKind() const {
+ return deoptimization_kind_;
}
private:
@@ -216,7 +220,7 @@ class Breakpoint {
uint32_t dex_pc_;
// Indicates whether the breakpoint needs full deoptimization, selective deoptimization, or none.
- bool need_full_deoptimization_;
+ DeoptimizationRequest::Kind deoptimization_kind_;
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
@@ -736,6 +740,12 @@ bool Dbg::IsDisposed() {
return gDisposed;
}
+bool Dbg::RequiresDeoptimization() {
+ // We don't need deoptimization if everything already runs with the interpreter,
+ // e.g. after enabling -Xint mode.
+ return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
+}
+
void Dbg::GoActive() {
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
@@ -768,7 +778,9 @@ void Dbg::GoActive() {
Thread* self = Thread::Current();
ThreadState old_state = self->SetStateUnsafe(kRunnable);
CHECK_NE(old_state, kRunnable);
- runtime->GetInstrumentation()->EnableDeoptimization();
+ if (RequiresDeoptimization()) {
+ runtime->GetInstrumentation()->EnableDeoptimization();
+ }
instrumentation_events_ = 0;
gDebuggerActive = true;
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
@@ -806,7 +818,9 @@ void Dbg::Disconnected() {
instrumentation_events_);
instrumentation_events_ = 0;
}
- runtime->GetInstrumentation()->DisableDeoptimization();
+ if (RequiresDeoptimization()) {
+ runtime->GetInstrumentation()->DisableDeoptimization();
+ }
gDebuggerActive = false;
}
gRegistry->Clear();
@@ -3035,9 +3049,11 @@ void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
}
void Dbg::DelayFullUndeoptimization() {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- ++delayed_full_undeoptimization_count_;
- DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
+ if (RequiresDeoptimization()) {
+ MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+ ++delayed_full_undeoptimization_count_;
+ DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
+ }
}
void Dbg::ProcessDelayedFullUndeoptimizations() {
@@ -3196,64 +3212,101 @@ static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
}
// Sanity checks all existing breakpoints on the same method.
-static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
+static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
+ DeoptimizationRequest::Kind deoptimization_kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (const Breakpoint& breakpoint : gBreakpoints) {
- CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
+ if (breakpoint.Method() == m) {
+ CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
+ }
}
- if (need_full_deoptimization) {
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
// We should have deoptimized everything but not "selectively" deoptimized this method.
- CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
- CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
- } else {
+ CHECK(instrumentation->AreAllMethodsDeoptimized());
+ CHECK(!instrumentation->IsDeoptimized(m));
+ } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
// We should have "selectively" deoptimized this method.
// Note: while we have not deoptimized everything for this method, we may have done it for
// another event.
- CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+ CHECK(instrumentation->IsDeoptimized(m));
+ } else {
+ // This method does not require deoptimization.
+ CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
+ CHECK(!instrumentation->IsDeoptimized(m));
}
}
-// Installs a breakpoint at the specified location. Also indicates through the deoptimization
-// request if we need to deoptimize.
-void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
- Thread* const self = Thread::Current();
- mirror::ArtMethod* m = FromMethodId(location->method_id);
- DCHECK(m != nullptr) << "No method for method id " << location->method_id;
-
+static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
+ mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!Dbg::RequiresDeoptimization()) {
+ // We already run in interpreter-only mode so we don't need to deoptimize anything.
+ VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
+ << PrettyMethod(m);
+ return DeoptimizationRequest::kNothing;
+ }
const Breakpoint* existing_breakpoint;
{
ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
existing_breakpoint = FindFirstBreakpointForMethod(m);
}
- bool need_full_deoptimization;
if (existing_breakpoint == nullptr) {
// There is no breakpoint on this method yet: we need to deoptimize. If this method may be
// inlined, we deoptimize everything; otherwise we deoptimize only this method.
// Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
// Therefore we must not hold any lock when we call it.
- need_full_deoptimization = IsMethodPossiblyInlined(self, m);
+ bool need_full_deoptimization = IsMethodPossiblyInlined(self, m);
if (need_full_deoptimization) {
- req->SetKind(DeoptimizationRequest::kFullDeoptimization);
- req->SetMethod(nullptr);
+ VLOG(jdwp) << "Need full deoptimization because of possible inlining of method "
+ << PrettyMethod(m);
+ return DeoptimizationRequest::kFullDeoptimization;
} else {
- req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
- req->SetMethod(m);
+ // We don't need to deoptimize if the method has not been compiled.
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
+ if (is_compiled) {
+ VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
+ return DeoptimizationRequest::kSelectiveDeoptimization;
+ } else {
+ // Method is not compiled: we don't need to deoptimize.
+ VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
+ return DeoptimizationRequest::kNothing;
+ }
}
} else {
// There is at least one breakpoint for this method: we don't need to deoptimize.
- req->SetKind(DeoptimizationRequest::kNothing);
- req->SetMethod(nullptr);
-
- need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
+ // Let's check that all breakpoints are configured the same way for deoptimization.
+ VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
+ DeoptimizationRequest::Kind deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
if (kIsDebugBuild) {
ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
- SanityCheckExistingBreakpoints(m, need_full_deoptimization);
+ SanityCheckExistingBreakpoints(m, deoptimization_kind);
}
+ return DeoptimizationRequest::kNothing;
+ }
+}
+
+// Installs a breakpoint at the specified location. Also indicates through the deoptimization
+// request if we need to deoptimize.
+void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
+ Thread* const self = Thread::Current();
+ mirror::ArtMethod* m = FromMethodId(location->method_id);
+ DCHECK(m != nullptr) << "No method for method id " << location->method_id;
+
+ const DeoptimizationRequest::Kind deoptimization_kind = GetRequiredDeoptimizationKind(self, m);
+ req->SetKind(deoptimization_kind);
+ if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
+ req->SetMethod(m);
+ } else {
+ CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
+ deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
+ req->SetMethod(nullptr);
}
{
WriterMutexLock mu(self, *Locks::breakpoint_lock_);
- gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
+ gBreakpoints.push_back(Breakpoint(m, location->dex_pc, deoptimization_kind));
VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
<< gBreakpoints[gBreakpoints.size() - 1];
}
@@ -3265,12 +3318,13 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ
WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
mirror::ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
- bool need_full_deoptimization = false;
+ DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
- need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
- DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+ deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
+ DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
+ Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
gBreakpoints.erase(gBreakpoints.begin() + i);
break;
}
@@ -3278,21 +3332,26 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ
const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
if (existing_breakpoint == nullptr) {
// There is no more breakpoint on this method: we need to undeoptimize.
- if (need_full_deoptimization) {
+ if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
// This method required full deoptimization: we need to undeoptimize everything.
req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
req->SetMethod(nullptr);
- } else {
+ } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
// This method required selective deoptimization: we need to undeoptimize only that method.
req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
req->SetMethod(m);
+ } else {
+ // This method had no need for deoptimization: do nothing.
+ CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
+ req->SetKind(DeoptimizationRequest::kNothing);
+ req->SetMethod(nullptr);
}
} else {
// There is at least one breakpoint for this method: we don't need to undeoptimize.
req->SetKind(DeoptimizationRequest::kNothing);
req->SetMethod(nullptr);
if (kIsDebugBuild) {
- SanityCheckExistingBreakpoints(m, need_full_deoptimization);
+ SanityCheckExistingBreakpoints(m, deoptimization_kind);
}
}
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 488ba7f728..92031634cd 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -523,6 +523,9 @@ class Dbg {
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Indicates whether we need deoptimization for debugging.
+ static bool RequiresDeoptimization();
+
// Records deoptimization request in the queue.
static void RequestDeoptimization(const DeoptimizationRequest& req)
LOCKS_EXCLUDED(Locks::deoptimization_lock_)
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 134e284999..b304779568 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -146,6 +146,9 @@ static const DexFile* OpenDexFileBase64(const char* base64,
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index addd94833e..ec1e5f02fe 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -115,6 +115,9 @@ static const DexFile* OpenDexFileBase64(const char* base64, const char* location
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
@@ -177,6 +180,9 @@ static const DexFile* FixChecksumAndOpen(uint8_t* bytes, size_t length, const ch
if (!file->WriteFully(bytes, length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 3f747eefca..0cceaa4467 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -96,6 +96,8 @@ static const size_t kDefaultMarkStackSize = 64 * KB;
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
+static const char* kNonMovingSpaceName = "non moving space";
+static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
@@ -258,10 +260,14 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
std::string error_str;
std::unique_ptr<MemMap> non_moving_space_mem_map;
if (separate_non_moving_space) {
+ // If we are the zygote, the non moving space becomes the zygote space when we run
+ // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
+ // rename the mem map later.
+ const char* space_name = is_zygote ? kZygoteSpaceName: kNonMovingSpaceName;
// Reserve the non moving mem map before the other two since it needs to be at a specific
// address.
non_moving_space_mem_map.reset(
- MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
+ MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
@@ -1976,7 +1982,8 @@ void Heap::PreZygoteFork() {
// from this point on.
RemoveRememberedSet(old_alloc_space);
}
- zygote_space_ = old_alloc_space->CreateZygoteSpace("alloc space", low_memory_mode_,
+ // Remaining space becomes the new non moving space.
+ zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
&non_moving_space_);
CHECK(!non_moving_space_->CanMoveObjects());
if (same_space) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index b23212842a..071997f6b5 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -74,17 +74,17 @@ static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta)
// out-of-date. We also don't really care if this fails since it is just a convenience.
// Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
// Note this should only be used during first boot.
-static void RealPruneDexCache(const std::string& cache_dir_path);
+static void RealPruneDalvikCache(const std::string& cache_dir_path);
-static void PruneDexCache(InstructionSet isa) {
+static void PruneDalvikCache(InstructionSet isa) {
CHECK_NE(isa, kNone);
// Prune the base /data/dalvik-cache.
- RealPruneDexCache(GetDalvikCacheOrDie(".", false));
+ RealPruneDalvikCache(GetDalvikCacheOrDie(".", false));
// Prune /data/dalvik-cache/<isa>.
- RealPruneDexCache(GetDalvikCacheOrDie(GetInstructionSetString(isa), false));
+ RealPruneDalvikCache(GetDalvikCacheOrDie(GetInstructionSetString(isa), false));
}
-static void RealPruneDexCache(const std::string& cache_dir_path) {
+static void RealPruneDalvikCache(const std::string& cache_dir_path) {
if (!OS::DirectoryExists(cache_dir_path.c_str())) {
return;
}
@@ -118,6 +118,28 @@ static void RealPruneDexCache(const std::string& cache_dir_path) {
CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(cache_dir))) << "Unable to close directory.";
}
+// We write out an empty file to the zygote's ISA specific cache dir at the start of
+// every zygote boot and delete it when the boot completes. If we find a file already
+// present, it usually means the boot didn't complete. We wipe the entire dalvik
+// cache if that's the case.
+static void MarkZygoteStart(const InstructionSet isa) {
+ const std::string isa_subdir = GetDalvikCacheOrDie(GetInstructionSetString(isa), false);
+ const std::string boot_marker = isa_subdir + "/.booting";
+
+ if (OS::FileExists(boot_marker.c_str())) {
+ LOG(WARNING) << "Incomplete boot detected. Pruning dalvik cache";
+ RealPruneDalvikCache(isa_subdir);
+ }
+
+ VLOG(startup) << "Creating boot start marker: " << boot_marker;
+ std::unique_ptr<File> f(OS::CreateEmptyFile(boot_marker.c_str()));
+ if (f.get() != nullptr) {
+ if (f->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Failed to write boot marker.";
+ }
+ }
+}
+
static bool GenerateImage(const std::string& image_filename, InstructionSet image_isa,
std::string* error_msg) {
const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
@@ -130,7 +152,7 @@ static bool GenerateImage(const std::string& image_filename, InstructionSet imag
// We should clean up so we are more likely to have room for the image.
if (Runtime::Current()->IsZygote()) {
LOG(INFO) << "Pruning dalvik-cache since we are generating an image and will need to recompile";
- PruneDexCache(image_isa);
+ PruneDalvikCache(image_isa);
}
std::vector<std::string> arg_vector;
@@ -232,7 +254,7 @@ static bool RelocateImage(const char* image_location, const char* dest_filename,
// We should clean up so we are more likely to have room for the image.
if (Runtime::Current()->IsZygote()) {
LOG(INFO) << "Pruning dalvik-cache since we are relocating an image and will need to recompile";
- PruneDexCache(isa);
+ PruneDalvikCache(isa);
}
std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
@@ -427,6 +449,10 @@ ImageSpace* ImageSpace::Create(const char* image_location,
&has_system, &cache_filename, &dalvik_cache_exists,
&has_cache, &is_global_cache);
+ if (Runtime::Current()->IsZygote()) {
+ MarkZygoteStart(image_isa);
+ }
+
ImageSpace* space;
bool relocate = Runtime::Current()->ShouldRelocate();
bool can_compile = Runtime::Current()->IsImageDex2OatEnabled();
@@ -475,7 +501,7 @@ ImageSpace* ImageSpace::Create(const char* image_location,
// Since ImageCreationAllowed was true above, we are the zygote
// and therefore the only process expected to generate these for
// the device.
- PruneDexCache(image_isa);
+ PruneDalvikCache(image_isa);
return nullptr;
}
}
@@ -530,7 +556,7 @@ ImageSpace* ImageSpace::Create(const char* image_location,
"but image failed to load: %s",
image_location, cache_filename.c_str(), system_filename.c_str(),
error_msg->c_str());
- PruneDexCache(image_isa);
+ PruneDalvikCache(image_isa);
return nullptr;
} else if (is_system) {
// If the /system file exists, it should be up-to-date, don't try to generate it.
@@ -558,13 +584,13 @@ ImageSpace* ImageSpace::Create(const char* image_location,
// Since ImageCreationAllowed was true above, we are the zygote
// and therefore the only process expected to generate these for
// the device.
- PruneDexCache(image_isa);
+ PruneDalvikCache(image_isa);
return nullptr;
} else {
// Check whether there is enough space left over after we have generated the image.
if (!CheckSpace(cache_filename, error_msg)) {
// No. Delete the generated image and try to run out of the dex files.
- PruneDexCache(image_isa);
+ PruneDalvikCache(image_isa);
return nullptr;
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 14d7432508..3069581522 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -475,9 +475,14 @@ class Hprof {
}
}
- std::unique_ptr<File> file(new File(out_fd, filename_));
+ std::unique_ptr<File> file(new File(out_fd, filename_, true));
okay = file->WriteFully(header_data_ptr_, header_data_size_) &&
- file->WriteFully(body_data_ptr_, body_data_size_);
+ file->WriteFully(body_data_ptr_, body_data_size_);
+ if (okay) {
+ okay = file->FlushCloseOrErase() == 0;
+ } else {
+ file->Erase();
+ }
if (!okay) {
std::string msg(StringPrintf("Couldn't dump heap; writing \"%s\" failed: %s",
filename_.c_str(), strerror(errno)));
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 44f713ca96..1e0a2d2c22 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -125,6 +125,10 @@ struct ModBasket {
};
static bool NeedsFullDeoptimization(JdwpEventKind eventKind) {
+ if (!Dbg::RequiresDeoptimization()) {
+ // We don't need deoptimization for debugging.
+ return false;
+ }
switch (eventKind) {
case EK_METHOD_ENTRY:
case EK_METHOD_EXIT:
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index c24ef05486..62b6b3407f 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -390,59 +390,72 @@ class JniInternalTest : public CommonCompilerTest {
void ReleasePrimitiveArrayElementsOfWrongType(bool check_jni) {
bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
CheckJniAbortCatcher jni_abort_catcher;
-
- jbooleanArray array = env_->NewBooleanArray(10);
- ASSERT_TRUE(array != nullptr);
- jboolean is_copy;
- jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
- ASSERT_TRUE(elements != nullptr);
- env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
- reinterpret_cast<jbyte*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected byte[]"
- : "attempt to release byte primitive array elements with an object of type boolean[]");
- env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
- reinterpret_cast<jshort*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected short[]"
- : "attempt to release short primitive array elements with an object of type boolean[]");
- env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
- reinterpret_cast<jchar*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected char[]"
- : "attempt to release char primitive array elements with an object of type boolean[]");
- env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
- reinterpret_cast<jint*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected int[]"
- : "attempt to release int primitive array elements with an object of type boolean[]");
- env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
- reinterpret_cast<jlong*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected long[]"
- : "attempt to release long primitive array elements with an object of type boolean[]");
- env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
- reinterpret_cast<jfloat*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected float[]"
- : "attempt to release float primitive array elements with an object of type boolean[]");
- env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
- reinterpret_cast<jdouble*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected double[]"
- : "attempt to release double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type byte[] expected boolean[]"
- : "attempt to release boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
- jni_abort_catcher.Check(
- check_jni ? "jarray argument has non-array type: java.lang.String"
- : "attempt to release boolean primitive array elements with an object of type "
+ {
+ jbooleanArray array = env_->NewBooleanArray(10);
+ ASSERT_TRUE(array != nullptr);
+ jboolean is_copy;
+ jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
+ ASSERT_TRUE(elements != nullptr);
+ env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
+ reinterpret_cast<jbyte*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to release byte primitive array elements with an object of type boolean[]");
+ env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
+ reinterpret_cast<jshort*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to release short primitive array elements with an object of type boolean[]");
+ env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
+ reinterpret_cast<jchar*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to release char primitive array elements with an object of type boolean[]");
+ env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
+ reinterpret_cast<jint*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to release int primitive array elements with an object of type boolean[]");
+ env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
+ reinterpret_cast<jlong*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to release long primitive array elements with an object of type boolean[]");
+ env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
+ reinterpret_cast<jfloat*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to release float primitive array elements with an object of type boolean[]");
+ env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
+ reinterpret_cast<jdouble*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to release double primitive array elements with an object of type boolean[]");
+
+ // Don't leak the elements array.
+ env_->ReleaseBooleanArrayElements(array, elements, 0);
+ }
+ {
+ jbyteArray array = env_->NewByteArray(10);
+ jboolean is_copy;
+ jbyte* elements = env_->GetByteArrayElements(array, &is_copy);
+
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array),
+ reinterpret_cast<jboolean*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to release boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object),
+ reinterpret_cast<jboolean*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to release boolean primitive array elements with an object of type "
"java.lang.String");
+ // Don't leak the elements array.
+ env_->ReleaseByteArrayElements(array, elements, 0);
+ }
EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
}
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index d448460dc3..e377542e45 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -110,11 +110,17 @@ void SignalCatcher::Output(const std::string& s) {
PLOG(ERROR) << "Unable to open stack trace file '" << stack_trace_file_ << "'";
return;
}
- std::unique_ptr<File> file(new File(fd, stack_trace_file_));
- if (!file->WriteFully(s.data(), s.size())) {
- PLOG(ERROR) << "Failed to write stack traces to '" << stack_trace_file_ << "'";
+ std::unique_ptr<File> file(new File(fd, stack_trace_file_, true));
+ bool success = file->WriteFully(s.data(), s.size());
+ if (success) {
+ success = file->FlushCloseOrErase() == 0;
} else {
+ file->Erase();
+ }
+ if (success) {
LOG(INFO) << "Wrote stack traces to '" << stack_trace_file_ << "'";
+ } else {
+ PLOG(ERROR) << "Failed to write stack traces to '" << stack_trace_file_ << "'";
}
}
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 29c01e4d47..2cc50b3732 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -431,6 +431,15 @@ void Trace::Stop() {
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind);
}
+ if (the_trace->trace_file_.get() != nullptr) {
+ // Do not try to erase, so flush and close explicitly.
+ if (the_trace->trace_file_->Flush() != 0) {
+ PLOG(ERROR) << "Could not flush trace file.";
+ }
+ if (the_trace->trace_file_->Close() != 0) {
+ PLOG(ERROR) << "Could not close trace file.";
+ }
+ }
delete the_trace;
}
runtime->GetThreadList()->ResumeAll();
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 9a4c8759b8..ad46be644f 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1120,13 +1120,20 @@ std::string GetSchedulerGroupName(pid_t tid) {
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
mirror::ArtMethod* current_method) {
- // TODO: enable on __linux__ b/15446488.
-#if 0
+#if __linux__
// b/18119146
if (RUNNING_ON_VALGRIND != 0) {
return;
}
+#if !defined(HAVE_ANDROID_OS)
+ if (GetTid() != tid) {
+ // TODO: dumping of other threads is disabled to avoid crashes during stress testing.
+ // b/15446488.
+ return;
+ }
+#endif
+
std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
if (!backtrace->Unwind(0)) {
os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 96abee2dc3..70a4ddaabf 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -41,7 +41,7 @@ TEST_F(ZipArchiveTest, FindAndExtract) {
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
- std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename()));
+ std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
ASSERT_TRUE(file.get() != NULL);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index 51bf8471c5..78c92fc51a 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -1,3 +1,4 @@
+b17325447 passes
b17630605 passes
b17411468 passes
b2296099 passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 9ad8ea7b1b..285c3608ca 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -30,6 +30,7 @@ public class Main {
}
public static void main(String args[]) throws Exception {
+ b17325447();
b17630605();
b17411468();
b2296099Test();
@@ -64,6 +65,31 @@ public class Main {
minDoubleWith3ConstsTest();
}
+ public static double b17325447_i1(int i1, double f) {
+ return f;
+ }
+
+ public static double b17325447_i2(int i1, int i2, double f) {
+ return f;
+ }
+
+ public static double b17325447_i3(int i1, int i2, int i3, double f) {
+ return f;
+ }
+
+ public static void b17325447() {
+ // b/17325447 - x86 handling of special identity method w/ double spanning reg/mem.
+ double d = 0.0;
+ d += b17325447_i1(123, 1.0);
+ d += b17325447_i2(123, 456, 2.0);
+ d += b17325447_i3(123, 456, 789, 3.0);
+ if (d == 6.0) {
+ System.out.println("b17325447 passes");
+ } else {
+ System.out.println("b17325447 fails: " + d);
+ }
+ }
+
public static void b17630605() {
// b/17630605 - failure to properly handle min long immediates.
long a1 = 40455547223404749L;
diff --git a/test/430-live-register-slow-path/expected.txt b/test/430-live-register-slow-path/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/430-live-register-slow-path/expected.txt
diff --git a/test/430-live-register-slow-path/info.txt b/test/430-live-register-slow-path/info.txt
new file mode 100644
index 0000000000..6f2af283b9
--- /dev/null
+++ b/test/430-live-register-slow-path/info.txt
@@ -0,0 +1,2 @@
+Regression test for the linear scan register allocator. It used
+to miscompute the number of live registers at a safepoint.
diff --git a/test/430-live-register-slow-path/src/Main.java b/test/430-live-register-slow-path/src/Main.java
new file mode 100644
index 0000000000..b84e6479f0
--- /dev/null
+++ b/test/430-live-register-slow-path/src/Main.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ $opt$TestSlowPath();
+ }
+
+ public static void $opt$TestSlowPath() {
+ Object[] o = bar();
+ assertEquals(0, o.length);
+ // The slowpath of the instanceof requires the live register
+ // holding `o` to be saved before going into runtime. The linear
+ // scan register allocator used to miscompute the number of
+ // live registers at a safepoint, so the place at which the register
+ // was saved was wrong.
+ doCall(o instanceof Interface[], o);
+ }
+
+ public static void assertEquals(int a, int b) {}
+ public static boolean doCall(boolean val, Object o) { return val; }
+
+ static Object[] bar() { return new Object[0]; }
+
+ static interface Interface {}
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 01d7b8164f..7674a8ab68 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -5,4 +5,5 @@ negLong
sameFieldNames
b/18380491
invoke-super abstract
+BadCaseInOpRegRegReg
Done!
diff --git a/test/800-smali/smali/BadCaseInOpRegRegReg.smali b/test/800-smali/smali/BadCaseInOpRegRegReg.smali
new file mode 100644
index 0000000000..2683790365
--- /dev/null
+++ b/test/800-smali/smali/BadCaseInOpRegRegReg.smali
@@ -0,0 +1,13 @@
+.class public LBadCaseInOpRegRegReg;
+
+.super Ljava/lang/Object;
+
+.method public static getInt()I
+ .registers 2
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ add-int/2addr v0, v1
+ add-int/lit8 v1, v0, 0x1
+ mul-int v0, v1, v0
+ return v0
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 3f613efd78..8d318c354b 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -60,6 +60,7 @@ public class Main {
new Object[]{42}, null, 42));
testCases.add(new TestCase("invoke-super abstract", "B18380491ConcreteClass", "foo",
new Object[]{0}, new AbstractMethodError(), null));
+ testCases.add(new TestCase("BadCaseInOpRegRegReg", "BadCaseInOpRegRegReg", "getInt", null, null, 2));
}
public void runTests() {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 11713d4853..29da2f6200 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -177,15 +177,6 @@ endif
TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
-TEST_ART_BROKEN_RUN_TESTS := \
- 004-ThreadStress
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_RUN_TESTS :=
-
# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
TEST_ART_BROKEN_PREBUILD_RUN_TESTS := \
116-nodex2oat
@@ -337,7 +328,9 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
424-checkcast \
427-bounds \
428-optimizing-arith-rem \
+ 430-live-register-slow-path \
701-easy-div-rem \
+ 800-smali \
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \