Merge "Use HOptimization abstraction for running optimizations."
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index b4eebb3..a3b4df3 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -536,8 +536,8 @@
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
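Both ARM call generators above now pass the target pointer size into EntryPointFromQuickCompiledCodeOffset(), so the entrypoint offset is computed for the target ISA rather than the compiling host. A minimal standalone sketch of why the parameter is needed; the field sizes and slot index below are illustrative assumptions, not ART's real ArtMethod layout:

#include <cstddef>

// Assumed layout: fixed-width fields followed by pointer-sized entrypoint
// slots, so every entrypoint offset scales with the target pointer size.
constexpr size_t kFixedPrefixSize = 32;    // assumption for illustration
constexpr size_t kQuickCodeSlotIndex = 3;  // assumption for illustration

constexpr size_t EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
  return kFixedPrefixSize + kQuickCodeSlotIndex * pointer_size;
}

static_assert(EntryPointFromQuickCompiledCodeOffset(4) !=
                  EntryPointFromQuickCompiledCodeOffset(8),
              "32-bit and 64-bit targets disagree on the offset, so call "
              "sites must say which target they mean");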
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 106996e..3e5b7bf 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -473,8 +473,8 @@
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 4cb12f1..a7900ae 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -476,9 +476,10 @@
static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
- cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
+ cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
cg->TargetPtrReg(kInvokeTgt));
return true;
}
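Here the pointer size is derived from the compilation unit's instruction set instead of being hard-coded per backend. A hedged sketch of such an ISA-to-pointer-size mapping (the enum and values are placeholders; ART's helper of the same name is the real source of truth):

#include <cstddef>

enum class InstructionSet { kArm, kArm64, kMips, kX86, kX86_64 };

// Sketch only: 64-bit ISAs use 8-byte pointers, the rest 4-byte.
constexpr size_t InstructionSetPointerSize(InstructionSet isa) {
  return (isa == InstructionSet::kArm64 || isa == InstructionSet::kX86_64)
             ? 8u : 4u;
}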
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c945f7f..70ef991 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -201,6 +201,16 @@
RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
RegStorage reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
+ if (cu_->instruction_set == kX86) {
+ // Can't handle double split between reg & memory. Flush reg half to memory.
+ if (rl_dest.reg.IsDouble() && (reg_arg_low.Valid() != reg_arg_high.Valid())) {
+ DCHECK(reg_arg_low.Valid());
+ DCHECK(!reg_arg_high.Valid());
+ Store32Disp(TargetPtrReg(kSp), offset, reg_arg_low);
+ reg_arg_low = RegStorage::InvalidReg();
+ }
+ }
+
if (reg_arg_low.Valid() && reg_arg_high.Valid()) {
OpRegCopyWide(rl_dest.reg, RegStorage::MakeRegPair(reg_arg_low, reg_arg_high));
} else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
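The new x86 branch covers a 64-bit value whose low half was promoted to a register while the high half stayed on the stack: a double cannot be assembled from such a split, so the register half is flushed to its home slot and the value is then loaded entirely from memory. A self-contained sketch of the decision, with hypothetical helper names:

#include <cstdio>

struct RegHalf {
  bool valid;  // true if this 32-bit half currently lives in a register
  int reg;
};

// Hypothetical stand-in for Store32Disp(): spill one half to the stack.
void SpillToHomeSlot(int sp_offset, RegHalf* half) {
  std::printf("spill r%d to [sp + %d]\n", half->reg, sp_offset);
  half->valid = false;  // the half now lives only in memory
}

void LoadWideArgX86(bool dest_is_double, RegHalf* low, RegHalf* high,
                    int offset) {
  if (dest_is_double && low->valid != high->valid) {
    // The DCHECKs in the hunk guarantee the split is always low-in-register,
    // high-in-memory; flushing the register half lets the wide load read
    // both halves from contiguous stack memory.
    SpillToHomeSlot(offset, low);
  }
  // ... the all-register and all-memory paths follow, as above ...
}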
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index a54c55f..8d4cb3c 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -613,7 +613,8 @@
}
uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
bool QuickCompiler::WriteElf(art::File* file,
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index db2f272..f5f7113 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1006,7 +1006,8 @@
call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
} else {
call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ cu_->target64 ? 8 : 4).Int32Value());
}
} else {
call_insn = GenInvokeNoInlineCall(this, method_info.GetSharpType());
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 1805d59..ebf7874 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -240,7 +240,8 @@
bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
- (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method);
+ (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method) &&
+ !resolved_method->IsAbstract();
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 7e2be3e..dac1ef4 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -105,13 +105,16 @@
ASSERT_TRUE(success_image);
bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin());
ASSERT_TRUE(success_fixup);
+
+ ASSERT_EQ(dup_oat->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
+ << oat_file.GetFilename();
}
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
ASSERT_TRUE(file.get() != NULL);
ImageHeader image_header;
- file->ReadFully(&image_header, sizeof(image_header));
+  ASSERT_TRUE(file->ReadFully(&image_header, sizeof(image_header)));
ASSERT_TRUE(image_header.IsValid());
ASSERT_GE(image_header.GetImageBitmapOffset(), sizeof(image_header));
ASSERT_NE(0U, image_header.GetImageBitmapSize());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6096625..ef1bf81 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -68,6 +68,7 @@
namespace art {
bool ImageWriter::PrepareImageAddressSpace() {
+ target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
{
Thread::Current()->TransitionFromSuspendedToRunnable();
PruneNonImageClasses(); // Remove junk
@@ -148,6 +149,11 @@
SetOatChecksumFromElfFile(oat_file.get());
+ if (oat_file->FlushCloseOrErase() != 0) {
+ LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location;
+ return false;
+ }
+
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
if (image_file.get() == NULL) {
@@ -156,6 +162,7 @@
}
if (fchmod(image_file->Fd(), 0644) != 0) {
PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
+ image_file->Erase();
return EXIT_FAILURE;
}
@@ -163,6 +170,7 @@
CHECK_EQ(image_end_, image_header->GetImageSize());
if (!image_file->WriteFully(image_->Begin(), image_end_)) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
+ image_file->Erase();
return false;
}
@@ -172,9 +180,14 @@
image_header->GetImageBitmapSize(),
image_header->GetImageBitmapOffset())) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
+ image_file->Erase();
return false;
}
+ if (image_file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
+ return false;
+ }
return true;
}
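Each early return added above is paired with an Erase(), so a failed write never leaves a truncated image behind, and the success path ends in FlushCloseOrErase(). The same invariant expressed as RAII, as a hedged sketch (plain stdio, not ART's File API):

#include <cstdio>
#include <string>

class ScopedOutputFile {
 public:
  explicit ScopedOutputFile(const std::string& path)
      : path_(path), file_(std::fopen(path.c_str(), "wb")) {}
  ~ScopedOutputFile() {
    if (file_ != nullptr) {  // never committed: remove the partial file
      std::fclose(file_);
      std::remove(path_.c_str());
    }
  }
  bool Commit() {  // flush-and-close on the success path
    if (file_ == nullptr) return false;
    bool ok = std::fflush(file_) == 0;
    ok = (std::fclose(file_) == 0) && ok;  // always close, keep first error
    file_ = nullptr;                       // destructor must not erase now
    if (!ok) std::remove(path_.c_str());   // mirrors FlushCloseOrErase()
    return ok;
  }
  std::FILE* get() { return file_; }

 private:
  std::string path_;
  std::FILE* file_;
};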
@@ -214,7 +227,14 @@
void ImageWriter::AssignImageOffset(mirror::Object* object) {
DCHECK(object != nullptr);
SetImageOffset(object, image_end_);
- image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
+ size_t object_size;
+ if (object->IsArtMethod()) {
+ // Methods are sized based on the target pointer size.
+ object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ } else {
+ object_size = object->SizeOf();
+ }
+ image_end_ += RoundUp(object_size, 8); // 64-bit alignment
DCHECK_LT(image_end_, image_->Size());
}
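ArtMethod is sized with the target pointer size because its trailing entrypoint fields are pointer-width: a 64-bit host writing a 32-bit image would otherwise reserve too much space per method. A worked sketch with assumed numbers:

#include <cstddef>

constexpr size_t kSizeWithoutPointerFields = 40;  // assumed fixed prefix
constexpr size_t kNumPointerFields = 5;           // assumed entrypoint slots

constexpr size_t MethodInstanceSize(size_t target_ptr_size) {
  return kSizeWithoutPointerFields + kNumPointerFields * target_ptr_size;
}

static_assert(MethodInstanceSize(8) - MethodInstanceSize(4) ==
                  kNumPointerFields * 4,
              "every pointer field costs four extra bytes on a 64-bit target");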
@@ -754,7 +774,14 @@
size_t offset = image_writer->GetImageOffset(obj);
uint8_t* dst = image_writer->image_->Begin() + offset;
const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
- size_t n = obj->SizeOf();
+ size_t n;
+ if (obj->IsArtMethod()) {
+      // Size without pointer fields, since we don't want to overrun the buffer if the target
+      // ArtMethod is 32-bit but the source is 64-bit.
+ n = mirror::ArtMethod::SizeWithoutPointerFields();
+ } else {
+ n = obj->SizeOf();
+ }
DCHECK_LT(offset + n, image_writer->image_->Size());
memcpy(dst, src, n);
Object* copy = reinterpret_cast<Object*>(dst);
@@ -834,6 +861,10 @@
}
if (orig->IsArtMethod<kVerifyNone>()) {
FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
+ } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
+ // Set the right size for the target.
+ size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
}
}
@@ -892,29 +923,48 @@
void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
+  // For 64-bit targets we need to repack the current runtime's pointer-sized fields to the right
+ // locations.
+ // Copy all of the fields from the runtime methods to the target methods first since we did a
+ // bytewise copy earlier.
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
+ target_ptr_size_);
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(orig->GetNativeGcMap(), target_ptr_size_);
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
} else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
} else {
// We assume all methods have code. If they don't currently then we set them to use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
- (const_cast<uint8_t*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>(
+ GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
} else {
bool quick_is_interpreted;
const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);
// Portable entrypoint:
const uint8_t* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
@@ -937,18 +987,19 @@
// initialization.
portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
}
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);
-
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ portable_code, target_ptr_size_);
// JNI entrypoint:
if (orig->IsNative()) {
// The native method's pointer is set to a stub to look up via dlsym.
// Note this is not the code_ pointer, that is handled above.
- copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
+ target_ptr_size_);
} else {
// Normal (non-abstract non-native) methods have various tables to relocate.
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
- copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(native_gc_map, target_ptr_size_);
}
// Interpreter entrypoint:
@@ -956,9 +1007,11 @@
uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
? interpreter_to_interpreter_bridge_offset_
: interpreter_to_compiled_code_bridge_offset_;
- copy->SetEntryPointFromInterpreter<kVerifyNone>(
+ EntryPointFromInterpreter* interpreter_entrypoint =
reinterpret_cast<EntryPointFromInterpreter*>(
- const_cast<uint8_t*>(GetOatAddress(interpreter_code))));
+ const_cast<uint8_t*>(GetOatAddress(interpreter_code)));
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ interpreter_entrypoint, target_ptr_size_);
}
}
}
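FixupMethod above bulk-copies only the non-pointer prefix and then rewrites every pointer-sized field at its target offset and width; leaving the host's 64-bit slots in place would misplace fields in a 32-bit image. A minimal sketch of writing one such slot (hypothetical helper, not ART code):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Write a host pointer value into an image slot of target width.
void WriteTargetPointer(uint8_t* slot, uint64_t value, size_t target_ptr_size) {
  if (target_ptr_size == 4u) {
    const uint32_t narrow = static_cast<uint32_t>(value);  // assumes it fits
    std::memcpy(slot, &narrow, sizeof(narrow));
  } else {
    std::memcpy(slot, &value, sizeof(value));
  }
}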
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1217145..2fec0aa 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -47,7 +47,8 @@
portable_imt_conflict_trampoline_offset_(0), portable_resolution_trampoline_offset_(0),
portable_to_interpreter_bridge_offset_(0), quick_generic_jni_trampoline_offset_(0),
quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
- quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic) {
+ quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
+ target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())) {
CHECK_NE(image_begin, 0U);
}
@@ -224,6 +225,9 @@
uint32_t quick_to_interpreter_bridge_offset_;
const bool compile_pic_;
+ // Size of pointers on the target architecture.
+ size_t target_ptr_size_;
+
friend class FixupVisitor;
friend class FixupClassVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 3c3aa02..c3fe75b 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -307,7 +307,9 @@
}
// 9. Plant call to native code associated with method.
- __ Call(main_jni_conv->MethodStackOffset(), mirror::ArtMethod::NativeMethodOffset(),
+ MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset(
+ InstructionSetPointerSize(instruction_set));
+ __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 659c332..c6beb36 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -309,7 +309,7 @@
arm::Thumb2Assembler assembler;
assembler.LoadFromOffset(
arm::kLoadWord, arm::PC, arm::R0,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
assembler.bkpt(0);
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
@@ -363,7 +363,8 @@
// The thunk just uses the entry point in the ArtMethod. This works even for calls
// to the generic JNI and interpreter trampolines.
arm64::Arm64Assembler assembler;
- Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 1debaa5..1701ef5 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -80,7 +80,7 @@
public:
explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(
@@ -96,7 +96,7 @@
public:
explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(
@@ -112,7 +112,7 @@
public:
StackOverflowCheckSlowPathARM() {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
__ LoadFromOffset(kLoadWord, PC, TR,
QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowStackOverflow).Int32Value());
@@ -124,10 +124,10 @@
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
+ SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
@@ -166,7 +166,7 @@
index_location_(index_location),
length_location_(length_location) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
@@ -199,7 +199,7 @@
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = at_->GetLocations();
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -245,7 +245,7 @@
public:
explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -281,7 +281,7 @@
object_class_(object_class),
dex_pc_(dex_pc) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -1188,7 +1188,8 @@
kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache()));
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
// LR()
__ blx(LR);
@@ -1229,7 +1230,8 @@
__ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetMethodAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
@@ -1265,7 +1267,8 @@
__ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetImtEntryAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index acc3fd6..c00fac1 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -28,7 +28,8 @@
class CodeGeneratorARM;
class SlowPathCodeARM;
-static constexpr size_t kArmWordSize = 4;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kArmWordSize = kArmPointerSize;
static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
static constexpr RegisterPair kParameterCorePairRegisters[] = { R1_R2, R2_R3 };
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5432882..82dced5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -48,18 +48,28 @@
return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
}
+bool IsIntegralType(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool Is64BitType(Primitive::Type type) {
return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
}
// Convenience helpers to ease conversion to and from VIXL operands.
+static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
+ "Unexpected values for register codes.");
int VIXLRegCodeFromART(int code) {
- // TODO: static check?
- DCHECK_EQ(SP, 31);
- DCHECK_EQ(WSP, 31);
- DCHECK_EQ(XZR, 32);
- DCHECK_EQ(WZR, 32);
if (code == SP) {
return vixl::kSPRegInternalCode;
}
@@ -70,11 +80,6 @@
}
int ARTRegCodeFromVIXL(int code) {
- // TODO: static check?
- DCHECK_EQ(SP, 31);
- DCHECK_EQ(WSP, 31);
- DCHECK_EQ(XZR, 32);
- DCHECK_EQ(WZR, 32);
if (code == vixl::kSPRegInternalCode) {
return SP;
}
@@ -128,6 +133,17 @@
instr->InputAt(input_index)->GetType());
}
+CPURegister OutputCPURegister(HInstruction* instr) {
+ return IsFPType(instr->GetType()) ? static_cast<CPURegister>(OutputFPRegister(instr))
+ : static_cast<CPURegister>(OutputRegister(instr));
+}
+
+CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
+ return IsFPType(instr->InputAt(index)->GetType())
+ ? static_cast<CPURegister>(InputFPRegisterAt(instr, index))
+ : static_cast<CPURegister>(InputRegisterAt(instr, index));
+}
+
int64_t Int64ConstantFrom(Location location) {
HConstant* instr = location.GetConstant();
return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
@@ -151,14 +167,18 @@
return MemOperand(sp, location.GetStackIndex());
}
-MemOperand HeapOperand(const Register& base, Offset offset) {
+MemOperand HeapOperand(const Register& base, size_t offset) {
// A heap reference must be 32 bits, so it fits in a W register.
DCHECK(base.IsW());
- return MemOperand(base.X(), offset.SizeValue());
+ return MemOperand(base.X(), offset);
}
-MemOperand HeapOperandFrom(Location location, Primitive::Type type, Offset offset) {
- return HeapOperand(RegisterFrom(location, type), offset);
+MemOperand HeapOperand(const Register& base, Offset offset) {
+ return HeapOperand(base, offset.SizeValue());
+}
+
+MemOperand HeapOperandFrom(Location location, Offset offset) {
+ return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
}
Location LocationFrom(const Register& reg) {
@@ -227,7 +247,8 @@
return ARM64ReturnLocation(return_type);
}
-#define __ reinterpret_cast<Arm64Assembler*>(codegen->GetAssembler())->vixl_masm_->
+#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
class SlowPathCodeARM64 : public SlowPathCode {
public:
@@ -245,45 +266,125 @@
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
- explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
- Location index_location,
- Location length_location)
- : instruction_(instruction),
- index_location_(index_location),
- length_location_(length_location) {}
+ BoundsCheckSlowPathARM64() {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorARM64* arm64_codegen = reinterpret_cast<CodeGeneratorARM64*>(codegen);
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
- InvokeRuntimeCallingConvention calling_convention;
- arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(0)),
- index_location_, Primitive::kPrimInt);
- arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(1)),
- length_location_, Primitive::kPrimInt);
- size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowArrayBounds).SizeValue();
- __ Ldr(lr, MemOperand(tr, offset));
- __ Blr(lr);
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ __ Brk(__LINE__); // TODO: Unimplemented BoundsCheckSlowPathARM64.
}
private:
- HBoundsCheck* const instruction_;
- const Location index_location_;
- const Location length_location_;
-
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};
+class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
+};
+
+class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ LoadClassSlowPathARM64(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
+ arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
+ int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ Primitive::Type type = at_->GetType();
+ arm64_codegen->MoveHelper(out, calling_convention.GetReturnLocation(type), type);
+ }
+
+ codegen->RestoreLiveRegisters(locations);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
+};
+
+class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0).W());
+ __ Mov(calling_convention.GetRegisterAt(1).W(), instruction_->GetStringIndex());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
+ Primitive::Type type = instruction_->GetType();
+ arm64_codegen->MoveHelper(locations->Out(), calling_convention.GetReturnLocation(type), type);
+
+ codegen->RestoreLiveRegisters(locations);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
+};
+
class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowNullPointer).Int32Value();
- __ Ldr(lr, MemOperand(tr, offset));
- __ Blr(lr);
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
}
private:
@@ -298,13 +399,18 @@
HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pTestSuspend).SizeValue();
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- __ Ldr(lr, MemOperand(tr, offset));
- __ Blr(lr);
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- __ B(GetReturnLabel());
+ codegen->SaveLiveRegisters(instruction_->GetLocations());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
+ codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ if (successor_ == nullptr) {
+ __ B(GetReturnLabel());
+ } else {
+ __ B(arm64_codegen->GetLabelOf(successor_));
+ }
}
vixl::Label* GetReturnLabel() {
@@ -324,6 +430,20 @@
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};
+class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ TypeCheckSlowPathARM64() {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ Brk(__LINE__); // TODO: Unimplemented TypeCheckSlowPathARM64.
+    __ B(GetExitLabel());
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
+};
+
#undef __
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
@@ -356,11 +476,12 @@
location_builder_(graph, this),
instruction_visitor_(graph, this) {}
-#define __ reinterpret_cast<Arm64Assembler*>(GetAssembler())->vixl_masm_->
+#undef __
+#define __ GetVIXLAssembler()->
void CodeGeneratorARM64::GenerateFrameEntry() {
// TODO: Add proper support for the stack overflow check.
- UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireX();
__ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
__ Ldr(temp, MemOperand(temp, 0));
@@ -378,7 +499,7 @@
// ... : other preserved registers.
// sp[frame_size - regs_size]: first preserved register.
// ... : reserved frame space.
- // sp[0] : context pointer.
+ // sp[0] : current method.
}
void CodeGeneratorARM64::GenerateFrameExit() {
@@ -413,7 +534,7 @@
__ Mov(dst, value);
} else {
DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
- UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
__ Mov(temp, value);
__ Str(temp, StackOperandFrom(location));
@@ -465,7 +586,7 @@
}
void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
- UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
Register card = temps.AcquireX();
Register temp = temps.AcquireX();
vixl::Label done;
@@ -522,6 +643,19 @@
stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}
+void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
+ if (constant->IsIntConstant() || constant->IsLongConstant()) {
+ __ Mov(Register(destination),
+ constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
+ : constant->AsLongConstant()->GetValue());
+ } else if (constant->IsFloatConstant()) {
+ __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
+ } else {
+ DCHECK(constant->IsDoubleConstant());
+ __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
+ }
+}
+
void CodeGeneratorARM64::MoveHelper(Location destination,
Location source,
Primitive::Type type) {
@@ -544,13 +678,7 @@
} else if (source.IsFpuRegister()) {
__ Fmov(dst, FPRegisterFrom(source, type));
} else {
- HConstant* cst = source.GetConstant();
- if (cst->IsFloatConstant()) {
- __ Fmov(dst, cst->AsFloatConstant()->GetValue());
- } else {
- DCHECK(cst->IsDoubleConstant());
- __ Fmov(dst, cst->AsDoubleConstant()->GetValue());
- }
+ MoveConstant(dst, source.GetConstant());
}
} else {
DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
@@ -558,8 +686,21 @@
__ Str(RegisterFrom(source, type), StackOperandFrom(destination));
} else if (source.IsFpuRegister()) {
__ Str(FPRegisterFrom(source, type), StackOperandFrom(destination));
+ } else if (source.IsConstant()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ HConstant* cst = source.GetConstant();
+ CPURegister temp;
+ if (cst->IsIntConstant() || cst->IsLongConstant()) {
+ temp = cst->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
+ } else {
+ DCHECK(cst->IsFloatConstant() || cst->IsDoubleConstant());
+ temp = cst->IsFloatConstant() ? temps.AcquireS() : temps.AcquireD();
+ }
+ MoveConstant(temp, cst);
+ __ Str(temp, StackOperandFrom(destination));
} else {
- UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
+ UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
__ Ldr(temp, StackOperandFrom(source));
__ Str(temp, StackOperandFrom(destination));
@@ -568,61 +709,89 @@
}
void CodeGeneratorARM64::Load(Primitive::Type type,
- vixl::Register dst,
+ vixl::CPURegister dst,
const vixl::MemOperand& src) {
switch (type) {
case Primitive::kPrimBoolean:
- __ Ldrb(dst, src);
+ __ Ldrb(Register(dst), src);
break;
case Primitive::kPrimByte:
- __ Ldrsb(dst, src);
+ __ Ldrsb(Register(dst), src);
break;
case Primitive::kPrimShort:
- __ Ldrsh(dst, src);
+ __ Ldrsh(Register(dst), src);
break;
case Primitive::kPrimChar:
- __ Ldrh(dst, src);
+ __ Ldrh(Register(dst), src);
break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- DCHECK(dst.Is64Bits() == (type == Primitive::kPrimLong));
- __ Ldr(dst, src);
- break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
+ DCHECK(dst.Is64Bits() == Is64BitType(type));
+ __ Ldr(dst, src);
+ break;
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
}
void CodeGeneratorARM64::Store(Primitive::Type type,
- vixl::Register rt,
+ vixl::CPURegister rt,
const vixl::MemOperand& dst) {
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- __ Strb(rt, dst);
+ __ Strb(Register(rt), dst);
break;
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- __ Strh(rt, dst);
+ __ Strh(Register(rt), dst);
break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- DCHECK(rt.Is64Bits() == (type == Primitive::kPrimLong));
- __ Str(rt, dst);
- break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
+ DCHECK(rt.Is64Bits() == Is64BitType(type));
+ __ Str(rt, dst);
+ break;
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
}
-#undef __
-#define __ GetAssembler()->vixl_masm_->
+void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
+ DCHECK(current_method.IsW());
+ __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+}
+
+void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
+ HInstruction* instruction,
+ uint32_t dex_pc) {
+ __ Ldr(lr, MemOperand(tr, entry_point_offset));
+ __ Blr(lr);
+ RecordPcInfo(instruction, dex_pc);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
+
+void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
+ vixl::Register class_reg) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireW();
+ __ Ldr(temp, HeapOperand(class_reg, mirror::Class::StatusOffset()));
+ __ Cmp(temp, mirror::Class::kStatusInitialized);
+ __ B(lt, slow_path->GetEntryLabel());
+ // Even if the initialized flag is set, we may be in a situation where caches are not synced
+ // properly. Therefore, we do a memory fence.
+ __ Dmb(InnerShareable, BarrierAll);
+ __ Bind(slow_path->GetExitLabel());
+}
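The status compare followed by the Dmb gives the loads after the check acquire semantics with respect to the initializing thread's stores. Roughly the same contract in portable C++, with an assumed status constant:

#include <atomic>

constexpr int kStatusInitialized = 10;  // value assumed for illustration

bool ClassIsVisiblyInitialized(const std::atomic<int>& status) {
  // memory_order_acquire plays the role of the Dmb above: static-field reads
  // performed after a true result observe the initializing thread's writes.
  return status.load(std::memory_order_acquire) >= kStatusInitialized;
}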
InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
CodeGeneratorARM64* codegen)
@@ -631,28 +800,14 @@
codegen_(codegen) {}
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- M(And) \
- M(CheckCast) \
- M(ClinitCheck) \
- M(DivZeroCheck) \
- M(InstanceOf) \
- M(InvokeInterface) \
- M(LoadClass) \
- M(LoadException) \
- M(LoadString) \
- M(MonitorOperation) \
- M(Or) \
M(ParallelMove) \
- M(Rem) \
- M(StaticFieldGet) \
- M(StaticFieldSet) \
- M(Throw) \
- M(TypeConversion) \
- M(Xor) \
+ M(Rem)
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
enum UnimplementedInstructionBreakCode {
+ // Using a base helps identify when we hit such breakpoints.
+ UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
@@ -671,9 +826,9 @@
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
+#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
-void LocationsBuilderARM64::HandleAddSub(HBinaryOperation* instr) {
- DCHECK(instr->IsAdd() || instr->IsSub());
+void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
DCHECK_EQ(instr->InputCount(), 2U);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
Primitive::Type type = instr->GetResultType();
@@ -689,7 +844,7 @@
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
@@ -697,9 +852,7 @@
}
}
-void InstructionCodeGeneratorARM64::HandleAddSub(HBinaryOperation* instr) {
- DCHECK(instr->IsAdd() || instr->IsSub());
-
+void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
Primitive::Type type = instr->GetType();
switch (type) {
@@ -710,8 +863,15 @@
Operand rhs = InputOperandAt(instr, 1);
if (instr->IsAdd()) {
__ Add(dst, lhs, rhs);
- } else {
+ } else if (instr->IsAnd()) {
+ __ And(dst, lhs, rhs);
+ } else if (instr->IsOr()) {
+ __ Orr(dst, lhs, rhs);
+ } else if (instr->IsSub()) {
__ Sub(dst, lhs, rhs);
+ } else {
+ DCHECK(instr->IsXor());
+ __ Eor(dst, lhs, rhs);
}
break;
}
@@ -722,22 +882,32 @@
FPRegister rhs = InputFPRegisterAt(instr, 1);
if (instr->IsAdd()) {
__ Fadd(dst, lhs, rhs);
- } else {
+ } else if (instr->IsSub()) {
__ Fsub(dst, lhs, rhs);
+ } else {
+ LOG(FATAL) << "Unexpected floating-point binary operation";
}
break;
}
default:
- LOG(FATAL) << "Unexpected add/sub type " << type;
+ LOG(FATAL) << "Unexpected binary operation type " << type;
}
}
void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
- HandleAddSub(instruction);
+ HandleBinaryOp(instruction);
}
void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
- HandleAddSub(instruction);
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
}
void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
@@ -752,11 +922,10 @@
LocationSummary* locations = instruction->GetLocations();
Primitive::Type type = instruction->GetType();
Register obj = InputRegisterAt(instruction, 0);
- Register out = OutputRegister(instruction);
Location index = locations->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
MemOperand source(obj);
- UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
if (index.IsConstant()) {
offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
@@ -768,7 +937,7 @@
source = MemOperand(temp, offset);
}
- codegen_->Load(type, out, source);
+ codegen_->Load(type, OutputCPURegister(instruction), source);
}
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
@@ -802,18 +971,16 @@
void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
Primitive::Type value_type = instruction->GetComponentType();
if (value_type == Primitive::kPrimNot) {
- __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAputObject).Int32Value()));
- __ Blr(lr);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- DCHECK(!codegen_->IsLeafMethod());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
+
} else {
LocationSummary* locations = instruction->GetLocations();
Register obj = InputRegisterAt(instruction, 0);
- Register value = InputRegisterAt(instruction, 2);
+ CPURegister value = InputCPURegisterAt(instruction, 2);
Location index = locations->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
MemOperand destination(obj);
- UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
if (index.IsConstant()) {
offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
@@ -829,6 +996,66 @@
}
}
+void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64();
+ codegen_->AddSlowPath(slow_path);
+
+ __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
+ __ B(slow_path->GetEntryLabel(), hs);
+}
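The bounds check branches on hs (unsigned >=), so a negative index, reinterpreted as a large unsigned value, also takes the slow path; one compare covers both bounds. The same trick in portable form:

#include <cstdint>

// A single unsigned compare checks 0 <= index && index < length.
bool IndexInBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}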
+
+void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+  Register obj = InputRegisterAt(instruction, 0);
+  Register cls = InputRegisterAt(instruction, 1);
+ Register temp = temps.AcquireW();
+
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64();
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ Cbz(obj, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ Ldr(temp, HeapOperand(obj, mirror::Object::ClassOffset()));
+ __ Cmp(temp, cls);
+ __ B(ne, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
+}
+
void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -847,7 +1074,7 @@
Register result = OutputRegister(instruction);
Register left = InputRegisterAt(instruction, 0);
Operand right = InputOperandAt(instruction, 1);
- __ Subs(result, left, right);
+ __ Subs(result.X(), left, right);
__ B(eq, &done);
__ Mov(result, 1);
__ Cneg(result, result, le);
@@ -894,6 +1121,7 @@
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
+#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION
void LocationsBuilderARM64::VisitDiv(HDiv* div) {
@@ -937,6 +1165,33 @@
}
}
+void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Location value = instruction->GetLocations()->InAt(0);
+
+ if (value.IsConstant()) {
+ int64_t divisor = Int64ConstantFrom(value);
+ if (divisor == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+      LOG(FATAL) << "Divisions by non-zero constants should have been optimized away.";
+ }
+ } else {
+ __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
+ }
+}
+
void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
@@ -956,7 +1211,7 @@
UNUSED(exit);
if (kIsDebugBuild) {
down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
- __ Brk(0); // TODO: Introduce special markers for such code locations.
+ __ Brk(__LINE__); // TODO: Introduce special markers for such code locations.
}
}
@@ -1039,7 +1294,7 @@
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
MemOperand field = MemOperand(InputRegisterAt(instruction, 0),
instruction->GetFieldOffset().Uint32Value());
- codegen_->Load(instruction->GetType(), OutputRegister(instruction), field);
+ codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
}
void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
@@ -1050,14 +1305,56 @@
void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
Primitive::Type field_type = instruction->GetFieldType();
- Register value = InputRegisterAt(instruction, 1);
+ CPURegister value = InputCPURegisterAt(instruction, 1);
Register obj = InputRegisterAt(instruction, 0);
codegen_->Store(field_type, value, MemOperand(obj, instruction->GetFieldOffset().Uint32Value()));
if (field_type == Primitive::kPrimNot) {
- codegen_->MarkGCCard(obj, value);
+ codegen_->MarkGCCard(obj, Register(value));
}
}
+void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind =
+ instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), true); // The output does overlap inputs.
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+  Register obj = InputRegisterAt(instruction, 0);
+  Register cls = InputRegisterAt(instruction, 1);
+ Register out = OutputRegister(instruction);
+
+ vixl::Label done;
+
+ // Return 0 if `obj` is null.
+ // TODO: Avoid this check if we know `obj` is not null.
+ __ Mov(out, 0);
+ __ Cbz(obj, &done);
+
+ // Compare the class of `obj` with `cls`.
+ __ Ldr(out, MemOperand(obj, mirror::Object::ClassOffset().Int32Value()));
+ __ Cmp(out, cls);
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ Cset(out, eq);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathARM64();
+ codegen_->AddSlowPath(slow_path);
+ __ B(ne, slow_path->GetEntryLabel());
+ __ Mov(out, 1);
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ __ Bind(&done);
+}
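For a final class, instanceof reduces to an exact class-pointer comparison, since no subtype can exist; that is why the generator can use Cset(eq) and skip the slow path. A sketch of the collapsed check:

struct Class {};  // stand-in for mirror::Class

// 'final' collapses the subtype walk to one pointer compare.
bool IsInstanceOfFinal(const Class* obj_klass, const Class* final_klass) {
  return obj_klass == final_klass;  // a final class has no subclasses
}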
+
void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
@@ -1068,14 +1365,6 @@
UNUSED(constant);
}
-void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
- HandleInvoke(invoke);
-}
-
-void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- HandleInvoke(invoke);
-}
-
void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
@@ -1093,6 +1382,50 @@
}
}
+void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+ (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ Location receiver = invoke->GetLocations()->InAt(0);
+ Offset class_offset = mirror::Object::ClassOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+
+ // The register ip1 is required to be used for the hidden argument in
+ // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
+ UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
+ scratch_scope.Exclude(ip1);
+ __ Mov(ip1, invoke->GetDexMethodIndex());
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ Ldr(temp, StackOperandFrom(receiver));
+ __ Ldr(temp, HeapOperand(temp, class_offset));
+ } else {
+ __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+ }
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ Ldr(temp, HeapOperand(temp, method_offset));
+ // lr = temp->GetEntryPoint();
+ __ Ldr(lr, HeapOperand(temp, entry_point));
+ // lr();
+ __ Blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
// Make sure that ArtMethod* is passed in W0 as per the calling convention
@@ -1108,7 +1441,7 @@
// Currently we implement the app -> app logic, which looks up in the resolve cache.
// temp = method;
- __ Ldr(temp, MemOperand(sp, kCurrentMethodStackOffset));
+ codegen_->LoadCurrentMethod(temp);
// temp = temp->dex_cache_resolved_methods_;
__ Ldr(temp, MemOperand(temp.X(),
mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
@@ -1116,7 +1449,8 @@
__ Ldr(temp, MemOperand(temp.X(), index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(temp.X(),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize).SizeValue()));
// lr();
__ Blr(lr);
@@ -1131,7 +1465,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
@@ -1139,8 +1473,7 @@
__ Ldr(temp.W(), MemOperand(temp, class_offset.SizeValue()));
} else {
DCHECK(receiver.IsRegister());
- __ Ldr(temp.W(), HeapOperandFrom(receiver, Primitive::kPrimNot,
- class_offset));
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ Ldr(temp.W(), MemOperand(temp, method_offset));
@@ -1152,6 +1485,50 @@
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
+ Register out = OutputRegister(cls);
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ codegen_->LoadCurrentMethod(out);
+ __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ codegen_->LoadCurrentMethod(out);
+ __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
+ __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ Cbz(out, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
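
The Cbz/exit-label pairing above is the standard fast-path/slow-path contract: fall through while the dex-cache slot is non-null, divert to the runtime when it is. A self-contained model of that control flow (ResolveClassSlowPath is a stand-in for LoadClassSlowPathARM64):

    #include <cstdint>

    using ClassPtr = void*;

    static ClassPtr ResolveClassSlowPath(uint32_t /*type_index*/) {
      return nullptr;  // stub; the real slow path calls into the runtime
    }

    ClassPtr LoadClass(ClassPtr* resolved_types, uint32_t type_index) {
      ClassPtr out = resolved_types[type_index];   // Ldr(out, ...)
      if (out == nullptr) {                        // Cbz(out, entry label)
        out = ResolveClassSlowPath(type_index);
      }
      return out;                                  // Bind(exit label)
    }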
+
+void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
+ MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
+ __ Ldr(OutputRegister(instruction), exception);
+ __ Str(wzr, exception);
+}
+
void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
@@ -1161,6 +1538,24 @@
UNUSED(load);
}
+void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = OutputRegister(load);
+ codegen_->LoadCurrentMethod(out);
+ __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheStringsOffset()));
+ __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ Cbz(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
void LocationsBuilderARM64::VisitLocal(HLocal* local) {
local->SetLocations(nullptr);
}
@@ -1179,6 +1574,20 @@
UNUSED(constant);
}
+void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ codegen_->InvokeRuntime(instruction->IsEnter()
+ ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc());
+}
+
void LocationsBuilderARM64::VisitMul(HMul* mul) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
@@ -1194,7 +1603,7 @@
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
@@ -1224,15 +1633,15 @@
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case Primitive::kPrimInt:
- case Primitive::kPrimLong: {
+ case Primitive::kPrimLong:
locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
- }
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
@@ -1249,7 +1658,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
break;
default:
@@ -1274,14 +1683,10 @@
DCHECK(type_index.Is(w0));
Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
DCHECK(current_method.Is(w1));
- __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+ codegen_->LoadCurrentMethod(current_method);
__ Mov(type_index, instruction->GetTypeIndex());
- int32_t quick_entrypoint_offset =
- QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocArrayWithAccessCheck).Int32Value();
- __ Ldr(lr, MemOperand(tr, quick_entrypoint_offset));
- __ Blr(lr);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- DCHECK(!codegen_->IsLeafMethod());
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc());
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -1299,14 +1704,10 @@
DCHECK(type_index.Is(w0));
Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
DCHECK(current_method.Is(w1));
- __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+ codegen_->LoadCurrentMethod(current_method);
__ Mov(type_index, instruction->GetTypeIndex());
- int32_t quick_entrypoint_offset =
- QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value();
- __ Ldr(lr, MemOperand(tr, quick_entrypoint_offset));
- __ Blr(lr);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- DCHECK(!codegen_->IsLeafMethod());
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc());
}
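
Both allocation visitors now delegate to the new InvokeRuntime helper, which centralizes the four-instruction sequence deleted above. A plausible shape for it, inferred from that sequence (the real definition lives elsewhere in code_generator_arm64.cc and is not shown in this diff):

    // Inferred sketch, not the committed implementation.
    void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           uint32_t dex_pc) {
      vixl::MacroAssembler* masm = GetVIXLAssembler();
      masm->Ldr(lr, MemOperand(tr, entry_point_offset));  // entrypoint off Thread*
      masm->Blr(lr);
      RecordPcInfo(instruction, dex_pc);  // stack map at the return PC
      DCHECK(!IsLeafMethod());
    }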
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
@@ -1355,6 +1756,14 @@
}
}
+void LocationsBuilderARM64::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
@@ -1435,31 +1844,43 @@
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
- HandleAddSub(instruction);
+ HandleBinaryOp(instruction);
}
void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
- HandleAddSub(instruction);
+ HandleBinaryOp(instruction);
}
-void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ Register cls = InputRegisterAt(instruction, 0);
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+ codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), MemOperand(cls, offset));
+}
+
+void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
-void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
- instruction, locations->InAt(0), locations->InAt(1));
- codegen_->AddSlowPath(slow_path);
+void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ CPURegister value = InputCPURegisterAt(instruction, 1);
+ Register cls = InputRegisterAt(instruction, 0);
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+ Primitive::Type field_type = instruction->GetFieldType();
- __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
- __ B(slow_path->GetEntryLabel(), hs);
+ codegen_->Store(field_type, value, MemOperand(cls, offset));
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(cls, Register(value));
+ }
}
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -1486,5 +1907,74 @@
UNUSED(temp);
}
+void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type input_type = conversion->GetInputType();
+ Primitive::Type result_type = conversion->GetResultType();
+ if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
+ (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
+ LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
+ }
+
+ if (IsFPType(input_type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+
+ if (IsFPType(result_type)) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+
+ DCHECK_NE(input_type, result_type);
+
+ if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
+ int result_size = Primitive::ComponentSize(result_type);
+ int input_size = Primitive::ComponentSize(input_type);
+ int min_size = kBitsPerByte * std::min(result_size, input_size);
+ if ((result_type == Primitive::kPrimChar) ||
+ ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
+ __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, min_size);
+ } else {
+ __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, min_size);
+ }
+ return;
+ }
+
+ LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+ << " to " << result_type;
+}
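
The Ubfx/Sbfx choice above encodes one rule: char is the only unsigned integral type in the Java model, so conversions that produce char, or that widen from char, zero-extend; every other integral conversion sign-extends from the narrower width. A standalone restatement:

    #include <algorithm>
    #include <cstdint>

    // Mirrors the selection logic above; runnable sketch, not ART code.
    int64_t ConvertIntegral(int64_t input, int result_size, int input_size,
                            bool result_is_char, bool input_is_char) {
      const int min_bits = 8 * std::min(result_size, input_size);
      const uint64_t mask = (min_bits == 64) ? ~0ULL : ((1ULL << min_bits) - 1);
      const uint64_t bits = static_cast<uint64_t>(input) & mask;
      if (result_is_char || (input_is_char && result_size > input_size)) {
        return static_cast<int64_t>(bits);            // Ubfx: zero-extend
      }
      const uint64_t sign = 1ULL << (min_bits - 1);   // Sbfx: sign-extend
      return static_cast<int64_t>((bits ^ sign) - sign);
    }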
+
+void LocationsBuilderARM64::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+#undef __
+#undef QUICK_ENTRY_POINT
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 54e87f4..a40f27f 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -29,8 +29,11 @@
namespace arm64 {
class CodeGeneratorARM64;
+class SlowPathCodeARM64;
-static constexpr size_t kArm64WordSize = 8;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kArm64WordSize = kArm64PointerSize;
+
static const vixl::Register kParameterCoreRegisters[] = {
vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
};
@@ -103,9 +106,11 @@
void LoadCurrentMethod(XRegister reg);
Arm64Assembler* GetAssembler() const { return assembler_; }
+ vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
private:
- void HandleAddSub(HBinaryOperation* instr);
+ void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, vixl::Register class_reg);
+ void HandleBinaryOp(HBinaryOperation* instr);
Arm64Assembler* const assembler_;
CodeGeneratorARM64* const codegen_;
@@ -124,7 +129,7 @@
#undef DECLARE_VISIT_INSTRUCTION
private:
- void HandleAddSub(HBinaryOperation* instr);
+ void HandleBinaryOp(HBinaryOperation* instr);
void HandleInvoke(HInvoke* instr);
CodeGeneratorARM64* const codegen_;
@@ -162,9 +167,10 @@
return kArm64WordSize;
}
- uintptr_t GetAddressOf(HBasicBlock* block ATTRIBUTE_UNUSED) const OVERRIDE {
- UNIMPLEMENTED(INFO) << "TODO: GetAddressOf";
- return 0u;
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ vixl::Label* block_entry_label = GetLabelOf(block);
+ DCHECK(block_entry_label->IsBound());
+ return block_entry_label->location();
}
size_t FrameEntrySpillSize() const OVERRIDE;
@@ -172,6 +178,7 @@
HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+ vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
// Emit a write barrier.
void MarkGCCard(vixl::Register object, vixl::Register value);
@@ -185,18 +192,18 @@
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
UNUSED(stack_index);
UNUSED(reg_id);
- UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
- return 0;
+ LOG(INFO) << "CodeGeneratorARM64::SaveCoreRegister()";
+ return kArm64WordSize;
}
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
UNUSED(stack_index);
UNUSED(reg_id);
- UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
- return 0;
+ LOG(INFO) << "CodeGeneratorARM64::RestoreCoreRegister()";
+ return kArm64WordSize;
}
// The number of registers that can be allocated. The register allocator may
@@ -226,9 +233,14 @@
}
// Code generation helpers.
+ void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveHelper(Location destination, Location source, Primitive::Type type);
- void Load(Primitive::Type type, vixl::Register dst, const vixl::MemOperand& src);
- void Store(Primitive::Type type, vixl::Register rt, const vixl::MemOperand& dst);
+ void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
+ void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
+ void LoadCurrentMethod(vixl::Register current_method);
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(int32_t offset, HInstruction* instruction, uint32_t dex_pc);
ParallelMoveResolver* GetMoveResolver() OVERRIDE {
UNIMPLEMENTED(INFO) << "TODO: MoveResolver";
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 36fe063..3c53cea 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1120,7 +1120,8 @@
// temp = temp[index_in_cache]
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(
+ temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1184,7 +1185,8 @@
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(
+ temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1219,7 +1221,8 @@
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 8252f81..0aff6cc 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -25,7 +25,8 @@
namespace art {
namespace x86 {
-static constexpr size_t kX86WordSize = 4;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kX86WordSize = kX86PointerSize;
class CodeGeneratorX86;
class SlowPathCodeX86;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 29479a2..97f5e5c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1112,7 +1112,8 @@
// temp = temp[index_in_cache]
__ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1171,7 +1172,8 @@
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1206,7 +1208,8 @@
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 86f3b4e..29c679d 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -25,7 +25,8 @@
namespace art {
namespace x86_64 {
-static constexpr size_t kX86_64WordSize = 8;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 83a1e11..42ac77d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -167,7 +167,8 @@
}
uintptr_t OptimizingCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
bool OptimizingCompiler::WriteElf(art::File* file, OatWriter* oat_writer,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 2d2a82e..927c5f5 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -942,9 +942,11 @@
oat_location_ = oat_filename_;
}
} else {
- oat_file_.reset(new File(oat_fd_, oat_location_));
+ oat_file_.reset(new File(oat_fd_, oat_location_, true));
oat_file_->DisableAutoClose();
- oat_file_->SetLength(0);
+ if (oat_file_->SetLength(0) != 0) {
+ PLOG(WARNING) << "Truncating oat file " << oat_location_ << " failed.";
+ }
}
if (oat_file_.get() == nullptr) {
PLOG(ERROR) << "Failed to create oat file: " << oat_location_;
@@ -952,6 +954,7 @@
}
if (create_file && fchmod(oat_file_->Fd(), 0644) != 0) {
PLOG(ERROR) << "Failed to make oat file world readable: " << oat_location_;
+ oat_file_->Erase();
return false;
}
return true;
@@ -1075,7 +1078,10 @@
<< ". Try: adb shell chmod 777 /data/local/tmp";
continue;
}
- tmp_file->WriteFully(dex_file->Begin(), dex_file->Size());
+ // This is just dumping files for debugging. Ignore errors, and leave remnants.
+ UNUSED(tmp_file->WriteFully(dex_file->Begin(), dex_file->Size()));
+ UNUSED(tmp_file->Flush());
+ UNUSED(tmp_file->Close());
LOG(INFO) << "Wrote input to " << tmp_file_name;
}
}
@@ -1266,6 +1272,7 @@
if (!driver_->WriteElf(android_root_, is_host_, dex_files_, oat_writer.get(),
oat_file_.get())) {
LOG(ERROR) << "Failed to write ELF file " << oat_file_->GetPath();
+ oat_file_->Erase();
return false;
}
}
@@ -1273,8 +1280,8 @@
// Flush result to disk.
{
TimingLogger::ScopedTiming t2("dex2oat Flush ELF", timings_);
- if (oat_file_->Flush() != 0) {
- LOG(ERROR) << "Failed to flush ELF file " << oat_file_->GetPath();
+ if (oat_file_->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush ELF file " << oat_file_->GetPath();
return false;
}
}
@@ -1302,7 +1309,13 @@
// We need to strip after image creation because FixupElf needs to use .strtab.
if (oat_unstripped_ != oat_stripped_) {
TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
- oat_file_.reset();
+ if (kUsePortableCompiler) {
+ if (oat_file_->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close oat file: " << oat_location_;
+ return EXIT_FAILURE;
+ }
+ oat_file_.reset();
+ }
std::unique_ptr<File> in(OS::OpenFileForReading(oat_unstripped_.c_str()));
std::unique_ptr<File> out(OS::CreateEmptyFile(oat_stripped_.c_str()));
size_t buffer_size = 8192;
@@ -1330,6 +1343,7 @@
std::string error_msg;
if (!ElfFile::Strip(oat_file_.get(), &error_msg)) {
LOG(ERROR) << "Failed to strip elf file: " << error_msg;
+ oat_file_->Erase();
return false;
}
@@ -1338,8 +1352,20 @@
} else {
VLOG(compiler) << "Oat file written successfully without stripping: " << oat_location_;
}
+ if (oat_file_->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close oat file: " << oat_location_;
+ return EXIT_FAILURE;
+ }
+ oat_file_.reset(nullptr);
}
+ if (oat_file_.get() != nullptr) {
+ if (oat_file_->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close oat file: " << oat_location_ << "/"
+ << oat_filename_;
+ return EXIT_FAILURE;
+ }
+ }
return true;
}
@@ -1451,18 +1477,24 @@
// Destroy ImageWriter before doing FixupElf.
image_writer_.reset();
- std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str()));
- if (oat_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_;
- return false;
- }
-
// Do not fix up the ELF file if we are --compile-pic
if (!compiler_options_->GetCompilePic()) {
+ std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str()));
+ if (oat_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_;
+ return false;
+ }
+
if (!ElfWriter::Fixup(oat_file.get(), oat_data_begin)) {
+ oat_file->Erase();
LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
return false;
}
+
+ if (oat_file->FlushCloseOrErase()) {
+ PLOG(ERROR) << "Failed to flush and close fixed ELF file " << oat_file->GetPath();
+ return false;
+ }
}
return true;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d6309f7..ea71996 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -182,8 +182,8 @@
bool result = builder_->Write();
- elf_output_->Flush();
- elf_output_->Close();
+ // Ignore I/O errors.
+ UNUSED(elf_output_->FlushClose());
return result;
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 6b6d11e..b15c712 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -175,7 +175,7 @@
}
gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
- PatchOat p(image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
delta, timings);
t.NewTiming("Patching files");
if (!p.PatchImage()) {
@@ -297,7 +297,7 @@
CHECK(is_oat_pic == NOT_PIC);
}
- PatchOat p(elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ PatchOat p(isa, elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
delta, timings);
t.NewTiming("Patching files");
if (!skip_patching_oat && !p.PatchElf()) {
@@ -532,39 +532,44 @@
PatchOat::PatchVisitor visitor(this, copy);
object->VisitReferences<true, kVerifyNone>(visitor, visitor);
if (object->IsArtMethod<kVerifyNone>()) {
- FixupMethod(static_cast<mirror::ArtMethod*>(object),
- static_cast<mirror::ArtMethod*>(copy));
+ FixupMethod(down_cast<mirror::ArtMethod*>(object), down_cast<mirror::ArtMethod*>(copy));
}
}
void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) {
+ const size_t pointer_size = InstructionSetPointerSize(isa_);
// Just update the entry points if it looks like we should.
// TODO: sanity check all the pointers' values
uintptr_t portable = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromPortableCompiledCode<kVerifyNone>());
+ object->GetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(pointer_size));
if (portable != 0) {
- copy->SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(portable + delta_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize(reinterpret_cast<void*>(portable + delta_),
+ pointer_size);
}
uintptr_t quick = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromQuickCompiledCode<kVerifyNone>());
+ object->GetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(pointer_size));
if (quick != 0) {
- copy->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(quick + delta_));
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast<void*>(quick + delta_),
+ pointer_size);
}
uintptr_t interpreter = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromInterpreter<kVerifyNone>());
+ object->GetEntryPointFromInterpreterPtrSize<kVerifyNone>(pointer_size));
if (interpreter != 0) {
- copy->SetEntryPointFromInterpreter(
- reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_));
+ copy->SetEntryPointFromInterpreterPtrSize(
+ reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_), pointer_size);
}
- uintptr_t native_method = reinterpret_cast<uintptr_t>(object->GetNativeMethod());
+ uintptr_t native_method = reinterpret_cast<uintptr_t>(
+ object->GetEntryPointFromJniPtrSize(pointer_size));
if (native_method != 0) {
- copy->SetNativeMethod(reinterpret_cast<void*>(native_method + delta_));
+ copy->SetEntryPointFromJniPtrSize(reinterpret_cast<void*>(native_method + delta_),
+ pointer_size);
}
- uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(object->GetNativeGcMap());
+ uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(
+ object->GetNativeGcMapPtrSize(pointer_size));
if (native_gc_map != 0) {
- copy->SetNativeGcMap(reinterpret_cast<uint8_t*>(native_gc_map + delta_));
+ copy->SetNativeGcMapPtrSize(reinterpret_cast<uint8_t*>(native_gc_map + delta_), pointer_size);
}
}
@@ -899,6 +904,20 @@
}
}
+// Either try to close the file (close=true), or erase it.
+static bool FinishFile(File* file, bool close) {
+ if (close) {
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close file.";
+ return false;
+ }
+ return true;
+ } else {
+ file->Erase();
+ return false;
+ }
+}
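
A sketch of the intended call pattern (DoPatch is hypothetical): the caller threads its success flag through, so a failed run erases the partial output instead of closing it.

    // Hypothetical caller; only FinishFile above is real.
    bool WritePatched(File* out) {
      bool ok = DoPatch(out);            // assumed patching step
      ok = FinishFile(out, ok) && ok;    // close on success, erase on failure
      return ok;
    }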
+
static int patchoat(int argc, char **argv) {
InitLogging(argv);
MemMap::Init();
@@ -1170,7 +1189,7 @@
if (output_image_filename.empty()) {
output_image_filename = "output-image-file";
}
- output_image.reset(new File(output_image_fd, output_image_filename));
+ output_image.reset(new File(output_image_fd, output_image_filename, true));
} else {
CHECK(!output_image_filename.empty());
output_image.reset(CreateOrOpen(output_image_filename.c_str(), &new_image_out));
@@ -1184,7 +1203,7 @@
if (input_oat_filename.empty()) {
input_oat_filename = "input-oat-file";
}
- input_oat.reset(new File(input_oat_fd, input_oat_filename));
+ input_oat.reset(new File(input_oat_fd, input_oat_filename, false));
if (input_oat == nullptr) {
// Unlikely, but ensure exhaustive logging in non-0 exit code case
LOG(ERROR) << "Failed to open input oat file by its FD" << input_oat_fd;
@@ -1203,7 +1222,7 @@
if (output_oat_filename.empty()) {
output_oat_filename = "output-oat-file";
}
- output_oat.reset(new File(output_oat_fd, output_oat_filename));
+ output_oat.reset(new File(output_oat_fd, output_oat_filename, true));
if (output_oat == nullptr) {
// Unlikely, but ensure exhaustive logging in non-0 exit code case
LOG(ERROR) << "Failed to open output oat file by its FD" << output_oat_fd;
@@ -1276,14 +1295,20 @@
output_oat.get(), output_image.get(), isa, &timings,
output_oat_fd >= 0, // was it opened from FD?
new_oat_out);
+ // The order here doesn't matter. If the first one is successfully saved and the second one
+ // erased, ImageSpace will still detect a problem and not use the files.
+ ret = ret && FinishFile(output_image.get(), ret);
+ ret = ret && FinishFile(output_oat.get(), ret);
} else if (have_oat_files) {
TimingLogger::ScopedTiming pt("patch oat", &timings);
ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings,
output_oat_fd >= 0, // was it opened from FD?
new_oat_out);
+ ret = ret && FinishFile(output_oat.get(), ret);
} else if (have_image_files) {
TimingLogger::ScopedTiming pt("patch image", &timings);
ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings);
+ ret = ret && FinishFile(output_image.get(), ret);
} else {
CHECK(false);
ret = true;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 5a3545b..578df3a 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -61,15 +61,16 @@
// Takes ownership only of the ElfFile. All other pointers are only borrowed.
PatchOat(ElfFile* oat_file, off_t delta, TimingLogger* timings)
: oat_file_(oat_file), image_(nullptr), bitmap_(nullptr), heap_(nullptr), delta_(delta),
- timings_(timings) {}
- PatchOat(MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
+ isa_(kNone), timings_(timings) {}
+ PatchOat(InstructionSet isa, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
MemMap* heap, off_t delta, TimingLogger* timings)
: image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), timings_(timings) {}
- PatchOat(ElfFile* oat_file, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
- MemMap* heap, off_t delta, TimingLogger* timings)
+ delta_(delta), isa_(isa), timings_(timings) {}
+ PatchOat(InstructionSet isa, ElfFile* oat_file, MemMap* image,
+ gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
+ TimingLogger* timings)
: oat_file_(oat_file), image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), timings_(timings) {}
+ delta_(delta), isa_(isa), timings_(timings) {}
~PatchOat() {}
// Was the .art image at image_path made with --compile-pic ?
@@ -156,8 +157,10 @@
const MemMap* const heap_;
// The amount we are changing the offset by.
const off_t delta_;
- // Timing splits.
- TimingLogger* const timings_;
+ // Active instruction set, used to determine the entrypoint pointer size.
+ const InstructionSet isa_;
+
+ TimingLogger* timings_;
DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
};
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 25fe45f..58f7940 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -302,6 +302,7 @@
base/allocator.h \
base/mutex.h \
debugger.h \
+ base/unix_file/fd_file.h \
dex_file.h \
dex_instruction.h \
gc/allocator/rosalloc.h \
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index d37e760..89ac1f7 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -53,7 +53,7 @@
mov ip, #0 @ set ip to 0
str ip, [sp] @ store NULL for method* at bottom of frame
add sp, #16 @ first 4 args are not passed on stack for portable
- ldr ip, [r0, #MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
ldr ip, [sp, #24] @ load the result pointer
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 632b414..1782db5 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -422,7 +422,7 @@
mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
#endif
- ldr ip, [r0, #MIRROR_ART_METHOD_QUICK_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 147d434..4415935 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -564,7 +564,7 @@
.macro INVOKE_STUB_CALL_AND_RETURN
// load method-> METHOD_QUICK_CODE_OFFSET
- ldr x9, [x0 , #MIRROR_ART_METHOD_QUICK_CODE_OFFSET]
+ ldr x9, [x0 , #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64]
// Branch to method.
blr x9
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 46622eb..e413880 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -127,6 +127,10 @@
}
}
+static inline size_t InstructionSetPointerSize(InstructionSet isa) {
+ return Is64BitInstructionSet(isa) ? 8U : 4U;
+}
+
static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
switch (isa) {
case kArm:
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index d7e7a8e..8d418e8 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -98,7 +98,7 @@
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET($a0) # get pointer to the code
+ lw $t9, MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store NULL for method* at bottom of frame
move $sp, $fp # restore the stack
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index e878ef7..4824857 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -512,7 +512,7 @@
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET($a0) # get pointer to the code
+ lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store NULL for method* at bottom of frame
move $sp, $fp # restore the stack
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index a7c4124..1f0900e 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -46,7 +46,7 @@
addl LITERAL(12), %esp // pop arguments to memcpy
mov 12(%ebp), %eax // move method pointer into eax
mov %eax, (%esp) // push method pointer onto stack
- call *MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET(%eax) // call the method
+ call *MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
POP ebx // pop ebx
POP ebp // pop ebp
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 2a7ef57..1ce01c4 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -316,7 +316,7 @@
mov 4(%esp), %ecx // copy arg1 into ecx
mov 8(%esp), %edx // copy arg2 into edx
mov 12(%esp), %ebx // copy arg3 into ebx
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET(%eax) // call the method
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
POP ebx // pop ebx
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 9d3a8cc..a80e7d2 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -518,7 +518,7 @@
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished
.Lgpr_setup_finished:
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
CFI_DEF_CFA_REGISTER(rsp)
POP r9 // Pop r9 - shorty*.
@@ -601,7 +601,7 @@
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
.Lgpr_setup_finished2:
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
CFI_DEF_CFA_REGISTER(rsp)
POP r9 // Pop r9 - shorty*.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 26df045..4b4c8855 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -148,13 +148,21 @@
ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET,
art::mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())
-#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET (32 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET,
- art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value())
+#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32 (48 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
+ art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value())
-#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET (40 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET,
- art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value())
+#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32 (40 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
+ art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
+
+#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64 (64 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
+ art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value())
+
+#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64 (48 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
+ art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value())
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
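
The _32/_64 macro split exists because the pointer-width entry-point fields in ArtMethod shift every later field: each architecture's stubs must use the offset matching its pointer size, which is exactly what the assembly changes below do (ARM/x86/MIPS move to the _32 macros, arm64/x86_64 to _64). A compile-time restatement of the quick-code values, with the header size as an assumption:

    #include <cstddef>

    constexpr size_t kObjectHeaderSize = 8;  // assumption for this sketch
    constexpr size_t QuickCodeOffset(size_t pointer_size) {
      return (pointer_size == 4U ? 40U : 48U) + kObjectHeaderSize;
    }
    static_assert(QuickCodeOffset(4) != QuickCodeOffset(8),
                  "32- and 64-bit images lay out ArtMethod differently");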
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index bf091d0..0e93eee 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -27,6 +27,9 @@
bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
while (true) {
+ if (file_.get() != nullptr) {
+ UNUSED(file_->FlushCloseOrErase()); // Ignore result.
+ }
file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
if (file_.get() == NULL) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
@@ -59,7 +62,7 @@
}
bool ScopedFlock::Init(File* file, std::string* error_msg) {
- file_.reset(new File(dup(file->Fd())));
+ file_.reset(new File(dup(file->Fd()), true));
if (file_->Fd() == -1) {
file_.reset();
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
@@ -89,6 +92,9 @@
if (file_.get() != NULL) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
CHECK_EQ(0, flock_result);
+ if (file_->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Could not close scoped file lock file.";
+ }
}
}
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index f29a7ec..6e5e7a1 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -14,28 +14,68 @@
* limitations under the License.
*/
-#include "base/logging.h"
#include "base/unix_file/fd_file.h"
+
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include "base/logging.h"
+
namespace unix_file {
-FdFile::FdFile() : fd_(-1), auto_close_(true) {
+FdFile::FdFile() : guard_state_(GuardState::kClosed), fd_(-1), auto_close_(true) {
}
-FdFile::FdFile(int fd) : fd_(fd), auto_close_(true) {
+FdFile::FdFile(int fd, bool check_usage)
+ : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
+ fd_(fd), auto_close_(true) {
}
-FdFile::FdFile(int fd, const std::string& path) : fd_(fd), file_path_(path), auto_close_(true) {
+FdFile::FdFile(int fd, const std::string& path, bool check_usage)
+ : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
+ fd_(fd), file_path_(path), auto_close_(true) {
CHECK_NE(0U, path.size());
}
FdFile::~FdFile() {
+ if (kCheckSafeUsage && (guard_state_ < GuardState::kNoCheck)) {
+ if (guard_state_ < GuardState::kFlushed) {
+ LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly flushed before destruction.";
+ }
+ if (guard_state_ < GuardState::kClosed) {
+ LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly closed before destruction.";
+ }
+ CHECK_GE(guard_state_, GuardState::kClosed);
+ }
if (auto_close_ && fd_ != -1) {
- Close();
+ if (Close() != 0) {
+ PLOG(::art::WARNING) << "Failed to close file " << file_path_;
+ }
+ }
+}
+
+void FdFile::moveTo(GuardState target, GuardState warn_threshold, const char* warning) {
+ if (kCheckSafeUsage) {
+ if (guard_state_ < GuardState::kNoCheck) {
+ if (warn_threshold < GuardState::kNoCheck && guard_state_ >= warn_threshold) {
+ LOG(::art::ERROR) << warning;
+ }
+ guard_state_ = target;
+ }
+ }
+}
+
+void FdFile::moveUp(GuardState target, const char* warning) {
+ if (kCheckSafeUsage) {
+ if (guard_state_ < GuardState::kNoCheck) {
+ if (guard_state_ < target) {
+ guard_state_ = target;
+ } else if (target < guard_state_) {
+ LOG(::art::ERROR) << warning;
+ }
+ }
}
}
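
Together, moveTo and moveUp walk a small lattice, kBase < kFlushed < kClosed (with kNoCheck disabling checking entirely): moveUp only advances and complains about attempted retreats, while moveTo can also move backwards, which is how a write re-dirties an already-flushed file. A runnable model of the intended transitions (warnings elided):

    #include <cassert>

    enum class GuardState { kBase, kFlushed, kClosed, kNoCheck };

    GuardState MoveUp(GuardState s, GuardState target) {
      return (s < GuardState::kNoCheck && s < target) ? target : s;
    }

    GuardState MoveTo(GuardState s, GuardState target) {
      return (s < GuardState::kNoCheck) ? target : s;
    }

    int main() {
      GuardState s = GuardState::kBase;
      s = MoveUp(s, GuardState::kFlushed);  // Flush()
      s = MoveTo(s, GuardState::kBase);     // Write() re-dirties the file
      s = MoveUp(s, GuardState::kFlushed);  // Flush() again
      s = MoveUp(s, GuardState::kClosed);   // Close()
      assert(s == GuardState::kClosed);     // destructor check passes
    }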
@@ -54,11 +94,28 @@
return false;
}
file_path_ = path;
+ static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
+ if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
+ // Start in the base state (not flushed, not closed).
+ guard_state_ = GuardState::kBase;
+ } else {
+ // We are not concerned with read-only files. In that case, proper flushing and closing is
+ // not important.
+ guard_state_ = GuardState::kNoCheck;
+ }
return true;
}
int FdFile::Close() {
int result = TEMP_FAILURE_RETRY(close(fd_));
+
+ // Test here, so the file is closed and not leaked.
+ if (kCheckSafeUsage) {
+ CHECK_GE(guard_state_, GuardState::kFlushed) << "File " << file_path_
+ << " has not been flushed before closing.";
+ moveUp(GuardState::kClosed, nullptr);
+ }
+
if (result == -1) {
return -errno;
} else {
@@ -74,6 +131,7 @@
#else
int rc = TEMP_FAILURE_RETRY(fsync(fd_));
#endif
+ moveUp(GuardState::kFlushed, "Flushing closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -92,6 +150,7 @@
#else
int rc = TEMP_FAILURE_RETRY(ftruncate(fd_, new_length));
#endif
+ moveTo(GuardState::kBase, GuardState::kClosed, "Truncating closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -107,6 +166,7 @@
#else
int rc = TEMP_FAILURE_RETRY(pwrite(fd_, buf, byte_count, offset));
#endif
+ moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -135,6 +195,7 @@
bool FdFile::WriteFully(const void* buffer, size_t byte_count) {
const char* ptr = static_cast<const char*>(buffer);
+ moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
while (byte_count > 0) {
ssize_t bytes_written = TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count));
if (bytes_written == -1) {
@@ -146,4 +207,38 @@
return true;
}
+void FdFile::Erase() {
+ TEMP_FAILURE_RETRY(SetLength(0));
+ TEMP_FAILURE_RETRY(Flush());
+ TEMP_FAILURE_RETRY(Close());
+}
+
+int FdFile::FlushCloseOrErase() {
+ int flush_result = TEMP_FAILURE_RETRY(Flush());
+ if (flush_result != 0) {
+ LOG(::art::ERROR) << "CloseOrErase failed while flushing a file.";
+ Erase();
+ return flush_result;
+ }
+ int close_result = TEMP_FAILURE_RETRY(Close());
+ if (close_result != 0) {
+ LOG(::art::ERROR) << "CloseOrErase failed while closing a file.";
+ Erase();
+ return close_result;
+ }
+ return 0;
+}
+
+int FdFile::FlushClose() {
+ int flush_result = TEMP_FAILURE_RETRY(Flush());
+ if (flush_result != 0) {
+ LOG(::art::ERROR) << "FlushClose failed while flushing a file.";
+ }
+ int close_result = TEMP_FAILURE_RETRY(Close());
+ if (close_result != 0) {
+ LOG(::art::ERROR) << "FlushClose failed while closing a file.";
+ }
+ return (flush_result != 0) ? flush_result : close_result;
+}
+
} // namespace unix_file
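
From a caller's perspective the new contract is: check every write, erase on failure, and end the file's life with exactly one of Erase(), FlushCloseOrErase(), or FlushClose(). A hypothetical writer following it:

    #include "base/unix_file/fd_file.h"

    // WriteBlob is illustrative; only the FdFile API is real.
    bool WriteBlob(unix_file::FdFile* file, const void* data, size_t size) {
      if (!file->WriteFully(data, size)) {     // WARN_UNUSED: must be checked
        file->Erase();                         // drop the partial contents
        return false;
      }
      return file->FlushCloseOrErase() == 0;   // flush + close, erase on error
    }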
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index 01f4ca2..8db2ee4 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -24,6 +24,9 @@
namespace unix_file {
+// If true, check whether Flush and Close are called before destruction.
+static constexpr bool kCheckSafeUsage = true;
+
// A RandomAccessFile implementation backed by a file descriptor.
//
// Not thread safe.
@@ -32,8 +35,8 @@
FdFile();
// Creates an FdFile using the given file descriptor. Takes ownership of the
// file descriptor. (Use DisableAutoClose to retain ownership.)
- explicit FdFile(int fd);
- explicit FdFile(int fd, const std::string& path);
+ explicit FdFile(int fd, bool checkUsage);
+ explicit FdFile(int fd, const std::string& path, bool checkUsage);
// Destroys an FdFile, closing the file descriptor if Close hasn't already
// been called. (If you care about the return value of Close, call it
@@ -47,12 +50,21 @@
bool Open(const std::string& file_path, int flags, mode_t mode);
// RandomAccessFile API.
- virtual int Close();
- virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
- virtual int SetLength(int64_t new_length);
+ virtual int Close() WARN_UNUSED;
+ virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const WARN_UNUSED;
+ virtual int SetLength(int64_t new_length) WARN_UNUSED;
virtual int64_t GetLength() const;
- virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
- virtual int Flush();
+ virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset) WARN_UNUSED;
+ virtual int Flush() WARN_UNUSED;
+
+ // Short for SetLength(0); Flush(); Close();
+ void Erase();
+
+ // Try to Flush(), then try to Close(); if either fails, call Erase().
+ int FlushCloseOrErase() WARN_UNUSED;
+
+ // Try to Flush() and Close(). Attempts both, but returns the first error encountered.
+ int FlushClose() WARN_UNUSED;
// Bonus API.
int Fd() const;
@@ -61,8 +73,35 @@
return file_path_;
}
void DisableAutoClose();
- bool ReadFully(void* buffer, size_t byte_count);
- bool WriteFully(const void* buffer, size_t byte_count);
+ bool ReadFully(void* buffer, size_t byte_count) WARN_UNUSED;
+ bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED;
+
+ // This enum is public so that we can define the << operator over it.
+ enum class GuardState {
+ kBase, // Base, file has not been flushed or closed.
+ kFlushed, // File has been flushed, but not closed.
+ kClosed, // File has been flushed and closed.
+ kNoCheck // Do not check for the current file instance.
+ };
+
+ protected:
+ // If the guard state indicates checking (!= kNoCheck), go to the target state "target". Print
+ // the given warning if the current state is at or above warn_threshold.
+ void moveTo(GuardState target, GuardState warn_threshold, const char* warning);
+
+ // If the guard state indicates checking (< kNoCheck) and is below the target state "target",
+ // advance to "target". If the current state is above the target state (still excluding
+ // kNoCheck), print the warning.
+ void moveUp(GuardState target, const char* warning);
+
+ // Forcefully sets the state to the given one. This can overwrite kNoCheck.
+ void resetGuard(GuardState new_state) {
+ if (kCheckSafeUsage) {
+ guard_state_ = new_state;
+ }
+ }
+
+ GuardState guard_state_;
private:
int fd_;
@@ -72,6 +111,8 @@
DISALLOW_COPY_AND_ASSIGN(FdFile);
};
+std::ostream& operator<<(std::ostream& os, const FdFile::GuardState& kind);
+
} // namespace unix_file
#endif // ART_RUNTIME_BASE_UNIX_FILE_FD_FILE_H_
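
With WARN_UNUSED on the I/O methods, callers now have exactly two options: act on the result, or discard it explicitly. The diff uses both — the PLOG branches in dex2oat on one hand, the UNUSED(...) wrappers around the debug dumps on the other. In sketch form:

    // Option 1: act on the result.
    if (file->Flush() != 0) {
      PLOG(ERROR) << "Failed to flush " << file->GetPath();
    }
    // Option 2: deliberately ignore it (debug/dump paths only).
    UNUSED(file->WriteFully(buffer, size));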
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 3481f2f..a7e5b96 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -24,7 +24,7 @@
class FdFileTest : public RandomAccessFileTest {
protected:
virtual RandomAccessFile* MakeTestFile() {
- return new FdFile(fileno(tmpfile()));
+ return new FdFile(fileno(tmpfile()), false);
}
};
@@ -53,6 +53,7 @@
ASSERT_TRUE(file.Open(good_path, O_CREAT | O_WRONLY));
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
+ EXPECT_EQ(0, file.Flush());
EXPECT_EQ(0, file.Close());
EXPECT_EQ(-1, file.Fd());
EXPECT_FALSE(file.IsOpened());
@@ -60,7 +61,7 @@
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
- file.Close();
+ ASSERT_EQ(file.Close(), 0);
ASSERT_EQ(unlink(good_path.c_str()), 0);
}
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 0002433..e7ace4c 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -76,6 +76,8 @@
ASSERT_EQ(content.size(), static_cast<uint64_t>(file->Write(content.data(), content.size(), 0)));
TestReadContent(content, file.get());
+
+ CleanUp(file.get());
}
void TestReadContent(const std::string& content, RandomAccessFile* file) {
@@ -131,6 +133,8 @@
ASSERT_EQ(new_length, file->GetLength());
ASSERT_TRUE(ReadString(file.get(), &new_content));
ASSERT_EQ('\0', new_content[new_length - 1]);
+
+ CleanUp(file.get());
}
void TestWrite() {
@@ -163,6 +167,11 @@
ASSERT_EQ(file->GetLength(), new_length);
ASSERT_TRUE(ReadString(file.get(), &new_content));
ASSERT_EQ(std::string("hello\0hello", new_length), new_content);
+
+ CleanUp(file.get());
+ }
+
+ virtual void CleanUp(RandomAccessFile* file ATTRIBUTE_UNUSED) {
}
protected:
diff --git a/runtime/base/unix_file/random_access_file_utils_test.cc b/runtime/base/unix_file/random_access_file_utils_test.cc
index 6317922..9457d22 100644
--- a/runtime/base/unix_file/random_access_file_utils_test.cc
+++ b/runtime/base/unix_file/random_access_file_utils_test.cc
@@ -37,14 +37,14 @@
}
TEST_F(RandomAccessFileUtilsTest, BadSrc) {
- FdFile src(-1);
+ FdFile src(-1, false);
StringFile dst;
ASSERT_FALSE(CopyFile(src, &dst));
}
TEST_F(RandomAccessFileUtilsTest, BadDst) {
StringFile src;
- FdFile dst(-1);
+ FdFile dst(-1, false);
// We need some source content to trigger a write.
// Copying an empty file is a no-op.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index aaf2da7..68e20f2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -243,7 +243,8 @@
portable_imt_conflict_trampoline_(nullptr),
quick_imt_conflict_trampoline_(nullptr),
quick_generic_jni_trampoline_(nullptr),
- quick_to_interpreter_bridge_trampoline_(nullptr) {
+ quick_to_interpreter_bridge_trampoline_(nullptr),
+ image_pointer_size_(sizeof(void*)) {
memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
}
@@ -378,10 +379,9 @@
Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize())));
CHECK(java_lang_reflect_ArtMethod.Get() != nullptr);
- java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize());
+ java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
-
mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
// Set up array classes for string, field, method
@@ -407,8 +407,7 @@
// DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
// these roots.
CHECK_NE(0U, boot_class_path.size());
- for (size_t i = 0; i != boot_class_path.size(); ++i) {
- const DexFile* dex_file = boot_class_path[i];
+ for (const DexFile* dex_file : boot_class_path) {
CHECK(dex_file != nullptr);
AppendToBootClassPath(self, *dex_file);
}
@@ -1682,6 +1681,20 @@
// Set classes on AbstractMethod early so that IsMethod tests can be performed during the live
// bitmap walk.
mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
+ size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
+ if (!Runtime::Current()->IsCompiler()) {
+ // The compiler supports having an image with a different pointer size than the runtime. This
+ // happens on the host when compiling 32-bit tests, since we use a 64-bit libart compiler. We
+ // may also use a 32-bit dex2oat on a system with 64-bit apps.
+ CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
+ << sizeof(void*);
+ }
+ if (art_method_object_size == mirror::ArtMethod::InstanceSize(4)) {
+ image_pointer_size_ = 4;
+ } else {
+ CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(8));
+ image_pointer_size_ = 8;
+ }
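
The detection above reduces to comparing the boot image's ArtMethod object size against the two possible layouts. As a standalone restatement (the size parameters stand in for mirror::ArtMethod::InstanceSize(4) and InstanceSize(8)):

    #include <cstddef>

    size_t DetectImagePointerSize(size_t art_method_object_size,
                                  size_t instance_size_32,
                                  size_t instance_size_64) {
      if (art_method_object_size == instance_size_32) {
        return 4U;
      }
      // Anything else must match the 64-bit layout; the runtime CHECKs it.
      if (art_method_object_size == instance_size_64) {
        return 8U;
      }
      return 0U;  // unreachable in practice; the CHECK would have fired
    }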
// Set entry point to interpreter if in InterpretOnly mode.
if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
@@ -1695,7 +1708,7 @@
// reinit array_iftable_ from any array class instance, they should be ==
array_iftable_ = GcRoot<mirror::IfTable>(GetClassRoot(kObjectArrayClass)->GetIfTable());
- DCHECK(array_iftable_.Read() == GetClassRoot(kBooleanArrayClass)->GetIfTable());
+ DCHECK_EQ(array_iftable_.Read(), GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
@@ -5312,14 +5325,18 @@
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- std::string temp;
- DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
- size_t previous_size = klass->GetObjectSize();
- if (previous_size != 0) {
- // Make sure that we didn't originally have an incorrect size.
- CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+ if (klass->DescriptorEquals("Ljava/lang/reflect/ArtMethod;")) {
+ klass->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
+ } else {
+ std::string temp;
+ DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
+ size_t previous_size = klass->GetObjectSize();
+ if (previous_size != 0) {
+ // Make sure that we didn't originally have an incorrect size.
+ CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+ }
+ klass->SetObjectSize(size);
}
- klass->SetObjectSize(size);
}
}
@@ -5372,7 +5389,6 @@
}
CHECK_EQ(current_ref_offset.Uint32Value(), end_ref_offset.Uint32Value());
}
-
return true;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 385f135..006354f 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -805,6 +805,9 @@
const void* quick_generic_jni_trampoline_;
const void* quick_to_interpreter_bridge_trampoline_;
+ // Image pointer size.
+ size_t image_pointer_size_;
+
friend class ImageWriter; // for GetClassRoots
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ba5aa3d..0c86761 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -499,11 +499,6 @@
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_), "dexCacheStrings"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_interpreter_), "entryPointFromInterpreter"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_jni_), "entryPointFromJni"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_portable_compiled_code_), "entryPointFromPortableCompiledCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_quick_compiled_code_), "entryPointFromQuickCompiledCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, gc_map_), "gcMap"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"));
};
};
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 6e3ebc2..03b33e9 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -59,7 +59,7 @@
filename_ += "/TmpFile-XXXXXX";
int fd = mkstemp(&filename_[0]);
CHECK_NE(-1, fd);
- file_.reset(new File(fd, GetFilename()));
+ file_.reset(new File(fd, GetFilename(), true));
}
ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
@@ -67,7 +67,7 @@
filename_ += suffix;
int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
CHECK_NE(-1, fd);
- file_.reset(new File(fd, GetFilename()));
+ file_.reset(new File(fd, GetFilename(), true));
}
ScratchFile::ScratchFile(File* file) {
@@ -88,6 +88,11 @@
if (!OS::FileExists(filename_.c_str())) {
return;
}
+ if (file_.get() != nullptr) {
+ if (file_->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Error closing scratch file.";
+ }
+ }
int unlink_result = unlink(filename_.c_str());
CHECK_EQ(0, unlink_result);
}
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 134e284..b304779 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -146,6 +146,9 @@
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index addd948..ec1e5f0 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -115,6 +115,9 @@
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
@@ -177,6 +180,9 @@
if (!file->WriteFully(bytes, length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index c4bc969..0b7d382 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1659,7 +1659,7 @@
*(sp32 - 1) = cookie;
// Retrieve the stored native code.
- const void* nativeCode = called->GetNativeMethod();
+ void* nativeCode = called->GetEntryPointFromJni();
// There are two cases for the content of nativeCode:
// 1) Pointer to the native function.
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 14d7432..3069581 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -475,9 +475,14 @@
}
}
- std::unique_ptr<File> file(new File(out_fd, filename_));
+ std::unique_ptr<File> file(new File(out_fd, filename_, true));
okay = file->WriteFully(header_data_ptr_, header_data_size_) &&
- file->WriteFully(body_data_ptr_, body_data_size_);
+ file->WriteFully(body_data_ptr_, body_data_size_);
+ if (okay) {
+ okay = file->FlushCloseOrErase() == 0;
+ } else {
+ file->Erase();
+ }
if (!okay) {
std::string msg(StringPrintf("Couldn't dump heap; writing \"%s\" failed: %s",
filename_.c_str(), strerror(errno)));
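The hprof change above is one instance of a pattern this commit applies to every writer: a write is committed with FlushCloseOrErase() on success or discarded with Erase() on failure. A hedged sketch of the same pattern over plain stdio follows; DumpToFile and the path are hypothetical names, not the art::File API:

```cpp
#include <cstdio>
#include <string>

// Write data to path; on success flush and close, on failure close and remove
// the partial file so nothing half-written is silently left behind.
bool DumpToFile(const std::string& path, const std::string& data) {
  std::FILE* f = std::fopen(path.c_str(), "wb");
  if (f == nullptr) {
    return false;
  }
  bool okay = std::fwrite(data.data(), 1, data.size(), f) == data.size();
  if (okay) {
    okay = (std::fflush(f) == 0);
    okay = (std::fclose(f) == 0) && okay;  // close even if the flush failed
  } else {
    std::fclose(f);
    std::remove(path.c_str());  // the "erase" half of the pattern
  }
  return okay;
}

int main() {
  return DumpToFile("/tmp/hprof-sketch.bin", "header+body") ? 0 : 1;
}
```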
diff --git a/runtime/image.cc b/runtime/image.cc
index aee84bc3..b83eeb1 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '2', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '3', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 18de133..b17f303 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -138,7 +138,7 @@
if (method->IsStatic()) {
if (shorty == "L") {
typedef jobject (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
jobject jresult;
@@ -149,35 +149,35 @@
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get());
} else if (shorty == "Z") {
typedef jboolean (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get()));
} else if (shorty == "BI") {
typedef jbyte (fntype)(JNIEnv*, jclass, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetB(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "II") {
typedef jint (fntype)(JNIEnv*, jclass, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "LL") {
typedef jobject (fntype)(JNIEnv*, jclass, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -190,14 +190,15 @@
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "IIZ") {
typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "ILI") {
typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -206,21 +207,21 @@
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "VIZ") {
typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], args[1]);
} else if (shorty == "ZLL") {
typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -231,7 +232,7 @@
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -242,7 +243,7 @@
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -251,7 +252,7 @@
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -267,7 +268,7 @@
} else {
if (shorty == "L") {
typedef jobject (fntype)(JNIEnv*, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
jobject jresult;
@@ -278,14 +279,14 @@
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), rcvr.get());
} else if (shorty == "LL") {
typedef jobject (fntype)(JNIEnv*, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -299,7 +300,7 @@
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
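For context, each arm above differs only in the function type recovered from the shorty: its first character is the return type and the rest are the argument types ('I' int, 'Z' boolean, 'L' reference, and so on). One arm isolated as a sketch; CallStaticIIZ is a hypothetical name, and only the cast pattern comes from the code above:

```cpp
#include <jni.h>  // assumes a JNI development header is available

// Shorty "IIZ": returns jint, takes (jint, jboolean) after the implicit
// JNIEnv* and jclass of a static native method.
typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);

jint CallStaticIIZ(JNIEnv* env, jclass klass, void* entry_point,
                   jint arg0, jboolean arg1) {
  // entry_point is what GetEntryPointFromJni() returns; the shorty tells us
  // its true signature, so the cast below makes the call well-typed.
  fntype* const fn = reinterpret_cast<fntype*>(entry_point);
  return fn(env, klass, arg0, arg1);
}
```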
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 494fa2f..62d17ab 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -307,12 +307,6 @@
return pc - reinterpret_cast<uintptr_t>(code);
}
-template<VerifyObjectFlags kVerifyFlags>
-inline void ArtMethod::SetNativeMethod(const void* native_method) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
-}
-
inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
DCHECK(code_pointer != nullptr);
DCHECK_EQ(code_pointer, GetQuickOatCodePointer());
@@ -485,6 +479,12 @@
return type;
}
+inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() {
+ // When the default (native) pointer size is in use, check that the class's object size
+ // matches the size of this mirror object.
+ DCHECK_EQ(GetClass()->GetObjectSize(), sizeof(*this));
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index a742aaa..3b4d5f3 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -483,7 +483,7 @@
if (is_fast) {
SetAccessFlags(GetAccessFlags() | kAccFastNative);
}
- SetNativeMethod(native_method);
+ SetEntryPointFromJni(native_method);
}
void ArtMethod::UnregisterNative() {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index d92d00a..4a7831f 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -48,11 +48,6 @@
// Size of java.lang.reflect.ArtMethod.class.
static uint32_t ClassSize();
- // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
- static constexpr uint32_t InstanceSize() {
- return sizeof(ArtMethod);
- }
-
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -258,49 +253,92 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
EntryPointFromInterpreter* GetEntryPointFromInterpreter()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<EntryPointFromInterpreter*, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_));
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromInterpreterPtrSize(sizeof(void*));
+ }
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<EntryPointFromInterpreter*, kVerifyFlags>(
+ EntryPointFromInterpreterOffset(pointer_size), pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
- entry_point_from_interpreter);
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter,
+ size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size);
}
- static MemberOffset EntryPointFromPortableCompiledCodeOffset() {
- return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_portable_compiled_code_));
+ ALWAYS_INLINE static MemberOffset EntryPointFromPortableCompiledCodeOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_portable_compiled_code_) / sizeof(void*) * pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- const void* GetEntryPointFromPortableCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset());
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ const void* GetEntryPointFromPortableCompiledCode()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromPortableCompiledCodePtrSize(sizeof(void*));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const void* GetEntryPointFromPortableCompiledCodePtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(pointer_size), pointer_size);
+ }
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code);
+ CheckObjectSizeEqualsMirrorSize();
+ return SetEntryPointFromPortableCompiledCodePtrSize(entry_point_from_portable_compiled_code,
+ sizeof(void*));
}
- static MemberOffset EntryPointFromQuickCompiledCodeOffset() {
- return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_quick_compiled_code_));
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromPortableCompiledCodePtrSize(
+ const void* entry_point_from_portable_compiled_code, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(pointer_size),
+ entry_point_from_portable_compiled_code, pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*, kVerifyFlags>(EntryPointFromQuickCompiledCodeOffset());
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+ EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code);
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
+ sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
+ const void* entry_point_from_quick_compiled_code, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code,
+ pointer_size);
}
uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -365,11 +403,23 @@
CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
+ CheckObjectSizeEqualsMirrorSize();
+ return GetNativeGcMapPtrSize(sizeof(void*));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const uint8_t* GetNativeGcMapPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<uint8_t*>(GcMapOffset(pointer_size), pointer_size);
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetNativeGcMap(const uint8_t* data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data);
+ CheckObjectSizeEqualsMirrorSize();
+ SetNativeGcMapPtrSize(data, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetNativeGcMapPtrSize(const uint8_t* data, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(GcMapOffset(pointer_size), data,
+ pointer_size);
}
// When building the oat we need a convenient place to stuff the offset of the native GC map.
@@ -409,16 +459,46 @@
void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static MemberOffset NativeMethodOffset() {
- return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
+ static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size);
}
- const void* GetNativeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*>(NativeMethodOffset());
+ static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetNativeMethod(const void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
+ }
+
+ static MemberOffset GcMapOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, gc_map_) / sizeof(void*) * pointer_size);
+ }
+
+ void* GetEntryPointFromJni() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromJniPtrSize(sizeof(void*));
+ }
+ ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
+ }
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromJniPtrSize<kVerifyFlags>(entrypoint, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
+ }
static MemberOffset GetMethodIndexOffset() {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
@@ -521,7 +601,16 @@
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- private:
+ static size_t SizeWithoutPointerFields() {
+ return sizeof(ArtMethod) - sizeof(PtrSizedFields);
+ }
+
+ // Size of an instance of java.lang.reflect.ArtMethod for the given pointer size.
+ static size_t InstanceSize(size_t pointer_size) {
+ return SizeWithoutPointerFields() + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
+ }
+
+ protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
HeapReference<Class> declaring_class_;
@@ -535,26 +624,6 @@
// Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
HeapReference<ObjectArray<String>> dex_cache_strings_;
- // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
- // compiled code.
- uint64_t entry_point_from_interpreter_;
-
- // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
- uint64_t entry_point_from_jni_;
-
- // Method dispatch from portable compiled code invokes this pointer which may cause bridging into
- // quick compiled code or the interpreter.
- uint64_t entry_point_from_portable_compiled_code_;
-
- // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
- // portable compiled code or the interpreter.
- uint64_t entry_point_from_quick_compiled_code_;
-
- // Pointer to a data structure created by the compiler and used by the garbage collector to
- // determine which registers hold live references to objects within the heap. Keyed by native PC
- // offsets for the quick compiler and dex PCs for the portable.
- uint64_t gc_map_;
-
// Access flags; low 16 bits are defined by spec.
uint32_t access_flags_;
@@ -573,15 +642,46 @@
// ifTable.
uint32_t method_index_;
+ // Add alignment word here if necessary.
+
+ // Must be the last fields in the method.
+ struct PACKED(4) PtrSizedFields {
+ // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
+ // compiled code.
+ void* entry_point_from_interpreter_;
+
+ // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
+ void* entry_point_from_jni_;
+
+ // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
+ // portable compiled code or the interpreter.
+ void* entry_point_from_quick_compiled_code_;
+
+ // Pointer to a data structure created by the compiler and used by the garbage collector to
+ // determine which registers hold live references to objects within the heap. Keyed by native PC
+ // offsets for the quick compiler and dex PCs for the portable compiler.
+ void* gc_map_;
+
+ // Method dispatch from portable compiled code invokes this pointer which may cause bridging
+ // into quick compiled code or the interpreter. Last to simplify entrypoint logic.
+ void* entry_point_from_portable_compiled_code_;
+ } ptr_sized_fields_;
+
static GcRoot<Class> java_lang_reflect_ArtMethod_;
private:
+ ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static size_t PtrSizedFieldsOffset() {
+ return OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_);
+ }
+
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
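The offset helpers above all share one trick: inside PtrSizedFields every member is exactly one pointer wide, so the native offsetof divided by sizeof(void*) yields a pointer-size-independent field index, which is then rescaled by the image's pointer size. A compilable sketch, with kBase standing in for an assumed PtrSizedFieldsOffset() value:

```cpp
#include <cassert>
#include <cstddef>

// Same field order as the PtrSizedFields struct in this change.
struct PtrSizedFields {
  void* entry_point_from_interpreter_;
  void* entry_point_from_jni_;
  void* entry_point_from_quick_compiled_code_;
  void* gc_map_;
  void* entry_point_from_portable_compiled_code_;
};

// offset in build pointers / sizeof(void*) == field index, then rescale.
constexpr size_t ScaledOffset(size_t base, size_t member_offset,
                              size_t pointer_size) {
  return base + (member_offset / sizeof(void*)) * pointer_size;
}

int main() {
  constexpr size_t kBase = 36;  // assumption: offset of ptr_sized_fields_
  constexpr size_t jni = offsetof(PtrSizedFields, entry_point_from_jni_);
  // entry_point_from_jni_ is field index 1 on both 32- and 64-bit hosts.
  assert(ScaledOffset(kBase, jni, 4) == kBase + 1 * 4);
  assert(ScaledOffset(kBase, jni, 8) == kBase + 1 * 8);
  return 0;
}
```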
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index f45ea85..82425b5 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -528,6 +528,13 @@
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
+ void SetObjectSizeWithoutChecks(uint32_t new_object_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Not called within a transaction.
+ return SetField32<false, false, kVerifyNone>(
+ OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
+ }
+
// Returns true if this class is in the same packages as that class.
bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 4199eef..121947d 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -404,8 +404,7 @@
}
DCHECK_GE(result, sizeof(Object))
<< " class=" << PrettyTypeOf(GetClass<kNewFlags, kReadBarrierOption>());
- DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
- DCHECK(!(IsArtMethod<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtMethod));
+ DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
return result;
}
@@ -962,7 +961,6 @@
}
}
}
-
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 0ce5231..221feca 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -392,15 +392,26 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr(MemberOffset field_offset, T new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
- SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int32_t>(new_value));
-#else
- SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int64_t>(new_value));
-#endif
+ SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, new_value, sizeof(void*));
}
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
+ ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
+ size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+ if (pointer_size == 4) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
+ DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we don't lose any non-zero bits.
+ SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, static_cast<int32_t>(ptr));
+ } else {
+ SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
+ }
+ }
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -413,11 +424,21 @@
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
- return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
-#else
- return reinterpret_cast<T>(GetField64<kVerifyFlags, kIsVolatile>(field_offset));
-#endif
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
+ }
+
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+ if (pointer_size == 4) {
+ return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
+ } else {
+ int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
+ // Check that we dont lose any non 0 bits.
+ DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v);
+ return reinterpret_cast<T>(v);
+ }
}
// TODO: Fixme when annotalysis works with visitors.
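GetFieldPtrWithSize/SetFieldPtrWithSize above reduce to: store the pointer in a 4- or 8-byte slot, with a debug check that narrowing to 32 bits loses nothing. A standalone sketch over a raw byte buffer; StorePtr/LoadPtr are hypothetical names, and memcpy stands in for the Field32/Field64 accessors:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

void StorePtr(uint8_t* slot, void* value, size_t pointer_size) {
  assert(pointer_size == 4u || pointer_size == 8u);
  intptr_t ptr = reinterpret_cast<intptr_t>(value);
  if (pointer_size == 4u) {
    int32_t narrow = static_cast<int32_t>(ptr);
    assert(static_cast<intptr_t>(narrow) == ptr);  // no non-zero bits lost
    std::memcpy(slot, &narrow, sizeof(narrow));
  } else {
    int64_t wide = static_cast<int64_t>(ptr);
    std::memcpy(slot, &wide, sizeof(wide));
  }
}

void* LoadPtr(const uint8_t* slot, size_t pointer_size) {
  assert(pointer_size == 4u || pointer_size == 8u);
  if (pointer_size == 4u) {
    int32_t narrow;
    std::memcpy(&narrow, slot, sizeof(narrow));
    return reinterpret_cast<void*>(static_cast<intptr_t>(narrow));
  }
  int64_t wide;
  std::memcpy(&wide, slot, sizeof(wide));
  return reinterpret_cast<void*>(static_cast<intptr_t>(wide));
}

int main() {
  uint8_t slot[8] = {};
  int x = 42;
  StorePtr(slot, &x, sizeof(void*));
  assert(LoadPtr(slot, sizeof(void*)) == &x);
  return 0;
}
```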
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index a0aaa9e..4402031 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -78,6 +78,14 @@
TEST_F(ObjectTest, Constants) {
EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
+ EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
+ ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
+ ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
}
TEST_F(ObjectTest, IsInSamePackage) {
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index c2c6b12..ffadfc6 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -72,7 +72,7 @@
if (count < method_count) {
methods[count].name = m->GetName();
methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ methods[count].fnPtr = m->GetEntryPointFromJni();
count++;
} else {
LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
@@ -85,7 +85,7 @@
if (count < method_count) {
methods[count].name = m->GetName();
methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ methods[count].fnPtr = m->GetEntryPointFromJni();
count++;
} else {
LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index d448460..e377542 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -110,11 +110,17 @@
PLOG(ERROR) << "Unable to open stack trace file '" << stack_trace_file_ << "'";
return;
}
- std::unique_ptr<File> file(new File(fd, stack_trace_file_));
- if (!file->WriteFully(s.data(), s.size())) {
- PLOG(ERROR) << "Failed to write stack traces to '" << stack_trace_file_ << "'";
+ std::unique_ptr<File> file(new File(fd, stack_trace_file_, true));
+ bool success = file->WriteFully(s.data(), s.size());
+ if (success) {
+ success = file->FlushCloseOrErase() == 0;
} else {
+ file->Erase();
+ }
+ if (success) {
LOG(INFO) << "Wrote stack traces to '" << stack_trace_file_ << "'";
+ } else {
+ PLOG(ERROR) << "Failed to write stack traces to '" << stack_trace_file_ << "'";
}
}
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 29c01e4..2cc50b3 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -431,6 +431,15 @@
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind);
}
+ if (the_trace->trace_file_.get() != nullptr) {
+ // Do not try to erase the trace file; flush and close it explicitly instead.
+ if (the_trace->trace_file_->Flush() != 0) {
+ PLOG(ERROR) << "Could not flush trace file.";
+ }
+ if (the_trace->trace_file_->Close() != 0) {
+ PLOG(ERROR) << "Could not close trace file.";
+ }
+ }
delete the_trace;
}
runtime->GetThreadList()->ResumeAll();
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 96abee2..70a4dda 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -41,7 +41,7 @@
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
- std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename()));
+ std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
ASSERT_TRUE(file.get() != NULL);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index 51bf847..78c92fc 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -1,3 +1,4 @@
+b17325447 passes
b17630605 passes
b17411468 passes
b2296099 passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 9ad8ea7..285c360 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -30,6 +30,7 @@
}
public static void main(String args[]) throws Exception {
+ b17325447();
b17630605();
b17411468();
b2296099Test();
@@ -64,6 +65,31 @@
minDoubleWith3ConstsTest();
}
+ public static double b17325447_i1(int i1, double f) {
+ return f;
+ }
+
+ public static double b17325447_i2(int i1, int i2, double f) {
+ return f;
+ }
+
+ public static double b17325447_i3(int i1, int i2, int i3, double f) {
+ return f;
+ }
+
+ public static void b17325447() {
+ // b/17325447 - x86 handling of special identity method w/ double spanning reg/mem.
+ double d = 0.0;
+ d += b17325447_i1(123, 1.0);
+ d += b17325447_i2(123, 456, 2.0);
+ d += b17325447_i3(123, 456, 789, 3.0);
+ if (d == 6.0) {
+ System.out.println("b17325447 passes");
+ } else {
+ System.out.println("b17325447 fails: " + d);
+ }
+ }
+
public static void b17630605() {
// b/17630605 - failure to properly handle min long immediates.
long a1 = 40455547223404749L;
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 1b813bf..01d7b81 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -3,4 +3,6 @@
FloatBadArgReg
negLong
sameFieldNames
+b/18380491
+invoke-super abstract
Done!
diff --git a/test/800-smali/smali/b_18380491AbstractBase.smali b/test/800-smali/smali/b_18380491AbstractBase.smali
new file mode 100644
index 0000000..7aa1b1a
--- /dev/null
+++ b/test/800-smali/smali/b_18380491AbstractBase.smali
@@ -0,0 +1,12 @@
+.class public LB18380491AbstractBase;
+
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public abstract foo(I)I
+.end method
diff --git a/test/800-smali/smali/b_18380491ConcreteClass.smali b/test/800-smali/smali/b_18380491ConcreteClass.smali
new file mode 100644
index 0000000..db5ef3b
--- /dev/null
+++ b/test/800-smali/smali/b_18380491ConcreteClass.smali
@@ -0,0 +1,19 @@
+.class public LB18380491ConcreteClass;
+
+.super LB18380491AbstractBase;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, LB18380491AbstractBase;-><init>()V
+ return-void
+.end method
+
+.method public foo(I)I
+ .locals 1
+ if-eqz p1, :invoke_super_abstract
+ return p1
+ :invoke_super_abstract
+ invoke-super {p0, p1}, LB18380491AbstractBase;->foo(I)I
+ move-result v0
+ return v0
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 3a0f8ea..3f613ef 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.LinkedList;
@@ -55,6 +56,10 @@
new Object[]{100}, null, 100));
testCases.add(new TestCase("negLong", "negLong", "negLong", null, null, 122142L));
testCases.add(new TestCase("sameFieldNames", "sameFieldNames", "getInt", null, null, 7));
+ testCases.add(new TestCase("b/18380491", "B18380491ConcreteClass", "foo",
+ new Object[]{42}, null, 42));
+ testCases.add(new TestCase("invoke-super abstract", "B18380491ConcreteClass", "foo",
+ new Object[]{0}, new AbstractMethodError(), null));
}
public void runTests() {
@@ -116,6 +121,9 @@
} catch (Throwable exc) {
if (tc.expectedException == null) {
errorReturn = new IllegalStateException("Did not expect exception", exc);
+ } else if (exc instanceof InvocationTargetException && exc.getCause() != null &&
+ exc.getCause().getClass().equals(tc.expectedException.getClass())) {
+ // Expected exception is wrapped in InvocationTargetException.
} else if (!tc.expectedException.getClass().equals(exc.getClass())) {
errorReturn = new IllegalStateException("Expected " +
tc.expectedException.getClass().getName() +
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0f58234..11713d4 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -307,164 +307,37 @@
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
- 001-HelloWorld \
- 002-sleep \
003-omnibus-opcodes \
- 004-InterfaceTest \
- 004-JniTest \
004-NativeAllocations \
004-ReferenceMap \
- 004-SignalTest \
- 004-StackWalk \
- 004-UnsafeTest \
005-annotations \
- 006-args \
- 007-count10 \
- 008-exceptions \
009-instanceof \
010-instance \
- 011-array-copy \
- 013-math2 \
- 014-math3 \
- 016-intern \
- 017-float \
- 018-stack-overflow \
- 019-wrong-array-type \
- 020-string \
- 021-string2 \
- 022-interface \
+ 012-math \
023-many-interfaces \
- 024-illegal-access \
- 025-access-controller \
- 026-access \
- 028-array-write \
- 029-assert \
- 030-bad-finalizer \
- 031-class-attributes \
- 032-concrete-sub \
- 033-class-init-deadlock \
- 034-call-null \
- 035-enum \
- 036-finalizer \
037-inherit \
- 038-inner-null \
- 039-join-main \
- 040-miranda \
- 042-new-instance \
- 043-privates \
044-proxy \
045-reflect-array \
046-reflect \
047-returns \
- 049-show-object \
- 050-sync-test \
- 051-thread \
- 052-verifier-fun \
- 054-uncaught \
- 055-enum-performance \
- 056-const-string-jumbo \
- 058-enum-order \
- 061-out-of-memory \
062-character-encodings \
063-process-manager \
- 064-field-access \
- 065-mismatched-implements \
- 066-mismatched-super \
- 067-preemptive-unpark \
068-classloader \
069-field-type \
- 070-nio-buffer \
071-dexfile \
- 072-precise-gc \
- 074-gc-thrash \
- 075-verification-error \
- 076-boolean-put \
- 077-method-override \
- 078-polymorphic-virtual \
- 079-phantom \
- 080-oom-throw \
- 081-hot-exceptions \
- 082-inline-execute \
083-compiler-regressions \
- 084-class-init \
- 085-old-style-inner-class \
- 086-null-super \
- 087-gc-after-link \
- 088-monitor-verification \
- 090-loop-formation \
- 092-locale \
- 093-serialization \
- 094-pattern \
- 096-array-copy-concurrent-gc \
- 097-duplicate-method \
- 098-ddmc \
- 100-reflect2 \
- 101-fibonacci \
- 102-concurrent-gc \
- 103-string-append \
- 104-growth-limit \
- 105-invoke \
106-exceptions2 \
107-int-math2 \
- 108-check-cast \
- 109-suspend-check \
- 110-field-access \
- 111-unresolvable-exception \
- 112-double-math \
- 113-multidex \
114-ParallelGC \
- 117-nopatchoat \
- 118-noimage-dex2oat \
- 119-noimage-patchoat \
- 120-hashcode \
- 121-modifiers \
- 121-simple-suspend-check \
- 122-npe \
- 123-compiler-regressions-mt \
- 124-missing-classes \
- 125-gc-and-classloading \
- 126-miranda-multidex \
201-built-in-exception-detail-messages \
- 202-thread-oome \
- 300-package-override \
- 301-abstract-protected \
- 303-verification-stress \
- 304-method-tracing \
- 401-optimizing-compiler \
- 402-optimizing-control-flow \
- 403-optimizing-long \
- 404-optimizing-allocator \
- 405-optimizing-long-allocator \
- 406-fields \
407-arrays \
- 409-materialized-condition \
- 410-floats \
- 411-optimizing-arith \
412-new-array \
- 413-regalloc-regression \
- 414-optimizing-arith-sub \
- 414-static-fields \
- 415-optimizing-arith-neg \
- 416-optimizing-arith-not \
- 417-optimizing-arith-div \
- 418-const-string \
- 419-long-parameter \
- 420-const-class \
- 421-exceptions \
- 421-large-frame \
422-instanceof \
422-type-conversion \
- 423-invoke-interface \
424-checkcast \
- 426-monitor \
- 427-bitwise \
427-bounds \
428-optimizing-arith-rem \
- 700-LoadArgRegs \
701-easy-div-rem \
- 702-LargeBranchOffset \
- 703-floating-point-div \
- 800-smali
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \