30 files changed, 103 insertions, 80 deletions
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index 69b01dbfee..8b90aa5a9d 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -38,6 +38,8 @@ enum class InstructionSet {
 };
 std::ostream& operator<<(std::ostream& os, InstructionSet rhs);
 
+// kRuntimeISA must match the ISA of the machine that ART will be run on. This ISA will be used for
+// the native context, native stack frame and native ABI.
 #if defined(__arm__)
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm;
 #elif defined(__aarch64__)
@@ -52,6 +54,12 @@ static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86_64;
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kNone;
 #endif
 
+// The ISA that ART will generate quick code for, i.e.: that java code will be compiled to. This
+// ISA will be used for the quick context, quick stack frame and quick ABI. This may differ from
+// kRuntimeISA if the simulator is in use where, for example, the native runtime is x86-64 but the
+// quick code generated by the compiler is Arm64.
+static constexpr InstructionSet kRuntimeQuickCodeISA = kRuntimeISA;
+
 // Architecture-specific pointer sizes
 static constexpr PointerSize kArmPointerSize = PointerSize::k32;
 static constexpr PointerSize kArm64PointerSize = PointerSize::k64;
diff --git a/runtime/arch/context-inl.h b/runtime/arch/context-inl.h
index b6fcc3e8b6..087075ea38 100644
--- a/runtime/arch/context-inl.h
+++ b/runtime/arch/context-inl.h
@@ -51,7 +51,7 @@ struct ContextSelector<InstructionSet::kX86_64> { using type = x86_64::X86_64Con
 
 template <InstructionSet Isa>
 using RuntimeContextTypeArch = typename detail::ContextSelector<Isa>::type;
-using RuntimeContextType = RuntimeContextTypeArch<kRuntimeISA>;
+using RuntimeContextType = RuntimeContextTypeArch<kRuntimeQuickCodeISA>;
 
 }  // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index b23a543b5d..090ee42a08 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4138,7 +4138,7 @@ void ClassLinker::LoadMethod(const DexFile& dex_file,
     }
   }
 
-  access_flags |= GetNterpFastPathFlags(shorty, access_flags, kRuntimeISA);
+  access_flags |= GetNterpFastPathFlags(shorty, access_flags, kRuntimeQuickCodeISA);
 
   if (UNLIKELY((access_flags & kAccNative) != 0u)) {
     // Check if the native method is annotated with @FastNative or @CriticalNative.
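Note: after this change kRuntimeQuickCodeISA is still a plain alias for kRuntimeISA; the two constants only diverge once a simulator configuration exists. A minimal sketch of what that divergence could look like, assuming a hypothetical ART_USE_SIMULATOR define that is not part of this change:

    // Hypothetical sketch only; ART_USE_SIMULATOR is not defined by this change.
    #if defined(ART_USE_SIMULATOR) && defined(__x86_64__)
    // Native runtime is x86-64, but the compiler emits Arm64 quick code that
    // runs under the simulator (the example given in the comment above).
    static constexpr InstructionSet kRuntimeQuickCodeISA = InstructionSet::kArm64;
    #else
    static constexpr InstructionSet kRuntimeQuickCodeISA = kRuntimeISA;
    #endif

Keeping the alias in place lets every quick-code call site below switch over now, with no behavior change until the definitions actually differ.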
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 1049e235ec..43602f0431 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -96,7 +96,7 @@ struct CSFSelector<InstructionSet::kX86_64> {
 
 }  // namespace detail
 
-using RuntimeCalleeSaveFrame = detail::CSFSelector<kRuntimeISA>::type;
+using RuntimeCalleeSaveFrame = detail::CSFSelector<kRuntimeQuickCodeISA>::type;
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index fc97e6c1d3..118e4f3bc0 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -182,10 +182,11 @@ class QuickArgumentVisitorImpl {
     if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
       if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
         if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
-          return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
+          return fpr_args_ +
+                 (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA));
         }
       } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
-        return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
+        return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA));
       }
       return stack_args_ + (stack_index_ * kBytesStackArgLocation);
     }
@@ -197,8 +198,8 @@ class QuickArgumentVisitorImpl {
   }
 
   bool IsSplitLongOrDouble() const {
-    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
-        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
+    if ((GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) ||
+        (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4)) {
       return is_split_long_or_double_;
     } else {
       return false;  // An optimization for when GPR and FPRs are 64bit.
@@ -304,7 +305,7 @@ class QuickArgumentVisitorImpl {
           // even-numbered registers by skipping R1 and using R2 instead.
           IncGprIndex();
         }
-        is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
+        is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) &&
             ((gpr_index_ + 1) == kNumQuickGprArgs);
         if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
           // We don't want to split this. Pass over this register.
@@ -320,14 +321,14 @@ class QuickArgumentVisitorImpl {
         }
         if (gpr_index_ < kNumQuickGprArgs) {
           IncGprIndex();
-          if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
+          if (GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA) == 4) {
             if (gpr_index_ < kNumQuickGprArgs) {
               IncGprIndex();
             }
           }
         }
       } else {
-        is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
+        is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4) &&
             ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
         Visit();
         if (kBytesStackArgLocation == 4) {
@@ -346,7 +347,7 @@ class QuickArgumentVisitorImpl {
           }
         } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
           IncFprIndex();
-          if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
+          if (GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA) == 4) {
             if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
               IncFprIndex();
             }
@@ -414,7 +415,7 @@ class QuickArgumentFrameInfoARM {
   static constexpr bool kGprFprLockstep = false;
   static constexpr bool kNaNBoxing = false;
   static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
-    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
+    return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kArm);
   }
 };
@@ -450,7 +451,7 @@ class QuickArgumentFrameInfoARM64 {
   static constexpr bool kGprFprLockstep = false;
   static constexpr bool kNaNBoxing = false;
   static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
-    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
+    return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kArm64);
   }
 };
@@ -499,7 +500,8 @@ class QuickArgumentFrameInfoRISCV64 {
   static constexpr bool kGprFprLockstep = false;
   static constexpr bool kNaNBoxing = true;
   static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
-    return (gpr_index + 1) * GetBytesPerGprSpillLocation(kRuntimeISA);  // skip S0/X8/FP
+    // skip S0/X8/FP
+    return (gpr_index + 1) * GetBytesPerGprSpillLocation(InstructionSet::kRiscv64);
   }
 };
@@ -533,7 +535,7 @@ class QuickArgumentFrameInfoX86 {
   static constexpr bool kGprFprLockstep = false;
   static constexpr bool kNaNBoxing = false;
   static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
-    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
+    return gpr_index * GetBytesPerGprSpillLocation(InstructionSet::kX86);
   }
 };
@@ -580,12 +582,13 @@ class QuickArgumentFrameInfoX86_64 {
   static constexpr bool kGprFprLockstep = false;
   static constexpr bool kNaNBoxing = false;
   static size_t GprIndexToGprOffsetImpl(uint32_t gpr_index) {
+    static constexpr size_t kBytesPerSpill = GetBytesPerGprSpillLocation(InstructionSet::kX86_64);
     switch (gpr_index) {
-      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
-      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
-      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
-      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
-      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
+      case 0: return (4 * kBytesPerSpill);
+      case 1: return (1 * kBytesPerSpill);
+      case 2: return (0 * kBytesPerSpill);
+      case 3: return (5 * kBytesPerSpill);
+      case 4: return (6 * kBytesPerSpill);
       default:
         LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
         UNREACHABLE();
@@ -611,8 +614,8 @@ struct QAFISelector<InstructionSet::kX86_64> { using type = QuickArgumentFrameIn
 
 }  // namespace detail
 
-// TODO(Simulator): Use the quick code ISA instead of kRuntimeISA.
-using QuickArgumentVisitor = QuickArgumentVisitorImpl<detail::QAFISelector<kRuntimeISA>::type>;
+using QuickArgumentVisitor =
+    QuickArgumentVisitorImpl<detail::QAFISelector<kRuntimeQuickCodeISA>::type>;
 
 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
 // allows to use the QuickArgumentVisitor constants without moving all the code in its own module.
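The x86-64 table above is the one place where the GPR index does not map linearly onto a spill slot. A standalone sketch of that permutation, using only facts visible in the diff (the slot numbers 4, 1, 0, 5, 6 and the 8-byte x86-64 GPR spill size); register names are deliberately omitted:

    #include <cstdint>
    #include <cstdio>

    // The i-th managed argument GPR is spilled at a permuted slot, 8 bytes each.
    constexpr std::size_t kBytesPerSpill = 8;  // GetBytesPerGprSpillLocation(kX86_64)

    std::size_t GprIndexToGprOffset(unsigned gpr_index) {
      static constexpr std::size_t kSlot[] = {4, 1, 0, 5, 6};  // cases 0..4 above
      return kSlot[gpr_index] * kBytesPerSpill;  // caller guarantees gpr_index < 5
    }

    int main() {
      for (unsigned i = 0; i < 5; ++i) {
        std::printf("arg gpr %u -> frame offset %zu\n", i, GprIndexToGprOffset(i));
      }
      return 0;
    }

Hoisting the spill size into kBytesPerSpill also makes the switch independent of kRuntimeISA, which is the point of this change: each per-arch frame-info class now names its own InstructionSet.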
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 05bbfcf912..881dbadd0c 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -242,7 +242,7 @@ bool FaultManager::HandleSigbusFault(int sig, siginfo_t* info, [[maybe_unused]]
 
 inline void FaultManager::CheckForUnrecognizedImplicitSuspendCheckInBootImage(
     siginfo_t* siginfo, void* context) {
-  CHECK_EQ(kRuntimeISA, InstructionSet::kArm64);
+  CHECK_EQ(kRuntimeQuickCodeISA, InstructionSet::kArm64);
   uintptr_t fault_pc = GetFaultPc(siginfo, context);
   if (fault_pc == 0u || !IsUint<32>(fault_pc) || !IsAligned<4u>(fault_pc)) {
     return;
@@ -301,7 +301,7 @@ bool FaultManager::HandleSigsegvFault(int sig, siginfo_t* info, void* context) {
         return true;
       }
     }
-  } else if (kRuntimeISA == InstructionSet::kArm64) {
+  } else if (kRuntimeQuickCodeISA == InstructionSet::kArm64) {
     CheckForUnrecognizedImplicitSuspendCheckInBootImage(info, context);
   }
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 26aca42557..ce740da1a8 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -266,7 +266,7 @@ static void UpdateEntryPoints(ArtMethod* method, const void* new_code)
     jit::Jit* jit = Runtime::Current()->GetJit();
     if (jit != nullptr && jit->GetCodeCache()->ContainsPc(new_code)) {
       // Ensure we always have the thumb entrypoint for JIT on arm32.
-      if (kRuntimeISA == InstructionSet::kArm) {
+      if (kRuntimeQuickCodeISA == InstructionSet::kArm) {
        CHECK_EQ(reinterpret_cast<uintptr_t>(new_code) & 1, 1u);
       }
     }
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index 9f627186bf..a69dd8c4c7 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -35,7 +35,7 @@ namespace art HIDDEN {
 namespace interpreter {
 
 bool IsNterpSupported() {
-  switch (kRuntimeISA) {
+  switch (kRuntimeQuickCodeISA) {
     case InstructionSet::kArm:
     case InstructionSet::kThumb2:
     case InstructionSet::kArm64:
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 594f4a8e8a..de19b16e4f 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -432,7 +432,7 @@ OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs)
     }
   }
 
-  osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
+  osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeQuickCodeISA) +
                         osr_method->GetEntryPoint();
 
   VLOG(jit) << "Jumping to "
             << method_name
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2b83eff44f..b889f659ec 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -366,12 +366,12 @@ bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
 }
 
 static uintptr_t FromCodeToAllocation(const void* code) {
-  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
+  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA);
   return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
 }
 
 static const void* FromAllocationToCode(const uint8_t* alloc) {
-  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeISA);
+  size_t alignment = GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA);
   return reinterpret_cast<const void*>(alloc + RoundUp(sizeof(OatQuickMethodHeader), alignment));
 }
 
@@ -530,7 +530,8 @@ void JitCodeCache::FreeAllMethodHeaders(
     }
   });
   ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
-    addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA));  // Thumb-bit.
+    addr = AlignDown(addr,
+                     GetInstructionSetInstructionAlignment(kRuntimeQuickCodeISA));  // Thumb-bit.
     bool res = debug_info.emplace(addr).second;
     CHECK(res) << "Duplicate debug info: " << addr << " " << name;
     CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
@@ -1370,7 +1371,7 @@ void JitCodeCache::DoCollection(Thread* self) {
 }
 
 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
-  static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
+  static_assert(kRuntimeQuickCodeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
   const void* pc_ptr = reinterpret_cast<const void*>(pc);
   if (!ContainsPc(pc_ptr)) {
     return nullptr;
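A minimal, self-contained illustration of the FromCodeToAllocation / FromAllocationToCode round-trip above. The header sits in front of the code, padded so the code starts at the ISA's code alignment; the constants here are stand-ins, not ART's real values:

    #include <cassert>
    #include <cstdint>

    constexpr std::uintptr_t kHeaderSize = 24;     // stand-in for sizeof(OatQuickMethodHeader)
    constexpr std::uintptr_t kCodeAlignment = 16;  // stand-in for GetInstructionSetCodeAlignment(isa)

    constexpr std::uintptr_t RoundUp(std::uintptr_t x, std::uintptr_t n) {
      return (x + n - 1) & ~(n - 1);  // n must be a power of two
    }

    constexpr std::uintptr_t FromAllocationToCode(std::uintptr_t alloc) {
      return alloc + RoundUp(kHeaderSize, kCodeAlignment);
    }
    constexpr std::uintptr_t FromCodeToAllocation(std::uintptr_t code) {
      return code - RoundUp(kHeaderSize, kCodeAlignment);
    }

    int main() {
      std::uintptr_t alloc = 0x1000;
      assert(FromCodeToAllocation(FromAllocationToCode(alloc)) == alloc);
      return 0;
    }

Because both directions round the header size up by the same alignment, the two functions are exact inverses; using the quick-code ISA on both sides keeps that invariant when the quick-code and native ISAs differ.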
diff --git a/runtime/nterp_helpers.cc b/runtime/nterp_helpers.cc
index ba93df693d..0a306e2f56 100644
--- a/runtime/nterp_helpers.cc
+++ b/runtime/nterp_helpers.cc
@@ -181,7 +181,9 @@ QuickMethodFrameInfo NterpFrameInfo(ArtMethod** frame) {
       RuntimeCalleeSaveFrame::GetCoreSpills(CalleeSaveType::kSaveAllCalleeSaves);
   uint32_t fp_spills =
       RuntimeCalleeSaveFrame::GetFpSpills(CalleeSaveType::kSaveAllCalleeSaves);
-  return QuickMethodFrameInfo(NterpGetFrameSize(*frame, kRuntimeISA), core_spills, fp_spills);
+  return QuickMethodFrameInfo(NterpGetFrameSize(*frame, kRuntimeQuickCodeISA),
+                              core_spills,
+                              fp_spills);
 }
 
 uintptr_t NterpGetRegistersArray(ArtMethod** frame) {
@@ -192,7 +194,7 @@ uintptr_t NterpGetRegistersArray(ArtMethod** frame) {
 }
 
 uintptr_t NterpGetReferenceArray(ArtMethod** frame) {
-  const uint16_t out_regs = GetNumberOfOutRegs(*frame, kRuntimeISA);
+  const uint16_t out_regs = GetNumberOfOutRegs(*frame, kRuntimeQuickCodeISA);
   // The references array is just above the saved frame pointer.
   return reinterpret_cast<uintptr_t>(frame) +
          kPointerSize +  // method
@@ -202,7 +204,7 @@ uintptr_t NterpGetReferenceArray(ArtMethod** frame) {
 }
 
 uint32_t NterpGetDexPC(ArtMethod** frame) {
-  const uint16_t out_regs = GetNumberOfOutRegs(*frame, kRuntimeISA);
+  const uint16_t out_regs = GetNumberOfOutRegs(*frame, kRuntimeQuickCodeISA);
   uintptr_t dex_pc_ptr = reinterpret_cast<uintptr_t>(frame) +
                          kPointerSize +  // method
                          RoundUp(out_regs * kVRegSize, kPointerSize);  // out arguments and pointer alignment
diff --git a/runtime/nterp_helpers.h b/runtime/nterp_helpers.h
index 6ce2038a9d..a6edeb0ee0 100644
--- a/runtime/nterp_helpers.h
+++ b/runtime/nterp_helpers.h
@@ -73,7 +73,7 @@ uint32_t NterpGetVRegReference(ArtMethod** frame, uint16_t vreg)
 /**
  * Returns whether the given method can run with nterp. The instruction set can
  * be passed for cross-compilation.
  */
-EXPORT bool CanMethodUseNterp(ArtMethod* method, InstructionSet isa = kRuntimeISA)
+EXPORT bool CanMethodUseNterp(ArtMethod* method, InstructionSet isa = kRuntimeQuickCodeISA)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 /**
diff --git a/runtime/oat/elf_file.cc b/runtime/oat/elf_file.cc
index 436a28a603..5fb8053856 100644
--- a/runtime/oat/elf_file.cc
+++ b/runtime/oat/elf_file.cc
@@ -1116,9 +1116,9 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
   if (executable) {
     InstructionSet elf_ISA = GetInstructionSetFromELF(GetHeader().e_machine, GetHeader().e_flags);
-    if (elf_ISA != kRuntimeISA) {
+    if (elf_ISA != kRuntimeQuickCodeISA) {
       std::ostringstream oss;
-      oss << "Expected ISA " << kRuntimeISA << " but found " << elf_ISA;
+      oss << "Expected ISA " << kRuntimeQuickCodeISA << " but found " << elf_ISA;
       *error_msg = oss.str();
       return false;
     }
diff --git a/runtime/oat/oat_file.cc b/runtime/oat/oat_file.cc
index 879e3f1622..3e298e5590 100644
--- a/runtime/oat/oat_file.cc
+++ b/runtime/oat/oat_file.cc
@@ -1862,7 +1862,7 @@ class OatFileBackedByVdex final : public OatFileBase {
       store.Put(OatHeader::kClassPathKey, context->EncodeContextForOatFile(""));
     }
 
-    oat_header_.reset(OatHeader::Create(kRuntimeISA,
+    oat_header_.reset(OatHeader::Create(kRuntimeQuickCodeISA,
                                         isa_features.get(),
                                         number_of_dex_files,
                                         &store));
diff --git a/runtime/oat/oat_file_assistant.cc b/runtime/oat/oat_file_assistant.cc
index def2dc34c4..0006ae8146 100644
--- a/runtime/oat/oat_file_assistant.cc
+++ b/runtime/oat/oat_file_assistant.cc
@@ -145,9 +145,9 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
     load_executable_ = false;
   }
 
-  if (load_executable_ && isa != kRuntimeISA) {
+  if (load_executable_ && isa != kRuntimeQuickCodeISA) {
     LOG(WARNING) << "OatFileAssistant: Load executable specified, "
-                 << "but isa is not kRuntimeISA. Will not attempt to load executable.";
+                 << "but isa is not kRuntimeQuickCodeISA. Will not attempt to load executable.";
     load_executable_ = false;
   }
diff --git a/runtime/oat/oat_file_assistant_context.cc b/runtime/oat/oat_file_assistant_context.cc
index fd168ffa04..23ad19b652 100644
--- a/runtime/oat/oat_file_assistant_context.cc
+++ b/runtime/oat/oat_file_assistant_context.cc
@@ -62,7 +62,8 @@ OatFileAssistantContext::OatFileAssistantContext(Runtime* runtime)
           .deny_art_apex_data_files = runtime->DenyArtApexDataFiles(),
       })) {
   // Fetch boot image info from the runtime.
-  std::vector<BootImageInfo>& boot_image_info_list = boot_image_info_list_by_isa_[kRuntimeISA];
+  std::vector<BootImageInfo>& boot_image_info_list =
+      boot_image_info_list_by_isa_[kRuntimeQuickCodeISA];
   for (const ImageSpace* image_space : runtime->GetHeap()->GetBootImageSpaces()) {
     // We only need the checksum of the first component for each boot image. They are in image
     // spaces that have a non-zero component count.
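For orientation: the ISA threaded through OatFileAssistant here also ends up as a directory component of odex paths, via DexLocationToOdexFilename in the next file. A hedged sketch of that naming scheme; the exact layout is an assumption for illustration, the real logic lives in oat_file_assistant.cc:

    #include <iostream>
    #include <string>

    // Assumed layout: <dir>/oat/<isa>/<stem>.odex (illustrative, not authoritative).
    std::string OdexFilename(const std::string& dir, const std::string& isa,
                             const std::string& stem) {
      return dir + "/oat/" + isa + "/" + stem + ".odex";
    }

    int main() {
      // e.g. "/data/app/foo/oat/arm64/base.odex"
      std::cout << OdexFilename("/data/app/foo", "arm64", "base") << '\n';
      return 0;
    }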
diff --git a/runtime/oat/oat_file_manager.cc b/runtime/oat/oat_file_manager.cc
index 455914d4f8..f10ba673df 100644
--- a/runtime/oat/oat_file_manager.cc
+++ b/runtime/oat/oat_file_manager.cc
@@ -219,7 +219,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
         << "Are you using the deprecated DexFile APIs?";
   } else if (context != nullptr) {
     auto oat_file_assistant = std::make_unique<OatFileAssistant>(dex_location,
-                                                                 kRuntimeISA,
+                                                                 kRuntimeQuickCodeISA,
                                                                  context.get(),
                                                                  runtime->GetOatFilesExecutable(),
                                                                  only_use_system_oat_files_);
@@ -368,7 +368,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
     // file as non-executable.
     auto nonexecutable_oat_file_assistant =
         std::make_unique<OatFileAssistant>(dex_location,
-                                           kRuntimeISA,
+                                           kRuntimeQuickCodeISA,
                                            context.get(),
                                            /*load_executable=*/false,
                                            only_use_system_oat_files_);
@@ -547,7 +547,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat_
   std::string dex_location;
   std::string vdex_path;
   bool has_vdex = OatFileAssistant::AnonymousDexVdexLocation(dex_headers,
-                                                             kRuntimeISA,
+                                                             kRuntimeQuickCodeISA,
                                                              &dex_location,
                                                              &vdex_path);
@@ -838,7 +838,7 @@ void OatFileManager::RunBackgroundVerification(const std::vector<const DexFile*>
   std::string error_msg;
   std::string odex_filename;
   if (!OatFileAssistant::DexLocationToOdexFilename(dex_location,
-                                                   kRuntimeISA,
+                                                   kRuntimeQuickCodeISA,
                                                    &odex_filename,
                                                    &error_msg)) {
     LOG(WARNING) << "Could not get odex filename for " << dex_location << ": " << error_msg;
diff --git a/runtime/oat/oat_quick_method_header.cc b/runtime/oat/oat_quick_method_header.cc
index 3d086e88bf..17120c482f 100644
--- a/runtime/oat/oat_quick_method_header.cc
+++ b/runtime/oat/oat_quick_method_header.cc
@@ -74,7 +74,8 @@ uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method,
   StackMap stack_map = code_info.GetStackMapForDexPc(dex_pc);
   if (stack_map.IsValid()) {
-    return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(kRuntimeISA);
+    return reinterpret_cast<uintptr_t>(entry_point) +
+           stack_map.GetNativePcOffset(kRuntimeQuickCodeISA);
   }
   if (abort_on_failure) {
     ScopedObjectAccess soa(Thread::Current());
@@ -102,7 +103,7 @@ uintptr_t OatQuickMethodHeader::ToNativeQuickPcForCatchHandlers(
   *stack_map_row = stack_map.Row();
   if (stack_map.IsValid()) {
     return reinterpret_cast<uintptr_t>(entry_point) +
-           stack_map.GetNativePcOffset(kRuntimeISA);
+           stack_map.GetNativePcOffset(kRuntimeQuickCodeISA);
   }
   if (abort_on_failure) {
     std::stringstream ss;
diff --git a/runtime/oat/oat_quick_method_header.h b/runtime/oat/oat_quick_method_header.h
index c32eb7f41a..e9f8c6f142 100644
--- a/runtime/oat/oat_quick_method_header.h
+++ b/runtime/oat/oat_quick_method_header.h
@@ -56,8 +56,8 @@ class PACKED(4) OatQuickMethodHeader {
   static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
     uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
     uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
-    DCHECK(IsAlignedParam(code, GetInstructionSetCodeAlignment(kRuntimeISA)) ||
-           IsAlignedParam(header, GetInstructionSetCodeAlignment(kRuntimeISA)))
+    DCHECK(IsAlignedParam(code, GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA)) ||
+           IsAlignedParam(header, GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA)))
         << std::hex << code << " " << std::hex << header;
     return reinterpret_cast<OatQuickMethodHeader*>(header);
   }
@@ -67,7 +67,8 @@ class PACKED(4) OatQuickMethodHeader {
   }
 
   static size_t InstructionAlignedSize() {
-    return RoundUp(sizeof(OatQuickMethodHeader), GetInstructionSetCodeAlignment(kRuntimeISA));
+    return RoundUp(sizeof(OatQuickMethodHeader),
+                   GetInstructionSetCodeAlignment(kRuntimeQuickCodeISA));
   }
 
   OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
@@ -131,8 +132,9 @@ class PACKED(4) OatQuickMethodHeader {
     // mspace_memalign or memory mapped from a file, neither of which is tagged by MTE/HWASan.
     DCHECK_EQ(code_start, reinterpret_cast<uintptr_t>(code_start) & ((UINT64_C(1) << 56) - 1));
 #endif
-    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
-    if (kRuntimeISA == InstructionSet::kArm) {
+    static_assert(kRuntimeQuickCodeISA != InstructionSet::kThumb2,
+                  "kThumb2 cannot be a runtime ISA");
+    if (kRuntimeQuickCodeISA == InstructionSet::kArm) {
       // On Thumb-2, the pc is offset by one.
       code_start++;
     }
@@ -140,12 +142,13 @@ class PACKED(4) OatQuickMethodHeader {
   }
 
   const uint8_t* GetEntryPoint() const {
-    // When the runtime architecture is ARM, `kRuntimeISA` is set to `kArm`
+    // When the runtime architecture is ARM, `kRuntimeQuickCodeISA` is set to `kArm`
     // (not `kThumb2`), *but* we always generate code for the Thumb-2
     // instruction set anyway. Thumb-2 requires the entrypoint to be of
     // offset 1.
-    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
-    return (kRuntimeISA == InstructionSet::kArm)
+    static_assert(kRuntimeQuickCodeISA != InstructionSet::kThumb2,
+                  "kThumb2 cannot be a runtime ISA");
+    return (kRuntimeQuickCodeISA == InstructionSet::kArm)
         ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
         : code_;
   }
@@ -169,9 +172,9 @@ class PACKED(4) OatQuickMethodHeader {
     QuickMethodFrameInfo frame_info = GetFrameInfo();
     size_t frame_size = frame_info.FrameSizeInBytes();
     size_t core_spill_size =
-        POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
+        POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA);
     size_t fpu_spill_size =
-        POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
+        POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA);
     return frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
   }
diff --git a/runtime/oat/stack_map.h b/runtime/oat/stack_map.h
index 07f393aa9d..68c2f08b93 100644
--- a/runtime/oat/stack_map.h
+++ b/runtime/oat/stack_map.h
@@ -469,7 +469,7 @@ class CodeInfo {
   }
 
   EXPORT StackMap GetStackMapForNativePcOffset(uintptr_t pc,
-                                               InstructionSet isa = kRuntimeISA) const;
+                                               InstructionSet isa = kRuntimeQuickCodeISA) const;
 
   // Dump this CodeInfo object on `vios`.
   // `code_offset` is the (absolute) native PC of the compiled method.
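A standalone sketch of the Thumb-bit convention that GetEntryPoint() and GetCodeStart() above maintain on arm32, and that the "Thumb-bit" AlignDown in jit_code_cache.cc undoes: the published entry point is the code address with bit 0 set, and consumers that need the raw code address clear it again.

    #include <cassert>
    #include <cstdint>

    // On arm32, quick code is Thumb-2: branching to an address with bit 0 set
    // switches the CPU into Thumb state, and code is at least 2-byte aligned,
    // so bit 0 is free to carry that flag.
    std::uintptr_t EntryPointFromCode(std::uintptr_t code, bool arm32) {
      return arm32 ? (code | 1u) : code;
    }
    std::uintptr_t CodeFromEntryPoint(std::uintptr_t entry, bool arm32) {
      return arm32 ? (entry & ~static_cast<std::uintptr_t>(1)) : entry;
    }

    int main() {
      std::uintptr_t code = 0x70002000;
      assert(CodeFromEntryPoint(EntryPointFromCode(code, /*arm32=*/true), true) == code);
      assert(EntryPointFromCode(code, /*arm32=*/false) == code);
      return 0;
    }

This is also why the static_asserts insist that kThumb2 is never the (quick-code) runtime ISA: the constant stays kArm and the Thumb encoding is expressed purely through the entry-point bit.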
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index beb42a6a05..09bbb2ab9e 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -798,7 +798,8 @@ void QuickExceptionHandler::DeoptimizeSingleFrame(DeoptimizationKind kind) {
 
 void QuickExceptionHandler::DeoptimizePartialFragmentFixup() {
   CHECK(handler_quick_frame_ != nullptr);
   // Architecture-dependent work. This is to get the LR right for x86 and x86-64.
-  if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+  if (kRuntimeQuickCodeISA == InstructionSet::kX86 ||
+      kRuntimeQuickCodeISA == InstructionSet::kX86_64) {
     // On x86, the return address is on the stack, so just reuse it. Otherwise we would have to
     // change how longjump works.
     handler_quick_frame_ = reinterpret_cast<ArtMethod**>(
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index f90dcd1d30..03108046a6 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -43,7 +43,7 @@ inline mirror::Object* Runtime::GetClearedJniWeakGlobal() {
 
 inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(ArtMethod* method) {
   DCHECK(method != nullptr);
-  DCHECK_EQ(instruction_set_, kRuntimeISA);
+  DCHECK_EQ(instruction_set_, kRuntimeQuickCodeISA);
   // Cannot be imt-conflict-method or resolution-method.
   DCHECK_NE(method, GetImtConflictMethod());
   DCHECK_NE(method, GetResolutionMethod());
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 53c1cb152f..651c88f687 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1870,7 +1870,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
   InitPlatformSignalHandlers();
 
   // Change the implicit checks flags based on runtime architecture.
-  switch (kRuntimeISA) {
+  switch (kRuntimeQuickCodeISA) {
     case InstructionSet::kArm64:
       implicit_suspend_checks_ = true;
       FALLTHROUGH_INTENDED;
@@ -2960,7 +2960,8 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::strin
   // architecture support, dex2oat may be compiled as a different instruction-set than that
   // currently being executed.
   std::string instruction_set("--instruction-set=");
-  instruction_set += GetInstructionSetString(kRuntimeISA);
+  // The dex2oat instruction set should match the runtime's target ISA.
+  instruction_set += GetInstructionSetString(kRuntimeQuickCodeISA);
   argv->push_back(instruction_set);
 
   if (InstructionSetFeatures::IsRuntimeDetectionSupported()) {
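The practical effect of the runtime.cc hunk above: when launching dex2oat, the runtime now names the quick-code target rather than its own native ISA. A minimal sketch of the resulting argument, assuming an arm64 target (GetInstructionSetString(kArm64) is "arm64"):

    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> argv;
      std::string instruction_set("--instruction-set=");
      instruction_set += "arm64";  // stand-in for GetInstructionSetString(kRuntimeQuickCodeISA)
      argv.push_back(instruction_set);
      // argv now carries "--instruction-set=arm64" to dex2oat.
      return 0;
    }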
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index fdba92e67f..fe1cf545ac 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -1143,7 +1143,7 @@ class RuntimeImageHelper {
     std::unique_ptr<const InstructionSetFeatures> isa_features =
         InstructionSetFeatures::FromCppDefines();
     std::unique_ptr<OatHeader> oat_header(
-        OatHeader::Create(kRuntimeISA,
+        OatHeader::Create(kRuntimeQuickCodeISA,
                           isa_features.get(),
                           number_of_dex_files,
                           &key_value_store));
@@ -1856,7 +1856,7 @@ std::string RuntimeImage::GetRuntimeImagePath(const std::string& app_data_dir,
 std::string RuntimeImage::GetRuntimeImagePath(const std::string& dex_location) {
   return GetRuntimeImagePath(Runtime::Current()->GetProcessDataDirectory(),
                              dex_location,
                              GetInstructionSetString(kRuntimeQuickCodeISA));
 }
 
 static bool EnsureDirectoryExists(const std::string& directory, std::string* error_msg) {
diff --git a/runtime/runtime_test.cc b/runtime/runtime_test.cc
index 1faecf36a8..182a992434 100644
--- a/runtime/runtime_test.cc
+++ b/runtime/runtime_test.cc
@@ -95,7 +95,8 @@ TEST_F(RuntimeTest, ElfAlignmentMismatch) {
   }
 #endif
   // Determine the alignment of the ART APEX by reading the alignment of boot.oat.
-  std::string core_oat_location = GetSystemImageFilename(GetCoreOatLocation().c_str(), kRuntimeISA);
+  std::string core_oat_location = GetSystemImageFilename(GetCoreOatLocation().c_str(),
+                                                         kRuntimeQuickCodeISA);
   std::unique_ptr<File> core_oat_file(OS::OpenFileForReading(core_oat_location.c_str()));
   ASSERT_TRUE(core_oat_file.get() != nullptr) << core_oat_location;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5670b12415..c3130b3ff8 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -136,7 +136,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
       CodeInfo code_info(cur_oat_quick_method_header_);
       std::stringstream os;
       VariableIndentationOutputStream vios(&os);
-      code_info.Dump(&vios, /* code_offset= */ 0u, /* verbose= */ true, kRuntimeISA);
+      code_info.Dump(&vios, /* code_offset= */ 0u, /* verbose= */ true, kRuntimeQuickCodeISA);
       LOG(FATAL) << os.str() << '\n'
                  << "StackMap not found for "
                  << std::hex << cur_quick_frame_pc_ << " in "
@@ -407,7 +407,7 @@ bool StackVisitor::GetRegisterIfAccessible(uint32_t reg,
   const bool is_float = (location_kind == DexRegisterLocation::Kind::kInFpuRegister) ||
                         (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
 
-  if (kRuntimeISA == InstructionSet::kX86 && is_float) {
+  if (kRuntimeQuickCodeISA == InstructionSet::kX86 && is_float) {
     // X86 float registers are 64-bit and each XMM register is provided as two separate
     // 32-bit registers by the context.
     reg = (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh)
@@ -419,7 +419,7 @@ bool StackVisitor::GetRegisterIfAccessible(uint32_t reg,
     return false;
   }
   uintptr_t ptr_val = GetRegister(reg, is_float);
-  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
+  const bool target64 = Is64BitInstructionSet(kRuntimeQuickCodeISA);
   if (target64) {
     const bool is_high = (location_kind == DexRegisterLocation::Kind::kInRegisterHigh) ||
                          (location_kind == DexRegisterLocation::Kind::kInFpuRegisterHigh);
@@ -803,9 +803,9 @@ uint8_t* StackVisitor::GetShouldDeoptimizeFlagAddr() const REQUIRES_SHARED(Locks
   size_t frame_size = frame_info.FrameSizeInBytes();
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   size_t core_spill_size =
-      POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
+      POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeQuickCodeISA);
   size_t fpu_spill_size =
-      POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
+      POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeQuickCodeISA);
   size_t offset = frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
   uint8_t* should_deoptimize_addr = sp + offset;
   DCHECK_EQ(*should_deoptimize_addr & ~static_cast<uint8_t>(DeoptimizeFlagValue::kAll), 0);
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 180d28b5eb..432453e311 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -599,14 +599,14 @@ inline void Thread::SetStackSize<StackType::kHardware>(size_t new_stack_size) {
 inline uint8_t* Thread::GetStackEndForInterpreter(bool implicit_overflow_check) const {
   uint8_t* end = GetStackEnd<kNativeStackType>() +
                  (implicit_overflow_check
-                      ? GetStackOverflowReservedBytes(kRuntimeISA)
+                      ? GetStackOverflowReservedBytes(kRuntimeQuickCodeISA)
                       : 0);
   if (kIsDebugBuild) {
     // In a debuggable build, but especially under ASAN, the access-checks interpreter has a
     // potentially humongous stack size. We don't want to take too much of the stack regularly,
     // so do not increase the regular reserved size (for compiled code etc) and only report the
     // virtually smaller stack to the interpreter here.
-    end += GetStackOverflowReservedBytes(kRuntimeISA);
+    end += GetStackOverflowReservedBytes(kRuntimeQuickCodeISA);
   }
   return end;
 }
@@ -616,7 +616,7 @@ inline void Thread::ResetDefaultStackEnd() {
   // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
   // to throw a StackOverflowError.
   SetStackEnd<stack_type>(
-      GetStackBegin<stack_type>() + GetStackOverflowReservedBytes(kRuntimeISA));
+      GetStackBegin<stack_type>() + GetStackOverflowReservedBytes(kRuntimeQuickCodeISA));
 }
 
 template <StackType stack_type>
@@ -626,7 +626,7 @@ inline void Thread::SetStackEndForStackOverflow()
   if (GetStackEnd<stack_type>() == GetStackBegin<stack_type>()) {
     // However, we seem to have already extended to use the full stack.
     LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
               << GetStackOverflowReservedBytes(kRuntimeQuickCodeISA) << ")?";
     DumpStack(LOG_STREAM(ERROR));
     LOG(FATAL) << "Recursive stack overflow.";
   }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 0e34206585..1f8baeda68 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -138,7 +138,8 @@ using android::base::StringPrintf;
 bool Thread::is_started_ = false;
 pthread_key_t Thread::pthread_key_self_;
 ConditionVariable* Thread::resume_cond_ = nullptr;
-const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
+const size_t Thread::kStackOverflowImplicitCheckSize =
+    GetStackOverflowReservedBytes(kRuntimeQuickCodeISA);
 bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
 Thread* Thread::jit_sensitive_thread_ = nullptr;
 std::atomic<Mutex*> Thread::cp_placeholder_mutex_(nullptr);
@@ -717,12 +718,12 @@ static size_t FixStackSize(size_t stack_size) {
     // If we are going to use implicit stack checks, allocate space for the protected
     // region at the bottom of the stack.
     stack_size += Thread::kStackOverflowImplicitCheckSize +
-        GetStackOverflowReservedBytes(kRuntimeISA);
+        GetStackOverflowReservedBytes(kRuntimeQuickCodeISA);
   } else {
     // It's likely that callers are trying to ensure they have at least a certain amount of
     // stack space, so we should add our reserved space on top of what they requested, rather
     // than implicitly take it away from them.
-    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
+    stack_size += GetStackOverflowReservedBytes(kRuntimeQuickCodeISA);
   }
 
   // Some systems require the stack size to be a multiple of the system page size, so round up.
@@ -1369,7 +1370,7 @@ bool Thread::InitStack(uint8_t* read_stack_base, size_t read_stack_size, size_t
   DCHECK_ALIGNED_PARAM(static_cast<size_t>(GetStackOverflowProtectedSize()),
                        static_cast<int32_t>(gPageSize));
   size_t min_stack = GetStackOverflowProtectedSize() +
-      RoundUp(GetStackOverflowReservedBytes(kRuntimeISA) + 4 * KB, gPageSize);
+      RoundUp(GetStackOverflowReservedBytes(kRuntimeQuickCodeISA) + 4 * KB, gPageSize);
   if (read_stack_size <= min_stack) {
     // Note, as we know the stack is small, avoid operations that could use a lot of stack.
     LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
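A compact model of the FixStackSize() policy above, with illustrative constants; ART's real values are per-ISA and come from GetStackOverflowReservedBytes:

    #include <cstddef>

    constexpr std::size_t kStackOverflowReservedBytes = 16 * 1024;     // illustrative
    constexpr std::size_t kStackOverflowImplicitCheckSize = 8 * 1024;  // illustrative

    // The reservation is added on top of the caller's request, never taken
    // out of it; implicit checks additionally reserve the protected region
    // at the bottom of the stack.
    std::size_t FixStackSize(std::size_t requested, bool implicit_checks) {
      std::size_t size = requested + kStackOverflowReservedBytes;
      if (implicit_checks) {
        size += kStackOverflowImplicitCheckSize;
      }
      return size;
    }

Since quick code and nterp frames live on the same native stack, sizing the reservation by the quick-code ISA keeps the overflow budget consistent with the frames that will actually be pushed.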
diff --git a/test/692-vdex-inmem-loader/vdex_inmem_loader.cc b/test/692-vdex-inmem-loader/vdex_inmem_loader.cc
index 8152435212..e1278becbd 100644
--- a/test/692-vdex-inmem-loader/vdex_inmem_loader.cc
+++ b/test/692-vdex-inmem-loader/vdex_inmem_loader.cc
@@ -95,7 +95,7 @@ extern "C" JNIEXPORT bool JNICALL Java_Main_hasVdexFile(JNIEnv*,
   std::string odex_filename;
   std::string error_msg;
   if (!OatFileAssistant::DexLocationToOdexFilename(dex_location,
-                                                   kRuntimeISA,
+                                                   kRuntimeQuickCodeISA,
                                                    &odex_filename,
                                                    &error_msg)) {
     LOG(WARNING) << "Could not get odex filename for " << dex_location << ": " << error_msg;
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
index afa9499687..7f76b050a0 100644
--- a/tools/cpp-define-generator/globals.def
+++ b/tools/cpp-define-generator/globals.def
@@ -82,7 +82,7 @@ ASM_DEFINE(STACK_REFERENCE_SIZE,
 ASM_DEFINE(STD_MEMORY_ORDER_RELAXED,
            static_cast<std::underlying_type_t<std::memory_order>>(std::memory_order_relaxed))
 ASM_DEFINE(STACK_OVERFLOW_RESERVED_BYTES,
-           GetStackOverflowReservedBytes(art::kRuntimeISA))
+           GetStackOverflowReservedBytes(art::kRuntimeQuickCodeISA))
 ASM_DEFINE(CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS,
            art::POPCOUNT(art::RuntimeCalleeSaveFrame::GetCoreSpills(
                art::CalleeSaveType::kSaveEverything)))