Merge "ART: Interfaces must declare superclass j.l.Object."
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b93b05c..37f7d63 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -29,7 +29,6 @@
#include "base/macros.h"
#include "calling_convention.h"
#include "class_linker.h"
-#include "compiled_method.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
@@ -115,10 +114,10 @@
// convention.
//
template <PointerSize kPointerSize>
-static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) {
+static JniCompiledMethod ArtJniCompileMethodInternal(CompilerDriver* driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) {
const bool is_native = (access_flags & kAccNative) != 0;
CHECK(is_native);
const bool is_static = (access_flags & kAccStatic) != 0;
@@ -657,16 +656,12 @@
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
- return CompiledMethod::SwapAllocCompiledMethod(driver,
- instruction_set,
- ArrayRef<const uint8_t>(managed_code),
- frame_size,
- main_jni_conv->CoreSpillMask(),
- main_jni_conv->FpSpillMask(),
- /* method_info */ ArrayRef<const uint8_t>(),
- /* vmap_table */ ArrayRef<const uint8_t>(),
- ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
- ArrayRef<const linker::LinkerPatch>());
+ return JniCompiledMethod(instruction_set,
+ std::move(managed_code),
+ frame_size,
+ main_jni_conv->CoreSpillMask(),
+ main_jni_conv->FpSpillMask(),
+ ArrayRef<const uint8_t>(*jni_asm->cfi().data()));
}
// Copy a single parameter from the managed to the JNI calling convention.
@@ -775,10 +770,10 @@
}
}
-CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) {
+JniCompiledMethod ArtQuickJniCompileMethod(CompilerDriver* compiler,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) {
if (Is64BitInstructionSet(compiler->GetInstructionSet())) {
return ArtJniCompileMethodInternal<PointerSize::k64>(
compiler, access_flags, method_idx, dex_file);
diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h
index 3fcce55..1141994 100644
--- a/compiler/jni/quick/jni_compiler.h
+++ b/compiler/jni/quick/jni_compiler.h
@@ -17,18 +17,55 @@
#ifndef ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
#define ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
-#include "compiler.h"
-#include "dex_file.h"
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/array_ref.h"
namespace art {
+class ArtMethod;
class CompilerDriver;
-class CompiledMethod;
+class DexFile;
-CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file);
+class JniCompiledMethod {
+ public:
+ JniCompiledMethod(InstructionSet instruction_set,
+ std::vector<uint8_t>&& code,
+ uint32_t frame_size,
+ uint32_t core_spill_mask,
+ uint32_t fp_spill_mask,
+ ArrayRef<const uint8_t> cfi)
+ : instruction_set_(instruction_set),
+ code_(std::move(code)),
+ frame_size_(frame_size),
+ core_spill_mask_(core_spill_mask),
+ fp_spill_mask_(fp_spill_mask),
+ cfi_(cfi.begin(), cfi.end()) {}
+
+ JniCompiledMethod(JniCompiledMethod&& other) = default;
+ ~JniCompiledMethod() = default;
+
+ InstructionSet GetInstructionSet() const { return instruction_set_; }
+ ArrayRef<const uint8_t> GetCode() const { return ArrayRef<const uint8_t>(code_); }
+ uint32_t GetFrameSize() const { return frame_size_; }
+ uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
+ uint32_t GetFpSpillMask() const { return fp_spill_mask_; }
+ ArrayRef<const uint8_t> GetCfi() const { return ArrayRef<const uint8_t>(cfi_); }
+
+ private:
+ InstructionSet instruction_set_;
+ std::vector<uint8_t> code_;
+ uint32_t frame_size_;
+ uint32_t core_spill_mask_;
+ uint32_t fp_spill_mask_;
+ std::vector<uint8_t> cfi_;
+};
+
+JniCompiledMethod ArtQuickJniCompileMethod(CompilerDriver* compiler,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file);
} // namespace art
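
JniCompiledMethod is deliberately a plain value holder for the generated stub, decoupled from CompiledMethod and its swap-space allocation: the AOT path below wraps it into a CompiledMethod, while the JIT path hands the raw code straight to the code cache. A minimal consumption sketch, assuming a CompilerDriver* `driver` plus `access_flags`, `method_idx` and `dex_file` are in scope:

    JniCompiledMethod stub =
        ArtQuickJniCompileMethod(driver, access_flags, method_idx, dex_file);
    ArrayRef<const uint8_t> code = stub.GetCode();   // machine code of the JNI stub
    ArrayRef<const uint8_t> cfi = stub.GetCfi();     // unwind info, e.g. for MethodDebugInfo::cfi
    uint32_t frame_size = stub.GetFrameSize();       // passed on to CompiledMethod or CommitCode()
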
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 095ca63..b6d3294 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1133,12 +1133,20 @@
}
}
- CompiledMethod* compiled_method = ArtQuickJniCompileMethod(GetCompilerDriver(),
- access_flags,
- method_idx,
- dex_file);
+ JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
+ GetCompilerDriver(), access_flags, method_idx, dex_file);
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
- return compiled_method;
+ return CompiledMethod::SwapAllocCompiledMethod(
+ GetCompilerDriver(),
+ jni_compiled_method.GetInstructionSet(),
+ jni_compiled_method.GetCode(),
+ jni_compiled_method.GetFrameSize(),
+ jni_compiled_method.GetCoreSpillMask(),
+ jni_compiled_method.GetFpSpillMask(),
+ /* method_info */ ArrayRef<const uint8_t>(),
+ /* vmap_table */ ArrayRef<const uint8_t>(),
+ jni_compiled_method.GetCfi(),
+ /* patches */ ArrayRef<const linker::LinkerPatch>());
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
@@ -1188,7 +1196,69 @@
Runtime* runtime = Runtime::Current();
ArenaAllocator allocator(runtime->GetJitArenaPool());
- ArenaStack arena_stack(Runtime::Current()->GetJitArenaPool());
+
+ if (UNLIKELY(method->IsNative())) {
+ JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
+ GetCompilerDriver(), access_flags, method_idx, *dex_file);
+ ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
+ ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
+ allocator.Adapter(kArenaAllocCHA));
+ const void* code = code_cache->CommitCode(
+ self,
+ method,
+ /* stack_map_data */ nullptr,
+ /* method_info_data */ nullptr,
+ /* roots_data */ nullptr,
+ jni_compiled_method.GetFrameSize(),
+ jni_compiled_method.GetCoreSpillMask(),
+ jni_compiled_method.GetFpSpillMask(),
+ jni_compiled_method.GetCode().data(),
+ jni_compiled_method.GetCode().size(),
+ /* data_size */ 0u,
+ osr,
+ roots,
+ /* has_should_deoptimize_flag */ false,
+ cha_single_implementation_list);
+ if (code == nullptr) {
+ return false;
+ }
+
+ const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
+ if (compiler_options.GetGenerateDebugInfo()) {
+ const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+ const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+ debug::MethodDebugInfo info = {};
+ DCHECK(info.trampoline_name.empty());
+ info.dex_file = dex_file;
+ info.class_def_index = class_def_idx;
+ info.dex_method_index = method_idx;
+ info.access_flags = access_flags;
+ info.code_item = code_item;
+ info.isa = jni_compiled_method.GetInstructionSet();
+ info.deduped = false;
+ info.is_native_debuggable = compiler_options.GetNativeDebuggable();
+ info.is_optimized = true;
+ info.is_code_address_text_relative = false;
+ info.code_address = code_address;
+ info.code_size = jni_compiled_method.GetCode().size();
+ info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+ info.code_info = nullptr;
+ info.cfi = jni_compiled_method.GetCfi();
+ std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
+ GetCompilerDriver()->GetInstructionSet(),
+ GetCompilerDriver()->GetInstructionSetFeatures(),
+ ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+ CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
+ }
+
+ Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
+ if (jit_logger != nullptr) {
+ jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
+ }
+ return true;
+ }
+
+ ArenaStack arena_stack(runtime->GetJitArenaPool());
CodeVectorAllocator code_allocator(&allocator);
VariableSizedHandleScope handles(self);
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 6ff8dd6..6ec9c48 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1783,7 +1783,9 @@
.cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
.Lexception_in_native:
- ldr sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+ ldr ip, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+ add ip, ip, #-1 // Remove the GenericJNI tag. ADD/SUB writing directly to SP is UNPREDICTABLE.
+ mov sp, ip
.cfi_def_cfa_register sp
# This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 280e593..47efeb9 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2299,7 +2299,7 @@
.Lexception_in_native:
// Move to x1 then sp to please assembler.
ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
- mov sp, x1
+ sub sp, x1, #1 // Remove the GenericJNI tag.
.cfi_def_cfa_register sp
# This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 489c52c..fc77a64 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2283,7 +2283,8 @@
nop
2:
- lw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+ lw $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+ addiu $sp, $t0, -1 // Remove the GenericJNI tag.
move $gp, $s3 # restore $gp from $s3
# This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 98ffe65..3fb83d9 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -2158,7 +2158,8 @@
dmtc1 $v0, $f0 # place return value to FP return value
1:
- ld $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+ ld $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+ daddiu $sp, $t0, -1 // Remove the GenericJNI tag.
# This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 25716dc..a46ceeb 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1969,7 +1969,9 @@
punpckldq %xmm1, %xmm0
ret
.Lexception_in_native:
- movl %fs:THREAD_TOP_QUICK_FRAME_OFFSET, %esp
+ pushl %fs:THREAD_TOP_QUICK_FRAME_OFFSET
+ addl LITERAL(-1), (%esp) // Remove the GenericJNI tag.
+ movl (%esp), %esp
// Do a call to push a new save-all frame required by the runtime.
call .Lexception_call
.Lexception_call:
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 2c3da90..463e5a2 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1958,7 +1958,9 @@
movq %rax, %xmm0
ret
.Lexception_in_native:
- movq %gs:THREAD_TOP_QUICK_FRAME_OFFSET, %rsp
+ pushq %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+ addq LITERAL(-1), (%rsp) // Remove the GenericJNI tag.
+ movq (%rsp), %rsp
CFI_DEF_CFA_REGISTER(rsp)
// Do a call to push a new save-all frame required by the runtime.
call .Lexception_call
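
All five architectures apply the same fix-up in their exception-in-native paths: THREAD_TOP_QUICK_FRAME_OFFSET now holds a tagged pointer whose low bit marks a GenericJNI frame (see the ManagedStack::TaggedTopQuickFrame change further below), so bit 0 must be cleared before the value is restored into SP. A rough C++ model of the encoding these stubs undo (illustrative only; the authoritative definition lives in managed_stack.h):

    // ArtMethod** frames are at least 4-byte aligned, so bit 0 is free for the tag.
    uintptr_t TagGenericJniFrame(ArtMethod** sp) {
      return reinterpret_cast<uintptr_t>(sp) | 1u;
    }
    ArtMethod** UntagFrame(uintptr_t tagged_sp) {
      return reinterpret_cast<ArtMethod**>(tagged_sp & ~static_cast<uintptr_t>(1u));
    }

The assembly subtracts 1 instead of masking because this path is only reached from GenericJNI, where the tag is known to be set.
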
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index fa0c501..bdbc450 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -587,11 +587,6 @@
CHECK(existing_entry_point != nullptr) << PrettyMethod() << "@" << this;
ClassLinker* class_linker = runtime->GetClassLinker();
- if (class_linker->IsQuickGenericJniStub(existing_entry_point)) {
- // The generic JNI does not have any method header.
- return nullptr;
- }
-
if (existing_entry_point == GetQuickProxyInvokeHandler()) {
DCHECK(IsProxyMethod() && !IsConstructor());
// The proxy entry point does not have any method header.
@@ -599,7 +594,8 @@
}
// Check whether the current entry point contains this pc.
- if (!class_linker->IsQuickResolutionStub(existing_entry_point) &&
+ if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
+ !class_linker->IsQuickResolutionStub(existing_entry_point) &&
!class_linker->IsQuickToInterpreterBridge(existing_entry_point)) {
OatQuickMethodHeader* method_header =
OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
@@ -632,19 +628,13 @@
OatFile::OatMethod oat_method =
FindOatMethodFor(this, class_linker->GetImagePointerSize(), &found);
if (!found) {
- if (class_linker->IsQuickResolutionStub(existing_entry_point)) {
- // We are running the generic jni stub, but the entry point of the method has not
- // been updated yet.
- DCHECK_EQ(pc, 0u) << "Should be a downcall";
- DCHECK(IsNative());
- return nullptr;
- }
- if (existing_entry_point == GetQuickInstrumentationEntryPoint()) {
- // We are running the generic jni stub, but the method is being instrumented.
- // NB We would normally expect the pc to be zero but we can have non-zero pc's if
- // instrumentation is installed or removed during the call which is using the generic jni
- // trampoline.
- DCHECK(IsNative());
+ if (IsNative()) {
+ // We are running the GenericJNI stub. The current entrypoint may be the GenericJNI
+ // trampoline itself, a resolution or instrumentation trampoline, or a JIT-compiled
+ // JNI stub (see the DCHECK below).
+ DCHECK(class_linker->IsQuickGenericJniStub(existing_entry_point) ||
+ class_linker->IsQuickResolutionStub(existing_entry_point) ||
+ existing_entry_point == GetQuickInstrumentationEntryPoint() ||
+ (jit != nullptr && jit->GetCodeCache()->ContainsPc(existing_entry_point)));
return nullptr;
}
// Only for unit tests.
diff --git a/runtime/art_method.h b/runtime/art_method.h
index dca6f37..6c3bb10 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -460,12 +460,11 @@
}
ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) {
- // Don't do a read barrier in the DCHECK, as GetProfilingInfo is called in places
- // where the declaring class is treated as a weak reference (accessing it with
- // a read barrier would either prevent unloading the class, or crash the runtime if
- // the GC wants to unload it).
- DCHECK(!IsNative<kWithoutReadBarrier>());
- if (UNLIKELY(IsProxyMethod())) {
+ // Don't do a read barrier in the DCHECK() inside GetAccessFlags() called by IsNative(),
+ // as GetProfilingInfo is called in places where the declaring class is treated as a weak
+ // reference (accessing it with a read barrier would either prevent unloading the class,
+ // or crash the runtime if the GC wants to unload it).
+ if (UNLIKELY(IsNative<kWithoutReadBarrier>()) || UNLIKELY(IsProxyMethod())) {
return nullptr;
}
return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
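
Returning null here instead of asserting means callers no longer have to special-case native methods before asking; they simply observe a missing ProfilingInfo, which is what the jit.cc change below relies on. A minimal calling pattern (sketch):

    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info == nullptr) {
      // Native or proxy method: no profiling data to consult.
    }
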
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 2bf4372..f3450da 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -245,7 +245,7 @@
CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, CalleeSaveType type) {
CallerAndOuterMethod result;
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
- ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrameKnownNotTagged();
auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
result.outer_method = outer_caller_and_pc.first;
uintptr_t caller_pc = outer_caller_and_pc.second;
@@ -256,7 +256,7 @@
ArtMethod* GetCalleeSaveOuterMethod(Thread* self, CalleeSaveType type) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
- ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrameKnownNotTagged();
return DoGetCalleeSaveMethodOuterCallerAndPc(sp, type).first;
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 2496aa0..0a76cdd 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -31,6 +31,7 @@
#include "index_bss_mapping.h"
#include "instrumentation.h"
#include "interpreter/interpreter.h"
+#include "jit/jit.h"
#include "linear_alloc.h"
#include "method_handles.h"
#include "method_reference.h"
@@ -2167,6 +2168,11 @@
// Note: We cannot walk the stack properly until fixed up below.
ArtMethod* called = *sp;
DCHECK(called->IsNative()) << called->PrettyMethod(true);
+ Runtime* runtime = Runtime::Current();
+ jit::Jit* jit = runtime->GetJit();
+ if (jit != nullptr) {
+ jit->AddSamples(self, called, 1u, /*with_backedges*/ false);
+ }
uint32_t shorty_len = 0;
const char* shorty = called->GetShorty(&shorty_len);
bool critical_native = called->IsCriticalNative();
@@ -2188,7 +2194,7 @@
}
// Fix up managed-stack things in Thread. After this we can walk the stack.
- self->SetTopOfStack(sp);
+ self->SetTopOfStackTagged(sp);
self->VerifyStack();
@@ -2308,6 +2314,7 @@
// anything that requires a mutator lock before that would cause problems as GC may have the
// exclusive mutator lock and may be moving objects, etc.
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
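
The two hunks above establish a simple contract: the GenericJNI entry trampoline publishes the top quick frame with the tag set and reports one JIT sample for the native method, while the end-of-trampoline path expects the tag to still be present. Sketched with the names used in this file (illustrative pairing only):

    self->SetTopOfStackTagged(sp);                            // entry: mark frame as GenericJNI
    // ... the native call happens in between ...
    DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());   // end: tag must still be there

The sample reported here is what eventually drives compilation of a JIT JNI stub for a hot native method (see jit.cc below).
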
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 953e195..0d95bc6 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -643,7 +643,7 @@
return;
}
- if (method->IsClassInitializer() || method->IsNative() || !method->IsCompilable()) {
+ if (method->IsClassInitializer() || !method->IsCompilable()) {
// We do not want to compile such methods.
return;
}
@@ -659,7 +659,8 @@
count *= priority_thread_weight_;
}
int32_t new_count = starting_count + count; // int32 here to avoid wrap-around;
- if (starting_count < warm_method_threshold_) {
+ // Note: Native methods have no "warm" state or profiling info.
+ if (LIKELY(!method->IsNative()) && starting_count < warm_method_threshold_) {
if ((new_count >= warm_method_threshold_) &&
(method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
@@ -696,6 +697,7 @@
// If the samples don't contain any back edge, we don't increment the hotness.
return;
}
+ DCHECK(!method->IsNative()); // No back edges reported for native methods.
if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
DCHECK(thread_pool_ != nullptr);
thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
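
For native methods the flow above therefore skips the warm step entirely: there is never a ProfilingInfo, and the samples reported from the GenericJNI trampoline only have to push the counter past the hot threshold before a compile task is enqueued. An illustrative model of that policy (not ART code; the names are made up for the sketch):

    bool ShouldEnqueueJniStubCompilation(int32_t starting_count,
                                         int32_t samples,
                                         int32_t hot_method_threshold) {
      int32_t new_count = starting_count + samples;
      // Native methods have no warm state: the only transition is "not hot" -> "hot".
      return starting_count < hot_method_threshold && new_count >= hot_method_threshold;
    }
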
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3220513..a5c167e 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -55,6 +55,107 @@
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
+class JitCodeCache::JniStubKey {
+ public:
+ explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
+ : shorty_(method->GetShorty()),
+ is_static_(method->IsStatic()),
+ is_fast_native_(method->IsFastNative()),
+ is_critical_native_(method->IsCriticalNative()),
+ is_synchronized_(method->IsSynchronized()) {
+ DCHECK(!(is_fast_native_ && is_critical_native_));
+ }
+
+ bool operator<(const JniStubKey& rhs) const {
+ if (is_static_ != rhs.is_static_) {
+ return rhs.is_static_;
+ }
+ if (is_synchronized_ != rhs.is_synchronized_) {
+ return rhs.is_synchronized_;
+ }
+ if (is_fast_native_ != rhs.is_fast_native_) {
+ return rhs.is_fast_native_;
+ }
+ if (is_critical_native_ != rhs.is_critical_native_) {
+ return rhs.is_critical_native_;
+ }
+ return strcmp(shorty_, rhs.shorty_) < 0;
+ }
+
+ // Update the shorty to point to another method's shorty. Call this function when removing
+ // the method that references the old shorty from a JniStubData without removing the entire
+ // JniStubData; otherwise the old shorty may become a dangling pointer when that method is unloaded.
+ void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ const char* shorty = method->GetShorty();
+ DCHECK_STREQ(shorty_, shorty);
+ shorty_ = shorty;
+ }
+
+ private:
+ // The shorty points to a DexFile data and may need to change
+ // to point to the same shorty in a different DexFile.
+ mutable const char* shorty_;
+
+ const bool is_static_;
+ const bool is_fast_native_;
+ const bool is_critical_native_;
+ const bool is_synchronized_;
+};
+
+class JitCodeCache::JniStubData {
+ public:
+ JniStubData() : code_(nullptr), methods_() {}
+
+ void SetCode(const void* code) {
+ DCHECK(code != nullptr);
+ code_ = code;
+ }
+
+ const void* GetCode() const {
+ return code_;
+ }
+
+ bool IsCompiled() const {
+ return GetCode() != nullptr;
+ }
+
+ void AddMethod(ArtMethod* method) {
+ if (!ContainsElement(methods_, method)) {
+ methods_.push_back(method);
+ }
+ }
+
+ const std::vector<ArtMethod*>& GetMethods() const {
+ return methods_;
+ }
+
+ void RemoveMethodsIn(const LinearAlloc& alloc) {
+ auto kept_end = std::remove_if(
+ methods_.begin(),
+ methods_.end(),
+ [&alloc](ArtMethod* method) { return alloc.ContainsUnsafe(method); });
+ methods_.erase(kept_end, methods_.end());
+ }
+
+ bool RemoveMethod(ArtMethod* method) {
+ auto it = std::find(methods_.begin(), methods_.end(), method);
+ if (it != methods_.end()) {
+ methods_.erase(it);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
+ std::replace(methods_.begin(), methods_.end(), old_method, new_method);
+ }
+
+ private:
+ const void* code_;
+ std::vector<ArtMethod*> methods_;
+};
+
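
JniStubKey captures everything the generated stub actually depends on (the shorty plus the static/fast/critical/synchronized flags), so distinct native methods that agree on the key can share a single compiled stub. Inside the cache the lookup follows the pattern used by NotifyCompilationOf() further below; `UpdateEntrypoints` here is only a stand-in for calling instrumentation->UpdateMethodsCode() on each attached method:

    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it == jni_stubs_map_.end()) {
      it = jni_stubs_map_.Put(JniStubKey(method), JniStubData{});  // new shorty/flags combination
    }
    it->second.AddMethod(method);       // many ArtMethod* may attach to one JniStubData
    if (it->second.IsCompiled()) {
      UpdateEntrypoints(it->second);    // reuse the already-compiled stub right away
    }
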
JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t max_capacity,
bool generate_debug_info,
@@ -193,14 +294,36 @@
bool JitCodeCache::ContainsMethod(ArtMethod* method) {
MutexLock mu(Thread::Current(), lock_);
- for (auto& it : method_code_map_) {
- if (it.second == method) {
+ if (UNLIKELY(method->IsNative())) {
+ auto it = jni_stubs_map_.find(JniStubKey(method));
+ if (it != jni_stubs_map_.end() &&
+ it->second.IsCompiled() &&
+ ContainsElement(it->second.GetMethods(), method)) {
return true;
}
+ } else {
+ for (const auto& it : method_code_map_) {
+ if (it.second == method) {
+ return true;
+ }
+ }
}
return false;
}
+const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
+ DCHECK(method->IsNative());
+ MutexLock mu(Thread::Current(), lock_);
+ auto it = jni_stubs_map_.find(JniStubKey(method));
+ if (it != jni_stubs_map_.end()) {
+ JniStubData& data = it->second;
+ if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
+ return data.GetCode();
+ }
+ }
+ return nullptr;
+}
+
class ScopedCodeCacheWrite : ScopedTrace {
public:
explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
@@ -426,7 +549,9 @@
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
- FreeData(GetRootTable(code_ptr));
+ if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
+ FreeData(GetRootTable(code_ptr));
+ } // else this is a JNI stub without any data.
FreeCode(reinterpret_cast<uint8_t*>(allocation));
}
@@ -463,6 +588,16 @@
// lead to a deadlock.
{
ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
+ it->second.RemoveMethodsIn(alloc);
+ if (it->second.GetMethods().empty()) {
+ method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
+ it = jni_stubs_map_.erase(it);
+ } else {
+ it->first.UpdateShorty(it->second.GetMethods().front());
+ ++it;
+ }
+ }
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
if (alloc.ContainsUnsafe(it->second)) {
method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
@@ -572,7 +707,8 @@
bool has_should_deoptimize_flag,
const ArenaSet<ArtMethod*>&
cha_single_implementation_list) {
- DCHECK(stack_map != nullptr);
+ DCHECK_NE(stack_map != nullptr, method->IsNative());
+ DCHECK(!method->IsNative() || !osr);
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -596,8 +732,8 @@
std::copy(code, code + code_size, code_ptr);
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
new (method_header) OatQuickMethodHeader(
- code_ptr - stack_map,
- code_ptr - method_info,
+ (stack_map != nullptr) ? code_ptr - stack_map : 0u,
+ (method_info != nullptr) ? code_ptr - method_info : 0u,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
@@ -652,24 +788,40 @@
// possible that the compiled code is considered invalidated by some class linking,
// but below we still make the compiled code valid for the method.
MutexLock mu(self, lock_);
- // Fill the root table before updating the entry point.
- DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
- DCHECK_LE(roots_data, stack_map);
- FillRootTable(roots_data, roots);
- {
- // Flush data cache, as compiled code references literals in it.
- // We also need a TLB shootdown to act as memory barrier across cores.
- ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
- FlushDataCache(reinterpret_cast<char*>(roots_data),
- reinterpret_cast<char*>(roots_data + data_size));
- }
- method_code_map_.Put(code_ptr, method);
- if (osr) {
- number_of_osr_compilations_++;
- osr_code_map_.Put(method, code_ptr);
+ if (UNLIKELY(method->IsNative())) {
+ DCHECK(stack_map == nullptr);
+ DCHECK(roots_data == nullptr);
+ auto it = jni_stubs_map_.find(JniStubKey(method));
+ DCHECK(it != jni_stubs_map_.end())
+ << "Entry inserted in NotifyCompilationOf() should be alive.";
+ JniStubData* data = &it->second;
+ DCHECK(ContainsElement(data->GetMethods(), method))
+ << "Entry inserted in NotifyCompilationOf() should contain this method.";
+ data->SetCode(code_ptr);
+ instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
+ for (ArtMethod* m : data->GetMethods()) {
+ instrum->UpdateMethodsCode(m, method_header->GetEntryPoint());
+ }
} else {
- Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
- method, method_header->GetEntryPoint());
+ // Fill the root table before updating the entry point.
+ DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
+ DCHECK_LE(roots_data, stack_map);
+ FillRootTable(roots_data, roots);
+ {
+ // Flush data cache, as compiled code references literals in it.
+ // We also need a TLB shootdown to act as memory barrier across cores.
+ ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+ FlushDataCache(reinterpret_cast<char*>(roots_data),
+ reinterpret_cast<char*>(roots_data + data_size));
+ }
+ method_code_map_.Put(code_ptr, method);
+ if (osr) {
+ number_of_osr_compilations_++;
+ osr_code_map_.Put(method, code_ptr);
+ } else {
+ Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+ method, method_header->GetEntryPoint());
+ }
}
if (collection_in_progress_) {
// We need to update the live bitmap if there is a GC to ensure it sees this new
@@ -703,45 +855,18 @@
}
bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
+ // This function is used only for testing and only with non-native methods.
+ CHECK(!method->IsNative());
+
MutexLock mu(Thread::Current(), lock_);
- if (method->IsNative()) {
- return false;
- }
- bool in_cache = false;
- {
- ScopedCodeCacheWrite ccw(code_map_.get());
- for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
- if (code_iter->second == method) {
- if (release_memory) {
- FreeCode(code_iter->first);
- }
- code_iter = method_code_map_.erase(code_iter);
- in_cache = true;
- continue;
- }
- ++code_iter;
- }
- }
-
- bool osr = false;
- auto code_map = osr_code_map_.find(method);
- if (code_map != osr_code_map_.end()) {
- osr_code_map_.erase(code_map);
- osr = true;
- }
+ bool osr = osr_code_map_.find(method) != osr_code_map_.end();
+ bool in_cache = RemoveMethodLocked(method, release_memory);
if (!in_cache) {
return false;
}
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- if (info != nullptr) {
- auto profile = std::find(profiling_infos_.begin(), profiling_infos_.end(), info);
- DCHECK(profile != profiling_infos_.end());
- profiling_infos_.erase(profile);
- }
- method->SetProfilingInfo(nullptr);
method->ClearCounter();
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, GetQuickToInterpreterBridge());
@@ -753,34 +878,58 @@
return true;
}
+bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
+ if (LIKELY(!method->IsNative())) {
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+ if (info != nullptr) {
+ RemoveElement(profiling_infos_, info);
+ }
+ method->SetProfilingInfo(nullptr);
+ }
+
+ bool in_cache = false;
+ ScopedCodeCacheWrite ccw(code_map_.get());
+ if (UNLIKELY(method->IsNative())) {
+ auto it = jni_stubs_map_.find(JniStubKey(method));
+ if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
+ in_cache = true;
+ if (it->second.GetMethods().empty()) {
+ if (release_memory) {
+ FreeCode(it->second.GetCode());
+ }
+ jni_stubs_map_.erase(it);
+ } else {
+ it->first.UpdateShorty(it->second.GetMethods().front());
+ }
+ }
+ } else {
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ if (it->second == method) {
+ in_cache = true;
+ if (release_memory) {
+ FreeCode(it->first);
+ }
+ it = method_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ auto osr_it = osr_code_map_.find(method);
+ if (osr_it != osr_code_map_.end()) {
+ osr_code_map_.erase(osr_it);
+ }
+ }
+
+ return in_cache;
+}
+
// This notifies the code cache that the given method has been redefined and that it should remove
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be in any thread's call stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
MutexLock mu(Thread::Current(), lock_);
- if (method->IsNative()) {
- return;
- }
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- if (info != nullptr) {
- auto profile = std::find(profiling_infos_.begin(), profiling_infos_.end(), info);
- DCHECK(profile != profiling_infos_.end());
- profiling_infos_.erase(profile);
- }
- method->SetProfilingInfo(nullptr);
- ScopedCodeCacheWrite ccw(code_map_.get());
- for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
- if (code_iter->second == method) {
- FreeCode(code_iter->first);
- code_iter = method_code_map_.erase(code_iter);
- continue;
- }
- ++code_iter;
- }
- auto code_map = osr_code_map_.find(method);
- if (code_map != osr_code_map_.end()) {
- osr_code_map_.erase(code_map);
- }
+ RemoveMethodLocked(method, /* release_memory */ true);
}
// This invalidates old_method. Once this function returns one can no longer use old_method to
@@ -790,11 +939,15 @@
// shouldn't be used since it is no longer logically in the jit code cache.
// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
- // Native methods have no profiling info and need no special handling from the JIT code cache.
+ MutexLock mu(Thread::Current(), lock_);
if (old_method->IsNative()) {
+ // Update methods in jni_stubs_map_.
+ for (auto& entry : jni_stubs_map_) {
+ JniStubData& data = entry.second;
+ data.MoveObsoleteMethod(old_method, new_method);
+ }
return;
}
- MutexLock mu(Thread::Current(), lock_);
// Update ProfilingInfo to the new one and remove it from the old_method.
if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
@@ -936,7 +1089,7 @@
// its stack frame, it is not the method owning return_pc_. We just pass null to
// LookupMethodHeader: the method is only checked against in debug builds.
OatQuickMethodHeader* method_header =
- code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
+ code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
if (method_header != nullptr) {
const void* code = method_header->GetCode();
CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
@@ -1089,7 +1242,7 @@
const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
if (ContainsPc(entry_point)) {
info->SetSavedEntryPoint(entry_point);
- // Don't call Instrumentation::UpdateMethods, as it can check the declaring
+ // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
// class of the method. We may be concurrently running a GC which makes accessing
// the class unsafe. We know it is OK to bypass the instrumentation as we've just
// checked that the current entry point is JIT compiled code.
@@ -1098,6 +1251,25 @@
}
DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+
+ // Change entry points of native methods back to the GenericJNI entrypoint.
+ for (const auto& entry : jni_stubs_map_) {
+ const JniStubData& data = entry.second;
+ if (!data.IsCompiled()) {
+ continue;
+ }
+ // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
+ uint16_t new_counter = Runtime::Current()->GetJit()->HotMethodThreshold() - 1u;
+ const OatQuickMethodHeader* method_header =
+ OatQuickMethodHeader::FromCodePointer(data.GetCode());
+ for (ArtMethod* method : data.GetMethods()) {
+ if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
+ // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
+ method->SetCounter(new_counter);
+ method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
+ }
+ }
+ }
}
live_bitmap_.reset(nullptr);
NotifyCollectionDone(self);
@@ -1113,13 +1285,22 @@
MutexLock mu(self, lock_);
ScopedCodeCacheWrite scc(code_map_.get());
// Iterate over all compiled code and remove entries that are not marked.
+ for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
+ JniStubData* data = &it->second;
+ if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
+ ++it;
+ } else {
+ method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
+ it = jni_stubs_map_.erase(it);
+ }
+ }
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
const void* code_ptr = it->first;
uintptr_t allocation = FromCodeToAllocation(code_ptr);
if (GetLiveBitmap()->Test(allocation)) {
++it;
} else {
- method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
+ method_headers.insert(OatQuickMethodHeader::FromCodePointer(code_ptr));
it = method_code_map_.erase(it);
}
}
@@ -1158,6 +1339,17 @@
// an entry point is either:
// - an osr compiled code, that will be removed if not in a thread call stack.
// - discarded compiled code, that will be removed if not in a thread call stack.
+ for (const auto& entry : jni_stubs_map_) {
+ const JniStubData& data = entry.second;
+ const void* code_ptr = data.GetCode();
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ for (ArtMethod* method : data.GetMethods()) {
+ if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+ break;
+ }
+ }
+ }
for (const auto& it : method_code_map_) {
ArtMethod* method = it.second;
const void* code_ptr = it.first;
@@ -1237,19 +1429,51 @@
return nullptr;
}
- MutexLock mu(Thread::Current(), lock_);
- if (method_code_map_.empty()) {
- return nullptr;
+ if (!kIsDebugBuild) {
+ // Called with null `method` only from MarkCodeClosure::Run() in debug build.
+ CHECK(method != nullptr);
}
- auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
- --it;
- const void* code_ptr = it->first;
- OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- if (!method_header->Contains(pc)) {
- return nullptr;
+ MutexLock mu(Thread::Current(), lock_);
+ OatQuickMethodHeader* method_header = nullptr;
+ ArtMethod* found_method = nullptr; // Only for DCHECK(), not for JNI stubs.
+ if (method != nullptr && UNLIKELY(method->IsNative())) {
+ auto it = jni_stubs_map_.find(JniStubKey(method));
+ if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) {
+ return nullptr;
+ }
+ const void* code_ptr = it->second.GetCode();
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (!method_header->Contains(pc)) {
+ return nullptr;
+ }
+ } else {
+ auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
+ if (it != method_code_map_.begin()) {
+ --it;
+ const void* code_ptr = it->first;
+ if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ found_method = it->second;
+ }
+ }
+ if (method_header == nullptr && method == nullptr) {
+ // Scan all compiled JNI stubs as well. This slow search is used only
+ // for checks in debug builds; in release builds the `method` is never null.
+ for (auto&& entry : jni_stubs_map_) {
+ const JniStubData& data = entry.second;
+ if (data.IsCompiled() &&
+ OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
+ method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
+ }
+ }
+ }
+ if (method_header == nullptr) {
+ return nullptr;
+ }
}
- if (kIsDebugBuild && method != nullptr) {
+
+ if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
// When we are walking the stack to redefine classes and creating obsolete methods it is
// possible that we might have updated the method_code_map by making this method obsolete in a
// previous frame. Therefore we should just check that the non-obsolete version of this method
@@ -1258,9 +1482,9 @@
// occur when we are in the process of allocating and setting up obsolete methods. Otherwise
// method and it->second should be identical. (See openjdkjvmti/ti_redefine.cc for more
// information.)
- DCHECK_EQ(it->second->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
+ DCHECK_EQ(found_method->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
<< ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " "
- << ArtMethod::PrettyMethod(it->second->GetNonObsoleteMethod()) << " "
+ << ArtMethod::PrettyMethod(found_method->GetNonObsoleteMethod()) << " "
<< std::hex << pc;
}
return method_header;
@@ -1449,21 +1673,51 @@
return false;
}
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- if (info == nullptr) {
- VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
- // Because the counter is not atomic, there are some rare cases where we may not hit the
- // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
- ClearMethodCounter(method, /*was_warm*/ false);
- return false;
- }
+ if (UNLIKELY(method->IsNative())) {
+ JniStubKey key(method);
+ auto it = jni_stubs_map_.find(key);
+ bool new_compilation = false;
+ if (it == jni_stubs_map_.end()) {
+ // Create a new entry to mark the stub as being compiled.
+ it = jni_stubs_map_.Put(key, JniStubData{});
+ new_compilation = true;
+ }
+ JniStubData* data = &it->second;
+ data->AddMethod(method);
+ if (data->IsCompiled()) {
+ OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
+ const void* entrypoint = method_header->GetEntryPoint();
+ // Also update the entrypoints of the other methods held by this JniStubData.
+ // We could simply update the entrypoint of `method`, but if the last JIT GC
+ // changed these entrypoints back to GenericJNI in preparation for a full GC,
+ // we may as well restore them now: this stub will not be collected anyway,
+ // and doing so avoids a few more expensive GenericJNI calls.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ for (ArtMethod* m : data->GetMethods()) {
+ instrumentation->UpdateMethodsCode(m, entrypoint);
+ }
+ if (collection_in_progress_) {
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
+ }
+ }
+ return new_compilation;
+ } else {
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+ if (info == nullptr) {
+ VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
+ // Because the counter is not atomic, there are some rare cases where we may not hit the
+ // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
+ ClearMethodCounter(method, /*was_warm*/ false);
+ return false;
+ }
- if (info->IsMethodBeingCompiled(osr)) {
- return false;
- }
+ if (info->IsMethodBeingCompiled(osr)) {
+ return false;
+ }
- info->SetIsMethodBeingCompiled(true, osr);
- return true;
+ info->SetIsMethodBeingCompiled(true, osr);
+ return true;
+ }
}
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
@@ -1485,10 +1739,23 @@
info->DecrementInlineUse();
}
-void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED, bool osr) {
- ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
- DCHECK(info->IsMethodBeingCompiled(osr));
- info->SetIsMethodBeingCompiled(false, osr);
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
+ DCHECK_EQ(Thread::Current(), self);
+ MutexLock mu(self, lock_);
+ if (UNLIKELY(method->IsNative())) {
+ auto it = jni_stubs_map_.find(JniStubKey(method));
+ DCHECK(it != jni_stubs_map_.end());
+ JniStubData* data = &it->second;
+ DCHECK(ContainsElement(data->GetMethods(), method));
+ if (UNLIKELY(!data->IsCompiled())) {
+ // Failed to compile; the JNI compiler never fails, but the cache may be full.
+ jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
+ } // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
+ } else {
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+ DCHECK(info->IsMethodBeingCompiled(osr));
+ info->SetIsMethodBeingCompiled(false, osr);
+ }
}
size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
@@ -1498,6 +1765,7 @@
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
+ DCHECK(!method->IsNative());
ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
if ((profiling_info != nullptr) &&
(profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
@@ -1553,6 +1821,7 @@
os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
<< "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
<< "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
+ << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
<< "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
<< "Total number of JIT compilations: " << number_of_compilations_ << "\n"
<< "Total number of JIT compilations for on stack replacement: "
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 46a4085..fc011dd 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -35,9 +35,23 @@
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
+class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
+class Thread;
+
+namespace gc {
+namespace accounting {
+template<size_t kAlignment> class MemoryRangeBitmap;
+} // namespace accounting
+} // namespace gc
+
+namespace mirror {
+class Class;
+class Object;
+template<class T> class ObjectArray;
+} // namespace mirror
namespace gc {
namespace accounting {
@@ -137,6 +151,9 @@
// Return true if the code cache contains this method.
bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
+ // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
+ const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);
+
// Allocate a region of data that contain `size` bytes, and potentially space
// for storing `number_of_roots` roots. Returns null if there is no more room.
// Return the number of bytes allocated.
@@ -160,11 +177,6 @@
return live_bitmap_.get();
}
- // Return whether we should do a full collection given the current state of the cache.
- bool ShouldDoFullCollection()
- REQUIRES(lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Perform a collection on the code cache.
void GarbageCollectCache(Thread* self)
REQUIRES(!lock_)
@@ -296,6 +308,12 @@
REQUIRES(!lock_)
REQUIRES(!Locks::cha_lock_);
+ // Removes method from the cache. The caller must ensure that all threads
+ // are suspended and the method should not be in any thread's stack.
+ bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
+ REQUIRES(lock_)
+ REQUIRES(Locks::mutator_lock_);
+
// Free in the mspace allocations for `code_ptr`.
void FreeCode(const void* code_ptr) REQUIRES(lock_);
@@ -315,6 +333,11 @@
// Set the footprint limit of the code cache.
void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
+ // Return whether we should do a full collection given the current state of the cache.
+ bool ShouldDoFullCollection()
+ REQUIRES(lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void DoCollection(Thread* self, bool collect_profiling_info)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -341,6 +364,9 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ class JniStubKey;
+ class JniStubData;
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
@@ -357,6 +383,8 @@
void* data_mspace_ GUARDED_BY(lock_);
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+ // Holds compiled JNI stubs, keyed by JniStubKey (shorty plus the relevant access flags).
+ SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
// Holds compiled code associated to the ArtMethod.
SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
// Holds osr compiled code associated to the ArtMethod.
@@ -418,6 +446,7 @@
// Condition to wait on for accessing inline caches.
ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+ friend class art::JitJniStubTestHelper;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 01853de..acbc6e6 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -357,8 +357,8 @@
sampled_methods->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
}
} else {
- CHECK_EQ(method.GetCounter(), 0u) << method.PrettyMethod()
- << " access_flags=" << method.GetAccessFlags();
+ // We do not record native methods. Once we AOT-compile the app, all native
+ // methods will have their JNI stubs compiled anyway.
}
}
}
diff --git a/runtime/managed_stack-inl.h b/runtime/managed_stack-inl.h
index 689dd80..678be8e 100644
--- a/runtime/managed_stack-inl.h
+++ b/runtime/managed_stack-inl.h
@@ -24,7 +24,7 @@
namespace art {
inline ShadowFrame* ManagedStack::PushShadowFrame(ShadowFrame* new_top_frame) {
- DCHECK(top_quick_frame_ == nullptr);
+ DCHECK(!HasTopQuickFrame());
ShadowFrame* old_frame = top_shadow_frame_;
top_shadow_frame_ = new_top_frame;
new_top_frame->SetLink(old_frame);
@@ -32,7 +32,7 @@
}
inline ShadowFrame* ManagedStack::PopShadowFrame() {
- DCHECK(top_quick_frame_ == nullptr);
+ DCHECK(!HasTopQuickFrame());
CHECK(top_shadow_frame_ != nullptr);
ShadowFrame* frame = top_shadow_frame_;
top_shadow_frame_ = frame->GetLink();
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 4f1984d..07078ec 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -24,6 +24,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/bit_utils.h"
namespace art {
@@ -42,7 +43,9 @@
class PACKED(4) ManagedStack {
public:
ManagedStack()
- : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}
+ : tagged_top_quick_frame_(TaggedTopQuickFrame::CreateNotTagged(nullptr)),
+ link_(nullptr),
+ top_shadow_frame_(nullptr) {}
void PushManagedStackFragment(ManagedStack* fragment) {
// Copy this top fragment into given fragment.
@@ -63,17 +66,36 @@
return link_;
}
+ ArtMethod** GetTopQuickFrameKnownNotTagged() const {
+ return tagged_top_quick_frame_.GetSpKnownNotTagged();
+ }
+
ArtMethod** GetTopQuickFrame() const {
- return top_quick_frame_;
+ return tagged_top_quick_frame_.GetSp();
+ }
+
+ bool GetTopQuickFrameTag() const {
+ return tagged_top_quick_frame_.GetTag();
+ }
+
+ bool HasTopQuickFrame() const {
+ return tagged_top_quick_frame_.GetTaggedSp() != 0u;
}
void SetTopQuickFrame(ArtMethod** top) {
DCHECK(top_shadow_frame_ == nullptr);
- top_quick_frame_ = top;
+ DCHECK_ALIGNED(top, 4u);
+ tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateNotTagged(top);
}
- static size_t TopQuickFrameOffset() {
- return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
+ void SetTopQuickFrameTagged(ArtMethod** top) {
+ DCHECK(top_shadow_frame_ == nullptr);
+ DCHECK_ALIGNED(top, 4u);
+ tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
+ }
+
+ static size_t TaggedTopQuickFrameOffset() {
+ return OFFSETOF_MEMBER(ManagedStack, tagged_top_quick_frame_);
}
ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
@@ -83,8 +105,12 @@
return top_shadow_frame_;
}
+ bool HasTopShadowFrame() const {
+ return GetTopShadowFrame() != nullptr;
+ }
+
void SetTopShadowFrame(ShadowFrame* top) {
- DCHECK(top_quick_frame_ == nullptr);
+ DCHECK_EQ(tagged_top_quick_frame_.GetTaggedSp(), 0u);
top_shadow_frame_ = top;
}
@@ -97,7 +123,47 @@
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
private:
- ArtMethod** top_quick_frame_;
+ // Encodes the top quick frame (which must be at least 4-byte aligned)
+ // and a flag that marks the GenericJNI trampoline.
+ class TaggedTopQuickFrame {
+ public:
+ static TaggedTopQuickFrame CreateNotTagged(ArtMethod** sp) {
+ DCHECK_ALIGNED(sp, 4u);
+ return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp));
+ }
+
+ static TaggedTopQuickFrame CreateTagged(ArtMethod** sp) {
+ DCHECK_ALIGNED(sp, 4u);
+ return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp) | 1u);
+ }
+
+ // Get SP known to be not tagged and non-null.
+ ArtMethod** GetSpKnownNotTagged() const {
+ DCHECK(!GetTag());
+ DCHECK_NE(tagged_sp_, 0u);
+ return reinterpret_cast<ArtMethod**>(tagged_sp_);
+ }
+
+ ArtMethod** GetSp() const {
+ return reinterpret_cast<ArtMethod**>(tagged_sp_ & ~static_cast<uintptr_t>(1u));
+ }
+
+ bool GetTag() const {
+ return (tagged_sp_ & 1u) != 0u;
+ }
+
+ uintptr_t GetTaggedSp() const {
+ return tagged_sp_;
+ }
+
+ private:
+ explicit TaggedTopQuickFrame(uintptr_t tagged_sp) : tagged_sp_(tagged_sp) { }
+
+ uintptr_t tagged_sp_;
+ };
+ static_assert(sizeof(TaggedTopQuickFrame) == sizeof(uintptr_t), "TaggedTopQuickFrame size check");
+
+ TaggedTopQuickFrame tagged_top_quick_frame_;
ManagedStack* link_;
ShadowFrame* top_shadow_frame_;
};
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 3071348..b86f479 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -445,8 +445,13 @@
// if it's in the class path). Note this trades correctness for performance
// since the resulting slow down is unacceptable in some cases until b/64530081
// is fixed.
+ // We still pass the class loader context when the classpath string of the runtime
+ // is not empty, which is the situation when ART is invoked standalone.
+ ClassLoaderContext* actual_context = Runtime::Current()->GetClassPathString().empty()
+ ? nullptr
+ : context.get();
switch (oat_file_assistant.MakeUpToDate(/*profile_changed*/ false,
- /*class_loader_context*/ nullptr,
+ actual_context,
/*out*/ &error_msg)) {
case OatFileAssistant::kUpdateFailed:
LOG(WARNING) << error_msg;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ab9fb0d..5ad1f7c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -735,12 +735,19 @@
return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}
- // The only remaining case is if the method is native and uses the generic JNI stub.
+ // The only remaining case is if the method is native and uses the generic JNI stub,
+ // called either directly or through some (resolution, instrumentation) trampoline.
DCHECK(method->IsNative());
- ClassLinker* class_linker = runtime->GetClassLinker();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
- kRuntimePointerSize);
- DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << method->PrettyMethod();
+ if (kIsDebugBuild) {
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
+ kRuntimePointerSize);
+ CHECK(class_linker->IsQuickGenericJniStub(entry_point) ||
+ // The current entrypoint (after filtering out trampolines) may have changed
+ // from GenericJNI to JIT-compiled stub since we have entered this frame.
+ (runtime->GetJit() != nullptr &&
+ runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point))) << method->PrettyMethod();
+ }
// Generic JNI frame.
uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
size_t scope_size = HandleScope::SizeOf(handle_refs);
@@ -776,8 +783,48 @@
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
ArtMethod* method = *cur_quick_frame_;
+ DCHECK(method != nullptr);
+ bool header_retrieved = false;
+ if (method->IsNative()) {
+ // We do not have a PC for the first frame, so we cannot simply use
+ // ArtMethod::GetOatQuickMethodHeader() as we're unable to distinguish there
+ // between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
+ // changed since the frame was entered. The top quick frame tag indicates
+ // GenericJNI here, otherwise it's either AOT-compiled or JNI-compiled JNI stub.
+ if (UNLIKELY(current_fragment->GetTopQuickFrameTag())) {
+ // The generic JNI does not have any method header.
+ cur_oat_quick_method_header_ = nullptr;
+ } else {
+ const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
+ CHECK(existing_entry_point != nullptr);
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ // Check whether we can quickly get the header from the current entrypoint.
+ if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
+ !class_linker->IsQuickResolutionStub(existing_entry_point) &&
+ existing_entry_point != GetQuickInstrumentationEntryPoint()) {
+ cur_oat_quick_method_header_ =
+ OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+ } else {
+ const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
+ if (code != nullptr) {
+ cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
+ } else {
+ // This must be a JITted JNI stub frame.
+ CHECK(runtime->GetJit() != nullptr);
+ code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
+ CHECK(code != nullptr) << method->PrettyMethod();
+ cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
+ }
+ }
+ }
+ header_retrieved = true;
+ }
while (method != nullptr) {
- cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
+ if (!header_retrieved) {
+ cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
+ }
+ header_retrieved = false; // Force header retrieval in next iteration.
SanityCheckFrame();
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
diff --git a/runtime/stack.h b/runtime/stack.h
index bd6204f..a16930b 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -140,8 +140,7 @@
};
template <CountTransitions kCount = CountTransitions::kYes>
- void WalkStack(bool include_transitions = false)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);
Thread* GetThread() const {
return thread_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 712eabc..bec1c90 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1884,9 +1884,7 @@
}
// Threads with no managed stack frames should be shown.
- const ManagedStack* managed_stack = thread->GetManagedStack();
- if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
- managed_stack->GetTopShadowFrame() == nullptr)) {
+ if (!thread->HasManagedStack()) {
return true;
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 39be66d..0803975 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -474,13 +474,16 @@
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
}
+ void SetTopOfStackTagged(ArtMethod** top_method) {
+ tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
+ }
+
void SetTopOfShadowStack(ShadowFrame* top) {
tlsPtr_.managed_stack.SetTopShadowFrame(top);
}
bool HasManagedStack() const {
- return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
- (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
+ return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
}
// If 'msg' is null, no detail message is set.
@@ -833,7 +836,7 @@
static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
- ManagedStack::TopQuickFrameOffset());
+ ManagedStack::TaggedTopQuickFrameOffset());
}
const ManagedStack* GetManagedStack() const {
diff --git a/test/655-jit-clinit/src/Main.java b/test/655-jit-clinit/src/Main.java
index 44b3154..2fb8f2a 100644
--- a/test/655-jit-clinit/src/Main.java
+++ b/test/655-jit-clinit/src/Main.java
@@ -23,7 +23,7 @@
Foo.hotMethod();
}
- public native static boolean isJitCompiled(Class<?> cls, String methodName);
+ public native static boolean hasJitCompiledEntrypoint(Class<?> cls, String methodName);
private native static boolean hasJit();
}
@@ -36,7 +36,7 @@
static {
array = new Object[10000];
- while (!Main.isJitCompiled(Foo.class, "hotMethod")) {
+ while (!Main.hasJitCompiledEntrypoint(Foo.class, "hotMethod")) {
Foo.hotMethod();
try {
// Sleep to give a chance for the JIT to compile `hotMethod`.
diff --git a/test/667-jit-jni-stub/expected.txt b/test/667-jit-jni-stub/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/667-jit-jni-stub/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/667-jit-jni-stub/info.txt b/test/667-jit-jni-stub/info.txt
new file mode 100644
index 0000000..6f25c44
--- /dev/null
+++ b/test/667-jit-jni-stub/info.txt
@@ -0,0 +1 @@
+Tests for JITting and collecting JNI stubs.
diff --git a/test/667-jit-jni-stub/jit_jni_stub_test.cc b/test/667-jit-jni-stub/jit_jni_stub_test.cc
new file mode 100644
index 0000000..82e06fc
--- /dev/null
+++ b/test/667-jit-jni-stub/jit_jni_stub_test.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <jni.h>
+
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "mirror/class.h"
+#include "mirror/string.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+
+// Local class declared as a friend of JitCodeCache so that we can access its internals.
+class JitJniStubTestHelper {
+ public:
+ static bool isNextJitGcFull(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK(Runtime::Current()->GetJit() != nullptr);
+ jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
+ MutexLock mu(self, cache->lock_);
+ return cache->ShouldDoFullCollection();
+ }
+};
+
+// Calls through to a static method with signature "()V".
+extern "C" JNIEXPORT
+void Java_Main_callThrough(JNIEnv* env, jclass, jclass klass, jstring methodName) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::string name = soa.Decode<mirror::String>(methodName)->ToModifiedUtf8();
+ jmethodID method = env->GetStaticMethodID(klass, name.c_str(), "()V");
+ CHECK(method != nullptr) << soa.Decode<mirror::Class>(klass)->PrettyDescriptor() << "." << name;
+ env->CallStaticVoidMethod(klass, method);
+}
+
+extern "C" JNIEXPORT
+void Java_Main_jitGc(JNIEnv*, jclass) {
+ CHECK(Runtime::Current()->GetJit() != nullptr);
+ jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
+ ScopedObjectAccess soa(Thread::Current());
+ cache->GarbageCollectCache(Thread::Current());
+}
+
+extern "C" JNIEXPORT
+jboolean Java_Main_isNextJitGcFull(JNIEnv*, jclass) {
+ ScopedObjectAccess soa(Thread::Current());
+ return JitJniStubTestHelper::isNextJitGcFull(soa.Self());
+}
+
+} // namespace art
diff --git a/test/667-jit-jni-stub/run b/test/667-jit-jni-stub/run
new file mode 100755
index 0000000..1877be4
--- /dev/null
+++ b/test/667-jit-jni-stub/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Disable AOT compilation of JNI stubs.
+${RUN} "${@}" --no-prebuild --no-dex2oat
diff --git a/test/667-jit-jni-stub/src/Main.java b/test/667-jit-jni-stub/src/Main.java
new file mode 100644
index 0000000..b867970
--- /dev/null
+++ b/test/667-jit-jni-stub/src/Main.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ if (isAotCompiled(Main.class, "hasJit")) {
+ throw new Error("This test must be run with --no-prebuild --no-dex2oat!");
+ }
+ if (!hasJit()) {
+ return;
+ }
+
+ testCompilationUseAndCollection();
+ testMixedFramesOnStack();
+ }
+
+ public static void testCompilationUseAndCollection() {
+ // Test that callThrough() can be JIT-compiled.
+ assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
+ ensureCompiledCallThroughEntrypoint(/* call */ true);
+ assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+
+ // Use callThrough() once again now that the method has a JIT-compiled stub.
+ callThrough(Main.class, "doNothing");
+
+ // Test that GC with the JIT-compiled stub on the stack does not collect it.
+ // Also tests stack walk over the JIT-compiled stub.
+ callThrough(Main.class, "testGcWithCallThroughStubOnStack");
+
+ // Test that, when marking used methods before a full JIT GC, a single execution
+ // of the GenericJNI trampoline can save the compiled stub from being collected.
+ testSingleInvocationTriggersRecompilation();
+
+ // Test that the JNI compiled stub can actually be collected.
+ testStubCanBeCollected();
+ }
+
+ public static void testGcWithCallThroughStubOnStack() {
+ // Check that this method was called via JIT-compiled callThrough() stub.
+ assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ // This assertion also exercises stack walk over the JIT-compiled callThrough() stub.
+ assertTrue(new Throwable().getStackTrace()[1].getMethodName().equals("callThrough"));
+
+ doJitGcsUntilFullJitGcIsScheduled();
+ // The callThrough() on the stack above this method is using the compiled stub,
+ // so the JIT GC should not remove the compiled code.
+ jitGc();
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+ }
+
+ public static void testSingleInvocationTriggersRecompilation() {
+    // After scheduling a full JIT GC, a single call through the GenericJNI
+ // trampoline should ensure that the compiled stub is used again.
+ doJitGcsUntilFullJitGcIsScheduled();
+ callThrough(Main.class, "doNothing");
+ ensureCompiledCallThroughEntrypoint(/* call */ false); // Wait for the compilation task to run.
+ assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ jitGc(); // This JIT GC should not collect the callThrough() stub.
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+ }
+
+ public static void testMixedFramesOnStack() {
+ // Starts without a compiled JNI stub for callThrough().
+ assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
+ callThrough(Main.class, "testMixedFramesOnStackStage2");
+ // We have just returned through the JIT-compiled JNI stub, so it must still
+ // be compiled (though not necessarily with the entrypoint pointing to it).
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+    // Though callThrough() is on the stack, that frame is using the GenericJNI
+    // trampoline and does not prevent the collection of the JNI stub.
+ testStubCanBeCollected();
+ }
+
+ public static void testMixedFramesOnStackStage2() {
+ // We cannot assert that callThrough() has no JIT compiled stub as that check
+ // may race against the compilation task. Just check the caller.
+ assertTrue(new Throwable().getStackTrace()[1].getMethodName().equals("callThrough"));
+ // Now ensure that the JNI stub is compiled and used.
+ ensureCompiledCallThroughEntrypoint(/* call */ true);
+ callThrough(Main.class, "testMixedFramesOnStackStage3");
+ }
+
+ public static void testMixedFramesOnStackStage3() {
+ // Check that this method was called via JIT-compiled callThrough() stub.
+ assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ // This assertion also exercises stack walk over the JIT-compiled callThrough() stub.
+ assertTrue(new Throwable().getStackTrace()[1].getMethodName().equals("callThrough"));
+ // For a good measure, try a JIT GC.
+ jitGc();
+ }
+
+ public static void testStubCanBeCollected() {
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+ doJitGcsUntilFullJitGcIsScheduled();
+ assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+ jitGc(); // JIT GC without callThrough() on the stack should collect the callThrough() stub.
+ assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
+ }
+
+ public static void doJitGcsUntilFullJitGcIsScheduled() {
+ // We enter with a compiled stub for callThrough() but we also need the entrypoint to be set.
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+ ensureCompiledCallThroughEntrypoint(/* call */ true);
+ // Perform JIT GC until the next GC is marked to do full collection.
+ do {
+ assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ callThrough(Main.class, "jitGc"); // JIT GC with callThrough() safely on the stack.
+ } while (!isNextJitGcFull());
+ // The JIT GC before the full collection resets entrypoints and waits to see
+ // if the methods are still in use.
+ assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+ }
+
+ public static void ensureCompiledCallThroughEntrypoint(boolean call) {
+ int count = 0;
+ while (!hasJitCompiledEntrypoint(Main.class, "callThrough")) {
+ // If `call` is true, also exercise the `callThrough()` method to increase hotness.
+ int limit = call ? 1 << Math.min(count, 12) : 0;
+ for (int i = 0; i < limit; ++i) {
+ callThrough(Main.class, "doNothing");
+ }
+ try {
+        // Sleep to give the JIT a chance to compile the `callThrough` stub.
+ Thread.sleep(100);
+ } catch (Exception e) {
+ // Ignore
+ }
+ if (++count == 50) {
+ throw new Error("TIMEOUT");
+ }
+    }
+ }
+
+ public static void assertTrue(boolean value) {
+ if (!value) {
+ throw new AssertionError("Expected true!");
+ }
+ }
+
+ public static void assertFalse(boolean value) {
+ if (value) {
+ throw new AssertionError("Expected false!");
+ }
+ }
+
+ public static void doNothing() { }
+ public static void throwError() { throw new Error(); }
+
+  // Note that callThrough()'s shorty differs from the shorties of the other
+  // native methods used in this test because of the `void` return type.
+ public native static void callThrough(Class<?> cls, String methodName);
+
+ public native static void jitGc();
+ public native static boolean isNextJitGcFull();
+
+ public native static boolean isAotCompiled(Class<?> cls, String methodName);
+ public native static boolean hasJitCompiledEntrypoint(Class<?> cls, String methodName);
+ public native static boolean hasJitCompiledCode(Class<?> cls, String methodName);
+ private native static boolean hasJit();
+}
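
The assertions in testStubCanBeCollected() and doJitGcsUntilFullJitGcIsScheduled() above rely on a two-phase collection discipline: a partial JIT GC resets entrypoints but keeps the compiled JNI stubs, and the following full GC frees stubs that were neither re-entered through the GenericJNI trampoline nor found on a stack. A conceptual sketch under that assumption follows, with made-up names rather than the real JitCodeCache interface.

// Conceptual model only; ints stand in for ArtMethod* and the operations mirror
// what the test observes via hasJitCompiledEntrypoint() / hasJitCompiledCode().
#include <set>

struct JniStubCacheModel {
  std::set<int> has_code;        // methods that still have a JIT-compiled JNI stub
  std::set<int> entrypoint_set;  // methods whose entrypoint points at that stub
  std::set<int> used_since_gc;   // re-entered via GenericJNI, or on some stack

  void PartialGc() {
    // Entrypoints are reset so future calls go through GenericJNI again,
    // but the stubs themselves are kept until the next full collection.
    entrypoint_set.clear();
    used_since_gc.clear();
  }

  void CallThroughGenericJni(int method) {
    // A call that finds no JIT entrypoint goes through the GenericJNI trampoline;
    // this marks the stub as used (in the real runtime the entrypoint is restored
    // asynchronously by a compilation task).
    if (has_code.count(method) != 0) {
      used_since_gc.insert(method);
      entrypoint_set.insert(method);
    }
  }

  void FullGc() {
    // Stubs that nobody used since the partial pass are freed.
    for (auto it = has_code.begin(); it != has_code.end();) {
      if (used_since_gc.count(*it) == 0) {
        entrypoint_set.erase(*it);
        it = has_code.erase(it);
      } else {
        ++it;
      }
    }
  }
};

In this model, hasJitCompiledCode() corresponds to membership in has_code and hasJitCompiledEntrypoint() to membership in entrypoint_set, which is what the test's assertions check before and after each jitGc() call.
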
diff --git a/test/Android.bp b/test/Android.bp
index 8f29251..2d526d2 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -384,6 +384,7 @@
"656-annotation-lookup-generic-jni/test.cc",
"661-oat-writer-layout/oat_writer_layout.cc",
"664-aget-verifier/aget-verifier.cc",
+ "667-jit-jni-stub/jit_jni_stub_test.cc",
"708-jit-cache-churn/jit.cc",
],
shared_libs: [
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index df497c1..3458080 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -152,10 +152,10 @@
return method->GetOatMethodQuickCode(kRuntimePointerSize) != nullptr;
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isJitCompiled(JNIEnv* env,
- jclass,
- jclass cls,
- jstring method_name) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJitCompiledEntrypoint(JNIEnv* env,
+ jclass,
+ jclass cls,
+ jstring method_name) {
jit::Jit* jit = GetJitIfEnabled();
if (jit == nullptr) {
return false;
@@ -169,6 +169,23 @@
return jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJitCompiledCode(JNIEnv* env,
+ jclass,
+ jclass cls,
+ jstring method_name) {
+ jit::Jit* jit = GetJitIfEnabled();
+ if (jit == nullptr) {
+ return false;
+ }
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
+ chars.c_str(), kRuntimePointerSize);
+ return jit->GetCodeCache()->ContainsMethod(method);
+}
+
extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
jclass,
jclass cls,