Do not create HandleScope for JNI transitions.
We previously created a HandleScope in the JNI transition
frame to hold references passed as jobject (jclass, etc.)
to the native function, and these references were effectively
spilled twice during the transition.
We now construct the jobject as a pointer to the reference
spilled in the reserved out vreg area of the caller's frame,
and the jclass for a static method is simply a pointer to
the method's declaring class. This reduces the amount of work
required in the JNI transition, both on entry (in compiled
stubs) and on exit (in JniMethodEnd*).
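
As a rough sketch (assuming the SaveRefsAndArgs frame layout
and using illustrative names; the actual code is in the diff
below), the `this` jobject for an instance method is now
computed as:

    // The spilled `this` reference lives in the first out vreg of
    // the caller's SaveRefsAndArgs frame, right after the ArtMethod*
    // slot at the top of that frame.
    uint8_t* sp = reinterpret_cast<uint8_t*>(top_quick_frame);
    size_t frame_size =
        RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
    jobject this_ref =
        reinterpret_cast<jobject>(sp + frame_size + sizeof(ArtMethod*));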
Some additional work is required when the GC visits the
references of a native method, as we now need to walk the
method's shorty, which was unnecessary with a HandleScope.
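
To visit those references, the shorty is walked over the
spilled out vregs, roughly as follows (simplified from the
thread.cc change below; `visit` and `first_out_vreg` are
placeholders):

    uint32_t* vreg = first_out_vreg;  // just past the ArtMethod* slot
    if (!method->IsStatic()) {
      visit(vreg);      // the `this` reference
      vreg += 1u;
    }
    for (const char* s = method->GetShorty() + 1; *s != '\0'; ++s) {
      if (*s == 'D' || *s == 'J') {
        vreg += 2u;     // wide arguments occupy two vregs
      } else {
        if (*s == 'L') {
          visit(vreg);  // reference argument
        }
        vreg += 1u;
      }
    }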
Also fix Thread::InitStackHwm() to calculate the correct
stack size, which is needed by the new
Thread::IsJniTransitionReference().
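
That check first tests whether the pointer falls within the
thread's stack range before walking the JNI transition frames,
approximately (illustrative snippet, not the exact code):

    uint8_t* raw_obj = reinterpret_cast<uint8_t*>(obj);
    if (static_cast<size_t>(raw_obj - stack_begin) < stack_size) {
      // Walk quick frames looking for a matching spilled vreg slot;
      // the recorded stack_size must therefore match the adjusted
      // stack_begin (excluding the protected guard region).
    }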
The results for StringToBytesBenchmark on blueline little
cores running at a fixed frequency of 1420800 are approximately:
arm64 (medians from 3 runs)       before    after
  timeGetBytesAscii EMPTY         447.33   436.86
  timeGetBytesIso88591 EMPTY      440.52   431.13
  timeGetBytesUtf8 EMPTY          432.31   409.82

arm (medians from 3 runs)         before    after
  timeGetBytesAscii EMPTY         500.53   490.87
  timeGetBytesIso88591 EMPTY      496.45   495.30
  timeGetBytesUtf8 EMPTY          488.84   472.68
Test: m test-art-host-gtest
Test: testrunner.py --host
Test: testrunner.py --host --gcstress
Test: testrunner.py --host --jit-on-first-use
Test: testrunner.py --host --jit-on-first-use --gcstress
Test: run-gtests.sh
Test: testrunner.py --target --optimizing
Test: boots.
Bug: 172332525
Change-Id: I658f9d87071587b3e89f31c65feca976a11e9cc2
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 4608af1..c6d2826 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -154,7 +154,6 @@
"jni/jni_id_manager.cc",
"jni/jni_internal.cc",
"linear_alloc.cc",
- "managed_stack.cc",
"method_handles.cc",
"metrics_reporter.cc",
"mirror/array.cc",
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 0b96573..01e8911 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -758,12 +758,21 @@
return method->IsStatic() && !method->IsConstructor();
}
-inline HandleScope* GetGenericJniHandleScope(ArtMethod** managed_sp,
- size_t num_handle_scope_references) {
- // The HandleScope is just below the cookie and padding to align as uintptr_t.
- const size_t offset =
- RoundUp(HandleScope::SizeOf(num_handle_scope_references) + kJniCookieSize, sizeof(uintptr_t));
- return reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(managed_sp) - offset);
+inline jobject GetGenericJniSynchronizationObject(Thread* self, ArtMethod* called)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(!called->IsCriticalNative());
+ DCHECK(!called->IsFastNative());
+ DCHECK(self->GetManagedStack()->GetTopQuickFrame() != nullptr);
+ DCHECK_EQ(*self->GetManagedStack()->GetTopQuickFrame(), called);
+ if (called->IsStatic()) {
+ // The `jclass` is a pointer to the method's declaring class.
+ return reinterpret_cast<jobject>(called->GetDeclaringClassAddressWithoutBarrier());
+ } else {
+ // The `this` reference is stored in the first out vreg in the caller's frame.
+ uint8_t* sp = reinterpret_cast<uint8_t*>(self->GetManagedStack()->GetTopQuickFrame());
+ size_t frame_size = RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
+ return reinterpret_cast<jobject>(sp + frame_size + static_cast<size_t>(kRuntimePointerSize));
+ }
}
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index dfc1edd..72b4c03 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -214,10 +214,11 @@
// The caller is responsible for performing that check.
bool NeedsClinitCheckBeforeCall(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
-constexpr size_t kJniCookieSize = sizeof(uint32_t);
-
-inline HandleScope* GetGenericJniHandleScope(ArtMethod** managed_sp,
- size_t num_handle_scope_references);
+// Returns the synchronization object for a native method for a GenericJni frame
+// we have just created or are about to exit. The synchronization object is
+// the class object for static methods and the `this` object otherwise.
+jobject GetGenericJniSynchronizationObject(Thread* self, ArtMethod* called)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update .bss method entrypoint if the `callee_reference` has an associated oat file
// and that oat file has a .bss entry for the `callee_reference`.
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index d75893d..d9fbbc7 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -84,7 +84,7 @@
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
-extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
+extern void ReadBarrierJni(mirror::CompressedReference<mirror::Class>* handle_on_stack,
Thread* self)
NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index e031b21..78e4dbc 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -174,7 +174,7 @@
V(UpdateInlineCache, void, void) \
V(CompileOptimized, void, ArtMethod*, Thread*) \
\
- V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
+ V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Class>*, Thread*) \
V(ReadBarrierMarkReg00, mirror::Object*, mirror::Object*) \
V(ReadBarrierMarkReg01, mirror::Object*, mirror::Object*) \
V(ReadBarrierMarkReg02, mirror::Object*, mirror::Object*) \
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 38c6d3c..bda36d8 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -31,20 +31,20 @@
static inline void GoToRunnableFast(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
-extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
+extern void ReadBarrierJni(mirror::CompressedReference<mirror::Class>* declaring_class,
Thread* self ATTRIBUTE_UNUSED) {
DCHECK(kUseReadBarrier);
if (kUseBakerReadBarrier) {
- DCHECK(handle_on_stack->AsMirrorPtr() != nullptr)
+ DCHECK(declaring_class->AsMirrorPtr() != nullptr)
<< "The class of a static jni call must not be null";
// Check the mark bit and return early if it's already marked.
- if (LIKELY(handle_on_stack->AsMirrorPtr()->GetMarkBit() != 0)) {
+ if (LIKELY(declaring_class->AsMirrorPtr()->GetMarkBit() != 0)) {
return;
}
}
// Call the read barrier and update the handle.
- mirror::Object* to_ref = ReadBarrier::BarrierForRoot(handle_on_stack);
- handle_on_stack->Assign(to_ref);
+ mirror::Class* to_ref = ReadBarrier::BarrierForRoot(declaring_class);
+ declaring_class->Assign(to_ref);
}
// Called on entry to fast JNI, push a new local reference table only.
@@ -120,7 +120,6 @@
}
env->SetLocalSegmentState(env->GetLocalRefCookie());
env->SetLocalRefCookie(bit_cast<IRTSegmentState>(saved_local_ref_cookie));
- self->PopHandleScope();
}
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
@@ -231,8 +230,7 @@
// locked object.
if (called->IsSynchronized()) {
DCHECK(normal_native) << "@FastNative/@CriticalNative and synchronize is not supported";
- HandleScope* handle_scope = down_cast<HandleScope*>(self->GetTopHandleScope());
- jobject lock = handle_scope->GetHandle(0).ToJObject();
+ jobject lock = GetGenericJniSynchronizationObject(self, called);
DCHECK(lock != nullptr);
UnlockJniSynchronizedMethod(lock, self);
}
@@ -242,7 +240,7 @@
result.l, saved_local_ref_cookie, self));
} else {
if (LIKELY(!critical_native)) {
- PopLocalReferences(saved_local_ref_cookie, self); // Invalidates top handle scope.
+ PopLocalReferences(saved_local_ref_cookie, self);
}
switch (return_shorty_char) {
case 'F': {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index d4ed187..11c3820 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1436,12 +1436,6 @@
* necessary.
*
* void PushStack(uintptr_t): Push a value to the stack.
- *
- * uintptr_t PushHandleScope(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
- * as this might be important for null initialization.
- * Must return the jobject, that is, the reference to the
- * entry in the HandleScope (nullptr if necessary).
- *
*/
template<class T> class BuildNativeCallFrameStateMachine {
public:
@@ -1526,22 +1520,6 @@
}
}
- bool HaveHandleScopeGpr() const {
- return gpr_index_ > 0;
- }
-
- void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
- uintptr_t handle = PushHandle(ptr);
- if (HaveHandleScopeGpr()) {
- gpr_index_--;
- PushGpr(handle);
- } else {
- stack_entries_++;
- PushStack(handle);
- gpr_index_ = 0;
- }
- }
-
bool HaveIntGpr() const {
return gpr_index_ > 0;
}
@@ -1718,9 +1696,6 @@
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
- return delegate_->PushHandle(ref);
- }
uint32_t gpr_index_; // Number of free GPRs
uint32_t fpr_index_; // Number of free FPRs
@@ -1765,11 +1740,8 @@
Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
switch (cur_type_) {
case Primitive::kPrimNot:
- // TODO: fix abuse of mirror types.
- sm.AdvanceHandleScope(
- reinterpret_cast<mirror::Object*>(0x12345678));
+ sm.AdvancePointer(nullptr);
break;
-
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
@@ -1811,10 +1783,6 @@
// counting is already done in the superclass
}
- virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
- return reinterpret_cast<uintptr_t>(nullptr);
- }
-
protected:
uint32_t num_stack_entries_;
};
@@ -1822,26 +1790,18 @@
class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
public:
explicit ComputeGenericJniFrameSize(bool critical_native)
- : num_handle_scope_references_(0), critical_native_(critical_native) {}
+ : critical_native_(critical_native) {}
- uintptr_t* ComputeLayout(Thread* self,
- ArtMethod** managed_sp,
- const char* shorty,
- uint32_t shorty_len,
- HandleScope** handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uintptr_t* ComputeLayout(ArtMethod** managed_sp, const char* shorty, uint32_t shorty_len)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
Walk(shorty, shorty_len);
- // Add space for cookie and HandleScope.
- void* storage = GetGenericJniHandleScope(managed_sp, num_handle_scope_references_);
- DCHECK_ALIGNED(storage, sizeof(uintptr_t));
- *handle_scope =
- HandleScope::Create(storage, self->GetTopHandleScope(), num_handle_scope_references_);
- DCHECK_EQ(*handle_scope, storage);
- uint8_t* sp8 = reinterpret_cast<uint8_t*>(*handle_scope);
- DCHECK_GE(static_cast<size_t>(reinterpret_cast<uint8_t*>(managed_sp) - sp8),
- HandleScope::SizeOf(num_handle_scope_references_) + kJniCookieSize);
+ // Add space for cookie.
+ DCHECK_ALIGNED(managed_sp, sizeof(uintptr_t));
+ static_assert(sizeof(uintptr_t) >= sizeof(IRTSegmentState));
+ uint8_t* sp8 = reinterpret_cast<uint8_t*>(managed_sp) - sizeof(uintptr_t);
// Layout stack arguments.
sp8 = LayoutStackArgs(sp8);
@@ -1873,22 +1833,14 @@
return GetHiddenArgSlot(reserved_area) + 1;
}
- uintptr_t PushHandle(mirror::Object* /* ptr */) override;
-
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
- uint32_t num_handle_scope_references_;
const bool critical_native_;
};
-uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
- num_handle_scope_references_++;
- return reinterpret_cast<uintptr_t>(nullptr);
-}
-
void ComputeGenericJniFrameSize::WalkHeader(
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
// First 2 parameters are always excluded for @CriticalNative.
@@ -1900,7 +1852,7 @@
sm->AdvancePointer(nullptr);
// Class object or this as first argument
- sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
+ sm->AdvancePointer(nullptr);
}
// Class to push values to three separate regions. Used to fill the native call part. Adheres to
@@ -1939,11 +1891,6 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
- LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
- UNREACHABLE();
- }
-
private:
uintptr_t* cur_gpr_reg_;
uint32_t* cur_fpr_reg_;
@@ -1962,14 +1909,14 @@
ArtMethod** managed_sp,
uintptr_t* reserved_area)
: QuickArgumentVisitor(managed_sp, is_static, shorty, shorty_len),
- jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
- sm_(&jni_call_) {
+ jni_call_(nullptr, nullptr, nullptr, critical_native),
+ sm_(&jni_call_),
+ current_vreg_(nullptr) {
DCHECK_ALIGNED(managed_sp, kStackAlignment);
DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
ComputeGenericJniFrameSize fsc(critical_native);
- uintptr_t* out_args_sp =
- fsc.ComputeLayout(self, managed_sp, shorty, shorty_len, &handle_scope_);
+ uintptr_t* out_args_sp = fsc.ComputeLayout(managed_sp, shorty, shorty_len);
// Store hidden argument for @CriticalNative.
uintptr_t* hidden_arg_slot = fsc.GetHiddenArgSlot(reserved_area);
@@ -1982,10 +1929,15 @@
uintptr_t* out_args_sp_slot = fsc.GetOutArgsSpSlot(reserved_area);
*out_args_sp_slot = reinterpret_cast<uintptr_t>(out_args_sp);
+ // Prepare vreg pointer for spilling references.
+ static constexpr size_t frame_size =
+ RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
+ current_vreg_ = reinterpret_cast<uint32_t*>(
+ reinterpret_cast<uint8_t*>(managed_sp) + frame_size + sizeof(ArtMethod*));
+
jni_call_.Reset(fsc.GetStartGprRegs(reserved_area),
fsc.GetStartFprRegs(reserved_area),
- out_args_sp,
- handle_scope_);
+ out_args_sp);
// First 2 parameters are always excluded for CriticalNative methods.
if (LIKELY(!critical_native)) {
@@ -1993,82 +1945,52 @@
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceHandleScope(method->GetDeclaringClass().Ptr());
+ // The `jclass` is a pointer to the method's declaring class.
+ // The declaring class must be marked.
+ method->GetDeclaringClass<kWithReadBarrier>();
+ sm_.AdvancePointer(method->GetDeclaringClassAddressWithoutBarrier());
} // else "this" reference is already handled by QuickArgumentVisitor.
}
}
void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
- void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
-
- StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
- return handle_scope_->GetHandle(0).GetReference();
- }
-
- jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
- return handle_scope_->GetHandle(0).ToJObject();
- }
-
private:
// A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
class FillJniCall final : public FillNativeCall {
public:
- FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
- HandleScope* handle_scope, bool critical_native)
- : FillNativeCall(gpr_regs, fpr_regs, stack_args),
- handle_scope_(handle_scope),
- cur_entry_(0),
- critical_native_(critical_native) {}
+ FillJniCall(uintptr_t* gpr_regs,
+ uint32_t* fpr_regs,
+ uintptr_t* stack_args,
+ bool critical_native)
+ : FillNativeCall(gpr_regs, fpr_regs, stack_args),
+ cur_entry_(0),
+ critical_native_(critical_native) {}
- uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
-
- void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
+ void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
- handle_scope_ = scope;
cur_entry_ = 0U;
}
- void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
- // Initialize padding entries.
- size_t expected_slots = handle_scope_->NumberOfReferences();
- while (cur_entry_ < expected_slots) {
- handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
- }
-
- if (!critical_native_) {
- // Non-critical natives have at least the self class (jclass) or this (jobject).
- DCHECK_NE(cur_entry_, 0U);
- }
- }
-
bool CriticalNative() const {
return critical_native_;
}
private:
- HandleScope* handle_scope_;
size_t cur_entry_;
const bool critical_native_;
};
- HandleScope* handle_scope_;
FillJniCall jni_call_;
-
BuildNativeCallFrameStateMachine<FillJniCall> sm_;
+ // Pointer to the current vreg in caller's reserved out vreg area.
+ // Used for spilling reference arguments.
+ uint32_t* current_vreg_;
+
DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};
-uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
- uintptr_t tmp;
- MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
- h.Assign(ref);
- tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
- cur_entry_++;
- return tmp;
-}
-
void BuildGenericJniFrameVisitor::Visit() {
Primitive::Type type = GetParamPrimitiveType();
switch (type) {
@@ -2080,6 +2002,7 @@
long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
}
sm_.AdvanceLong(long_arg);
+ current_vreg_ += 2u;
break;
}
case Primitive::kPrimDouble: {
@@ -2091,16 +2014,22 @@
double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
}
sm_.AdvanceDouble(double_arg);
+ current_vreg_ += 2u;
break;
}
case Primitive::kPrimNot: {
- StackReference<mirror::Object>* stack_ref =
- reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
+ mirror::Object* obj =
+ reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress())->AsMirrorPtr();
+ StackReference<mirror::Object>* spill_ref =
+ reinterpret_cast<StackReference<mirror::Object>*>(current_vreg_);
+ spill_ref->Assign(obj);
+ sm_.AdvancePointer(obj != nullptr ? spill_ref : nullptr);
+ current_vreg_ += 1u;
break;
}
case Primitive::kPrimFloat:
sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
+ current_vreg_ += 1u;
break;
case Primitive::kPrimBoolean: // Fall-through.
case Primitive::kPrimByte: // Fall-through.
@@ -2108,6 +2037,7 @@
case Primitive::kPrimShort: // Fall-through.
case Primitive::kPrimInt: // Fall-through.
sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
+ current_vreg_ += 1u;
break;
case Primitive::kPrimVoid:
LOG(FATAL) << "UNREACHABLE";
@@ -2115,15 +2045,6 @@
}
}
-void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
- // Clear out rest of the scope.
- jni_call_.ResetRemainingScopeSlots();
- if (!jni_call_.CriticalNative()) {
- // Install HandleScope.
- self->PushHandleScope(handle_scope_);
- }
-}
-
/*
* Initializes the reserved area assumed to be directly below `managed_sp` for a native call:
*
@@ -2165,8 +2086,6 @@
{
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
visitor.VisitArguments();
- // FinalizeHandleScope pushes the handle scope on the thread.
- visitor.FinalizeHandleScope(self);
}
// Fix up managed-stack things in Thread. After this we can walk the stack.
@@ -2204,7 +2123,8 @@
// Start JNI, save the cookie.
if (called->IsSynchronized()) {
DCHECK(normal_native) << " @FastNative and synchronize is not supported";
- cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
+ jobject lock = GetGenericJniSynchronizationObject(self, called);
+ cookie = JniMethodStartSynchronized(lock, self);
if (self->IsExceptionPending()) {
self->PopHandleScope();
return nullptr; // Report error.
@@ -2259,14 +2179,6 @@
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
- if (kIsDebugBuild && !called->IsCriticalNative()) {
- BaseHandleScope* handle_scope = self->GetTopHandleScope();
- DCHECK(handle_scope != nullptr);
- DCHECK(!handle_scope->IsVariableSized());
- // Note: We do not hold mutator lock here for normal JNI, so we cannot use the method's shorty
- // to determine the number of references. Instead rely on the value from the HandleScope.
- DCHECK_EQ(handle_scope, GetGenericJniHandleScope(sp, handle_scope->NumberOfReferences()));
- }
return GenericJniMethodEnd(self, cookie, result, result_f, called);
}
diff --git a/runtime/handle.h b/runtime/handle.h
index 779345d..6f6e81f 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -93,14 +93,6 @@
return reference_->IsNull();
}
- ALWAYS_INLINE jobject ToJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
- if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
- // Special case so that we work with null handles.
- return nullptr;
- }
- return reinterpret_cast<jobject>(reference_);
- }
-
ALWAYS_INLINE StackReference<mirror::Object>* GetReference() {
return reference_;
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 6087077..56eee3b 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -29,8 +29,8 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-// Last change: Math.multiplyHigh intrinsic.
-const uint8_t ImageHeader::kImageVersion[] = { '0', '9', '5', '\0' };
+// Last change: JNI transition without HandleScope.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '9', '6', '\0' };
ImageHeader::ImageHeader(uint32_t image_reservation_size,
uint32_t component_count,
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 0deb917..f263b93 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -43,8 +43,8 @@
const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
switch (kind) {
- case kHandleScopeOrInvalid:
- return "HandleScopeOrInvalid";
+ case kJniTransitionOrInvalid:
+ return "JniTransitionOrInvalid";
case kLocal:
return "Local";
case kGlobal:
@@ -76,7 +76,7 @@
current_num_holes_(0),
resizable_(resizable) {
CHECK(error_msg != nullptr);
- CHECK_NE(desired_kind, kHandleScopeOrInvalid);
+ CHECK_NE(desired_kind, kJniTransitionOrInvalid);
// Overflow and maximum check.
CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
@@ -361,13 +361,16 @@
DCHECK(table_ != nullptr);
- if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
+ // TODO: We should eagerly check the ref kind against the `kind_` instead of
+ // relying on this weak check and postponing the rest until `CheckEntry()` below.
+ // Passing the wrong kind shall currently result in misleading warnings.
+ if (GetIndirectRefKind(iref) == kJniTransitionOrInvalid) {
auto* self = Thread::Current();
- if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
+ ScopedObjectAccess soa(self);
+ if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
auto* env = self->GetJniEnv();
DCHECK(env != nullptr);
if (env->IsCheckJniEnabled()) {
- ScopedObjectAccess soa(self);
LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
if (kDumpStackOnNonLocalReference) {
self->Dump(LOG_STREAM(WARNING));
@@ -376,6 +379,7 @@
return true;
}
}
+
const uint32_t idx = ExtractIndex(iref);
if (idx < bottom_index) {
// Wrong segment.
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index f877ce8..884e8d1 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -91,11 +91,11 @@
//
// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
- kHandleScopeOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
- kLocal = 1, // <<local reference>>
- kGlobal = 2, // <<global reference>>
- kWeakGlobal = 3, // <<weak global reference>>
- kLastKind = kWeakGlobal
+ kJniTransitionOrInvalid = 0, // <<JNI transition frame reference or invalid reference>>
+ kLocal = 1, // <<local reference>>
+ kGlobal = 2, // <<global reference>>
+ kWeakGlobal = 3, // <<weak global reference>>
+ kLastKind = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, IndirectRefKind rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index c21eee1..c2b4493 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -41,6 +41,8 @@
static constexpr size_t kMethodDeclaringClass = -3;
// The root is from the argument to a Proxy invoke.
static constexpr size_t kProxyReferenceArgument = -4;
+ // The root is from the argument to a native invoke.
+ static constexpr size_t kNativeReferenceArgument = -5;
// The maximum precise vreg number
static constexpr size_t kMaxVReg = std::numeric_limits<uint16_t>::max();
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 42e46e9..4c7b1aa 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -56,7 +56,7 @@
// declared as a friend by JniVmExt and JniEnvExt.
inline IndirectReferenceTable* GetIndirectReferenceTable(ScopedObjectAccess& soa,
IndirectRefKind kind) {
- DCHECK_NE(kind, kHandleScopeOrInvalid);
+ DCHECK_NE(kind, kJniTransitionOrInvalid);
JNIEnvExt* env = soa.Env();
IndirectReferenceTable* irt =
(kind == kLocal) ? &env->locals_
@@ -718,11 +718,14 @@
return true;
}
- bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
+ bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
found_kind = IndirectReferenceTable::GetIndirectRefKind(obj);
- if (found_kind == kHandleScopeOrInvalid && self->HandleScopeContains(obj)) {
+ if (found_kind == kJniTransitionOrInvalid &&
+ obj != nullptr &&
+ self->IsJniTransitionReference(obj)) {
found_kind = kLocal;
}
} else {
@@ -863,8 +866,8 @@
bool expect_null = false;
bool okay = true;
std::string error_msg;
- if (ref_kind == kHandleScopeOrInvalid) {
- if (!soa.Self()->HandleScopeContains(java_object)) {
+ if (ref_kind == kJniTransitionOrInvalid) {
+ if (!soa.Self()->IsJniTransitionReference(java_object)) {
okay = false;
error_msg = "use of invalid jobject";
} else {
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 9aad198..08d2061 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -2589,8 +2589,8 @@
return JNIGlobalRefType;
case kWeakGlobal:
return JNIWeakGlobalRefType;
- case kHandleScopeOrInvalid:
- // Assume value is in a handle scope.
+ case kJniTransitionOrInvalid:
+ // Assume value is in a JNI transition frame.
return JNILocalRefType;
}
LOG(FATAL) << "IndirectRefKind[" << kind << "]";
diff --git a/runtime/managed_stack.cc b/runtime/managed_stack.cc
deleted file mode 100644
index be609c3..0000000
--- a/runtime/managed_stack.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "managed_stack-inl.h"
-
-#include "android-base/stringprintf.h"
-
-#include "art_method.h"
-#include "mirror/object.h"
-#include "stack_reference.h"
-
-namespace art {
-
-size_t ManagedStack::NumJniShadowFrameReferences() const {
- size_t count = 0;
- for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
- current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
- current_frame != nullptr;
- current_frame = current_frame->GetLink()) {
- if (current_frame->GetMethod()->IsNative()) {
- // The JNI ShadowFrame only contains references. (For indirect reference.)
- count += current_frame->NumberOfVRegs();
- }
- }
- }
- return count;
-}
-
-bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
- for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
- current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
- current_frame != nullptr;
- current_frame = current_frame->GetLink()) {
- if (current_frame->Contains(shadow_frame_entry)) {
- return true;
- }
- }
- }
- return false;
-}
-
-} // namespace art
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 3fb83ac..04a27fe 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -119,10 +119,6 @@
return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
}
- size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
-
private:
// Encodes the top quick frame (which must be at least 4-byte aligned)
// and a flag that marks the GenericJNI trampoline.
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 870936c..7d97b73 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -31,6 +31,7 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction-inl.h"
+#include "entrypoints/entrypoint_utils-inl.h"
#include "lock_word-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -1464,9 +1465,9 @@
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
if (m->IsNative()) {
if (m->IsSynchronized()) {
- ObjPtr<mirror::Object> jni_this =
- stack_visitor->GetCurrentHandleScope(sizeof(void*))->GetReference(0);
- callback(jni_this, callback_context);
+ Thread* thread = stack_visitor->GetThread();
+ jobject lock = GetGenericJniSynchronizationObject(thread, m);
+ callback(thread->DecodeJObject(lock), callback_context);
}
return;
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 771e012..f642bcb 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -1067,8 +1067,8 @@
IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
if (kind == kLocal) {
self->GetJniEnv()->UpdateLocal(obj, result);
- } else if (kind == kHandleScopeOrInvalid) {
- LOG(FATAL) << "Unsupported UpdateReference for kind kHandleScopeOrInvalid";
+ } else if (kind == kJniTransitionOrInvalid) {
+ LOG(FATAL) << "Unsupported UpdateReference for kind kJniTransitionOrInvalid";
} else if (kind == kGlobal) {
self->GetJniEnv()->GetVm()->UpdateGlobal(self, ref, result);
} else {
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 094c25b..233106e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -150,19 +150,11 @@
return nullptr;
} else if (m->IsNative()) {
if (cur_quick_frame_ != nullptr) {
- HandleScope* hs;
- if (cur_oat_quick_method_header_ != nullptr) {
- hs = reinterpret_cast<HandleScope*>(
- reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
- } else {
- // GenericJNI frames have the HandleScope under the managed frame.
- uint32_t shorty_len;
- const char* shorty = m->GetShorty(&shorty_len);
- const size_t num_handle_scope_references =
- /* this */ 1u + std::count(shorty + 1, shorty + shorty_len, 'L');
- hs = GetGenericJniHandleScope(cur_quick_frame_, num_handle_scope_references);
- }
- return hs->GetReference(0);
+ // The `this` reference is stored in the first out vreg in the caller's frame.
+ const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
+ auto* stack_ref = reinterpret_cast<StackReference<mirror::Object>*>(
+ reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size + sizeof(ArtMethod*));
+ return stack_ref->AsMirrorPtr();
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
diff --git a/runtime/stack.h b/runtime/stack.h
index c746536..2a6fdc2 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -282,12 +282,6 @@
return cur_shadow_frame_;
}
- HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
- ArtMethod** sp = GetCurrentQuickFrame();
- // Skip ArtMethod*; handle scope comes next;
- return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
- }
-
std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);
static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6639501..46aa2b5 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1338,7 +1338,7 @@
tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
- tlsPtr_.stack_size -= read_guard_size;
+ tlsPtr_.stack_size -= read_guard_size + kStackOverflowProtectedSize;
InstallImplicitProtection();
}
@@ -2541,16 +2541,82 @@
}
}
-bool Thread::HandleScopeContains(jobject obj) const {
- StackReference<mirror::Object>* hs_entry =
- reinterpret_cast<StackReference<mirror::Object>*>(obj);
- for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
- if (cur->Contains(hs_entry)) {
+template <bool kPointsToStack>
+class JniTransitionReferenceVisitor : public StackVisitor {
+ public:
+ JniTransitionReferenceVisitor(Thread* thread, void* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ : StackVisitor(thread, /*context=*/ nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+ obj_(obj),
+ found_(false) {}
+
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ if (!m->IsNative() || m->IsCriticalNative()) {
+ return true;
+ }
+ if (kPointsToStack) {
+ uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
+ size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
+ uint32_t* current_vreg = reinterpret_cast<uint32_t*>(sp + frame_size + sizeof(ArtMethod*));
+ if (!m->IsStatic()) {
+ if (current_vreg == obj_) {
+ found_ = true;
+ return false;
+ }
+ current_vreg += 1u;
+ }
+ const char* shorty = m->GetShorty();
+ for (size_t i = 1, len = strlen(shorty); i != len; ++i) {
+ switch (shorty[i]) {
+ case 'D':
+ case 'J':
+ current_vreg += 2u;
+ break;
+ case 'L':
+ if (current_vreg == obj_) {
+ found_ = true;
+ return false;
+ }
+ FALLTHROUGH_INTENDED;
+ default:
+ current_vreg += 1u;
+ break;
+ }
+ }
+ // Continue only if the object is somewhere higher on the stack.
+ return obj_ >= current_vreg;
+ } else { // if (kPointsToStack)
+ if (m->IsStatic() && obj_ == m->GetDeclaringClassAddressWithoutBarrier()) {
+ found_ = true;
+ return false;
+ }
return true;
}
}
- // JNI code invoked from portable code uses shadow frames rather than the handle scope.
- return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
+
+ bool Found() const {
+ return found_;
+ }
+
+ private:
+ void* obj_;
+ bool found_;
+};
+
+bool Thread::IsJniTransitionReference(jobject obj) const {
+ DCHECK(obj != nullptr);
+ // We need a non-const pointer for stack walk even if we're not modifying the thread state.
+ Thread* thread = const_cast<Thread*>(this);
+ uint8_t* raw_obj = reinterpret_cast<uint8_t*>(obj);
+ if (static_cast<size_t>(raw_obj - tlsPtr_.stack_begin) < tlsPtr_.stack_size) {
+ JniTransitionReferenceVisitor</*kPointsToStack=*/ true> visitor(thread, raw_obj);
+ visitor.WalkStack();
+ return visitor.Found();
+ } else {
+ JniTransitionReferenceVisitor</*kPointsToStack=*/ false> visitor(thread, raw_obj);
+ visitor.WalkStack();
+ return visitor.Found();
+ }
}
void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
@@ -2574,10 +2640,12 @@
IndirectReferenceTable& locals = tlsPtr_.jni_env->locals_;
// Local references do not need a read barrier.
result = locals.Get<kWithoutReadBarrier>(ref);
- } else if (kind == kHandleScopeOrInvalid) {
- // Read from handle scope.
- DCHECK(HandleScopeContains(obj));
- result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
+ } else if (kind == kJniTransitionOrInvalid) {
+ // The `jclass` for a static method points to the CompressedReference<> in the
+ // `ArtMethod::declaring_class_`. Other `jobject` arguments point to spilled stack
+ // references but a StackReference<> is just a subclass of CompressedReference<>.
+ DCHECK(IsJniTransitionReference(obj));
+ result = reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(obj)->AsMirrorPtr();
VerifyObject(result);
} else if (kind == kGlobal) {
result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref);
@@ -3808,8 +3876,63 @@
ArtMethod* m = *cur_quick_frame;
VisitDeclaringClass(m);
- // Process register map (which native and runtime methods don't have)
- if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
+ if (m->IsNative()) {
+ // TODO: Spill the `this` reference in the AOT-compiled String.charAt()
+ // slow-path for throwing SIOOBE, so that we can remove this carve-out.
+ if (UNLIKELY(m->IsIntrinsic()) &&
+ m->GetIntrinsic() == enum_cast<uint32_t>(Intrinsics::kStringCharAt)) {
+ // The String.charAt() method is AOT-compiled with an intrinsic implementation
+ // instead of a JNI stub. It has a slow path that constructs a runtime frame
+ // for throwing SIOOBE and in that path we do not get the `this` pointer
+ // spilled on the stack, so there is nothing to visit. We can distinguish
+ // this from the GenericJni path by checking that the PC is in the boot image
+ // (PC shall be known thanks to the runtime frame for throwing SIOOBE).
+ // Note that JIT does not emit that intrinsic implementation.
+ const void* pc = reinterpret_cast<const void*>(GetCurrentQuickFramePc());
+ if (pc != 0u && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
+ return;
+ }
+ }
+ // Native methods spill their arguments to the reserved vregs in the caller's frame
+ // and use pointers to these stack references as jobject, jclass, jarray, etc.
+ // Note: We can come here for a @CriticalNative method when it needs to resolve the
+ // target native function but there would be no references to visit below.
+ const size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
+ const size_t method_pointer_size = static_cast<size_t>(kRuntimePointerSize);
+ uint32_t* current_vreg = reinterpret_cast<uint32_t*>(
+ reinterpret_cast<uint8_t*>(cur_quick_frame) + frame_size + method_pointer_size);
+ auto visit = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* ref_addr = reinterpret_cast<StackReference<mirror::Object>*>(current_vreg);
+ mirror::Object* ref = ref_addr->AsMirrorPtr();
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kNativeReferenceArgument, this);
+ if (ref != new_ref) {
+ ref_addr->Assign(new_ref);
+ }
+ }
+ };
+ const char* shorty = m->GetShorty();
+ if (!m->IsStatic()) {
+ visit();
+ current_vreg += 1u;
+ }
+ for (shorty += 1u; *shorty != 0; ++shorty) {
+ switch (*shorty) {
+ case 'D':
+ case 'J':
+ current_vreg += 2u;
+ break;
+ case 'L':
+ visit();
+ FALLTHROUGH_INTENDED;
+ default:
+ current_vreg += 1u;
+ break;
+ }
+ }
+ } else if (!m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
+ // Process register map (which native, runtime and proxy methods don't have)
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
DCHECK(method_header->IsOptimized());
StackReference<mirror::Object>* vreg_base =
diff --git a/runtime/thread.h b/runtime/thread.h
index 7475681..b63e39f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -902,8 +902,8 @@
ManagedStack::TopShadowFrameOffset());
}
- // Is the given obj in this thread's stack indirect reference table?
- bool HandleScopeContains(jobject obj) const;
+ // Is the given obj in one of this thread's JNI transition frames?
+ bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1905,6 +1905,7 @@
friend class ThreadList; // For ~Thread and Destroy.
friend class EntrypointsOrderTest; // To test the order of tls entries.
+ friend class JniCompilerTest; // For intercepting JNI entrypoint calls.
DISALLOW_COPY_AND_ASSIGN(Thread);
};