author Christian Wailes <chriswailes@google.com> 2021-05-26 17:33:54 +0000
committer Treehugger Robot <treehugger-gerrit@google.com> 2021-05-27 19:28:16 +0000
commit e8efdaa09b7ff36d5a986f0320f29ac4a7563896 (patch)
tree 5066cecab43006a7bf23f793aa911f74559c8017
parent ae553e0e1b26d95006aa27df4675104bfb55d49d (diff)
Revert "Remove stack protector from some interpreter functions"
A performance regression was noticed in go/lem benchmarks for ART. This is
likely caused by the no_stack_protector attribute preventing inlining into
hot functions.

Reason for revert: performance regression

Reverted Changes:
Ie0c81f97f: Remove stack protector from some interpreter funct...
Iaf565a374: Reset stack guard after fork from Zygote

Bug: 168258494
Change-Id: I018d58759c3a5bb624ea8e7fe09b6b49c9b19b6a
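As context for the theory above, here is a minimal, hypothetical C++ sketch (not ART code; InterpretOp and DispatchLoop are invented names) of the attribute this change reverts. Clang may decline to inline a no_stack_protector callee into a caller built with stack protection, which is the suspected mechanism behind the regression.

// Build with, e.g.: clang++ -O2 -fstack-protector-strong example.cc
__attribute__((no_stack_protector))
static int InterpretOp(int op) {
  char scratch[64];                     // would normally receive a stack canary
  scratch[0] = static_cast<char>(op);
  return scratch[0] * 2;
}

int DispatchLoop(int op) {
  // This caller keeps its stack protector, so the inliner may refuse to
  // merge InterpretOp's unprotected body into this protected frame,
  // leaving a call on the hot path.
  return InterpretOp(op);
}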
-rw-r--r--  libartbase/base/macros.h                                    4
-rw-r--r--  runtime/arch/arm/quick_entrypoints_cc_arm.cc                3
-rw-r--r--  runtime/art_method.cc                                       1
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc   1
-rw-r--r--  runtime/interpreter/interpreter.cc                          2
-rw-r--r--  runtime/interpreter/interpreter_common.cc                   2
-rw-r--r--  runtime/interpreter/mterp/mterp.cc                          2
-rw-r--r--  runtime/native/java_lang_reflect_Method.cc                  1
-rw-r--r--  runtime/obj_ptr.h                                           2
-rw-r--r--  runtime/reflection.h                                        1
10 files changed, 2 insertions, 17 deletions
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index c9ad68d721..eec73cb699 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -71,12 +71,10 @@ template<typename T> ART_FRIEND_TEST(test_set_name, individual_test)
#define ALWAYS_INLINE
#define FLATTEN
#else
-#define ALWAYS_INLINE __attribute__ ((always_inline, no_stack_protector))
+#define ALWAYS_INLINE __attribute__ ((always_inline))
#define FLATTEN __attribute__ ((flatten))
#endif
-#define NO_STACK_PROTECTOR __attribute__ ((no_stack_protector))
-
// clang doesn't like attributes on lambda functions. It would be nice to say:
// #define ALWAYS_INLINE_LAMBDA ALWAYS_INLINE
#define ALWAYS_INLINE_LAMBDA
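As an aside on the ALWAYS_INLINE_LAMBDA comment above, a minimal standalone sketch (hypothetical code, not from ART) of why the lambda variant must expand to nothing:

#define ALWAYS_INLINE __attribute__((always_inline))
#define ALWAYS_INLINE_LAMBDA  // empty: clang rejects the attribute on lambdas

ALWAYS_INLINE inline int Twice(int x) { return 2 * x; }  // fine on a function

int Demo(int x) {
  // Writing [](int v) __attribute__((always_inline)) { ... } does not
  // compile with clang, so the lambda macro expands to nothing here.
  auto inc = [](int v) ALWAYS_INLINE_LAMBDA { return v + 1; };
  return inc(Twice(x));
}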
diff --git a/runtime/arch/arm/quick_entrypoints_cc_arm.cc b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
index d7fef6f72e..987b4590b7 100644
--- a/runtime/arch/arm/quick_entrypoints_cc_arm.cc
+++ b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
@@ -25,7 +25,6 @@ extern "C" void art_quick_invoke_stub_internal(ArtMethod*, uint32_t*, uint32_t,
uint32_t*);
template <bool kIsStatic>
-NO_STACK_PROTECTOR
static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
// Note: We do not follow aapcs ABI in quick code for both softfp and hardfp.
@@ -97,7 +96,6 @@ static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t a
// Called by art::ArtMethod::Invoke to do entry into a non-static method.
// TODO: migrate into an assembly implementation as with ARM64.
-NO_STACK_PROTECTOR
extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty);
@@ -105,7 +103,6 @@ extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_
// Called by art::ArtMethod::Invoke to do entry into a static method.
// TODO: migrate into an assembly implementation as with ARM64.
-NO_STACK_PROTECTOR
extern "C" void art_quick_invoke_static_stub(ArtMethod* method, uint32_t* args,
uint32_t args_size, Thread* self, JValue* result,
const char* shorty) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index eb99b5e53a..89dc93b895 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -310,7 +310,6 @@ uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
return found_dex_pc;
}
-NO_STACK_PROTECTOR
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
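The context line above guards against stack exhaustion before the invoke proceeds. A minimal sketch of that idiom (hypothetical helper name; assumes a downward-growing stack, as the ART check does):

#include <cstdint>

// True while the current frame still sits above the reserved end of the
// stack; __builtin_frame_address(0) yields the current frame's address.
static bool HasStackHeadroom(const void* stack_end) {
  return reinterpret_cast<uintptr_t>(__builtin_frame_address(0)) >=
         reinterpret_cast<uintptr_t>(stack_end);
}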
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aed2b0b517..eaa9f4eebb 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -644,7 +644,6 @@ static void HandleDeoptimization(JValue* result,
DeoptimizationMethodType::kDefault);
}
-NO_STACK_PROTECTOR
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index c9d2660626..302551f8dd 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -268,7 +268,6 @@ static JValue ExecuteSwitch(Thread* self,
}
}
-NO_STACK_PROTECTOR
static inline JValue Execute(
Thread* self,
const CodeItemDataAccessor& accessor,
@@ -632,7 +631,6 @@ void EnterInterpreterFromDeoptimize(Thread* self,
ret_val->SetJ(value.GetJ());
}
-NO_STACK_PROTECTOR
JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame) {
DCHECK_EQ(self, Thread::Current());
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 4ae7fb1c20..4e88eda162 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -250,7 +250,6 @@ ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
// END DECLARATIONS.
-NO_STACK_PROTECTOR
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
ShadowFrame* shadow_frame,
@@ -1413,7 +1412,6 @@ static inline bool DoCallCommon(ArtMethod* called_method,
}
template<bool is_range, bool do_assignability_check>
-NO_STACK_PROTECTOR
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 6ebe1e2fcc..ae5e4703fd 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -160,7 +160,6 @@ bool CanUseMterp()
}
#define MTERP_INVOKE(Name) \
-NO_STACK_PROTECTOR \
extern "C" size_t MterpInvoke##Name(Thread* self, \
ShadowFrame* shadow_frame, \
uint16_t* dex_pc_ptr, \
@@ -176,7 +175,6 @@ extern "C" size_t MterpInvoke##Name(Thread* self,
self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u; \
} \
} \
-NO_STACK_PROTECTOR \
extern "C" size_t MterpInvoke##Name##Range(Thread* self, \
ShadowFrame* shadow_frame, \
uint16_t* dex_pc_ptr, \
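The MTERP_INVOKE macro above stamps out paired entry points through token pasting. A minimal standalone sketch of the pattern (hypothetical, heavily simplified):

#define DEFINE_INVOKE(Name)                                  \
  extern "C" int Invoke##Name(int arg) {                     \
    return arg + 1; /* stand-in for the real dispatch */     \
  }                                                          \
  extern "C" int Invoke##Name##Range(int arg) {              \
    return arg + 2;                                          \
  }

DEFINE_INVOKE(Virtual)  // defines InvokeVirtual and InvokeVirtualRange
DEFINE_INVOKE(Static)   // defines InvokeStatic and InvokeStaticRange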
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 706f1a61ba..2c0dd806e1 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -80,7 +80,6 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
}
}
-NO_STACK_PROTECTOR
static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
jobjectArray javaArgs) {
ScopedFastNativeObjectAccess soa(env);
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index c4d2a5a135..a03b67bed7 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -25,7 +25,7 @@
#include "runtime_globals.h"
// Always inline ObjPtr methods even in debug builds.
-#define OBJPTR_INLINE __attribute__ ((always_inline, no_stack_protector))
+#define OBJPTR_INLINE __attribute__ ((always_inline))
namespace art {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 13dc8e1466..b0e27da321 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -99,7 +99,6 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
// num_frames is number of frames we look up for access check.
template<PointerSize pointer_size>
-NO_STACK_PROTECTOR
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject method,
jobject receiver,