Remove stack protector from some interpreter functions

This CL removes stack protection from several functions on the
interpreter's method-invocation path so that the stack protector cookie
can be re-set when new processes fork from any of the zygotes. These
functions have frames that stay live across the fork; if they kept
their stack protector checks, those checks would fail on return once
the cookie is re-randomized in the child.
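
To make the mechanism concrete, here is a minimal sketch (not part of
the CL) of what the new NO_STACK_PROTECTOR macro expands to and the
canary check it suppresses. Only the macro definition mirrors
libartbase/base/macros.h; the two functions and main() below are
hypothetical and assume a build with -fstack-protector-all so that both
functions would otherwise be instrumented:

  #include <cstdio>

  #define NO_STACK_PROTECTOR __attribute__ ((no_stack_protector))

  // Under -fstack-protector-all the compiler stores the stack guard
  // value into this frame on entry and re-checks it before returning.
  // If the guard is re-randomized while the frame is still live (as
  // for frames straddling a zygote fork), that re-check aborts the
  // process.
  static void frame_with_canary(const char* msg) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "checked frame: %s", msg);
    std::puts(buf);
  }

  // With the attribute, no guard is stored or checked for this frame,
  // so it can safely stay on the stack across a guard change.
  NO_STACK_PROTECTOR
  static void frame_without_canary(const char* msg) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "unchecked frame: %s", msg);
    std::puts(buf);
  }

  int main() {
    frame_with_canary("protected");
    frame_without_canary("unprotected");
    return 0;
  }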

Test: boot
Test: atest KeyboardVisibilityControlTest
Bug: 168258494
Change-Id: Ie0c81f97fedfcdde7afc1b6b4befa65524724194
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index eec73cb..c9ad68d 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -71,10 +71,12 @@
 #define ALWAYS_INLINE
 #define FLATTEN
 #else
-#define ALWAYS_INLINE  __attribute__ ((always_inline))
+#define ALWAYS_INLINE  __attribute__ ((always_inline, no_stack_protector))
 #define FLATTEN  __attribute__ ((flatten))
 #endif
 
+#define NO_STACK_PROTECTOR __attribute__ ((no_stack_protector))
+
 // clang doesn't like attributes on lambda functions. It would be nice to say:
 //   #define ALWAYS_INLINE_LAMBDA ALWAYS_INLINE
 #define ALWAYS_INLINE_LAMBDA
diff --git a/runtime/arch/arm/quick_entrypoints_cc_arm.cc b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
index 987b459..d7fef6f 100644
--- a/runtime/arch/arm/quick_entrypoints_cc_arm.cc
+++ b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
@@ -25,6 +25,7 @@
                                                uint32_t*);
 
 template <bool kIsStatic>
+NO_STACK_PROTECTOR
 static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t args_size,
                                    Thread* self, JValue* result, const char* shorty) {
   // Note: We do not follow aapcs ABI in quick code for both softfp and hardfp.
@@ -96,6 +97,7 @@
 
 // Called by art::ArtMethod::Invoke to do entry into a non-static method.
 // TODO: migrate into an assembly implementation as with ARM64.
+NO_STACK_PROTECTOR
 extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_t args_size,
                                       Thread* self, JValue* result, const char* shorty) {
   quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty);
@@ -103,6 +105,7 @@
 
 // Called by art::ArtMethod::Invoke to do entry into a static method.
 // TODO: migrate into an assembly implementation as with ARM64.
+NO_STACK_PROTECTOR
 extern "C" void art_quick_invoke_static_stub(ArtMethod* method, uint32_t* args,
                                              uint32_t args_size, Thread* self, JValue* result,
                                              const char* shorty) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 89dc93b..eb99b5e 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -310,6 +310,7 @@
   return found_dex_pc;
 }
 
+NO_STACK_PROTECTOR
 void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
                        const char* shorty) {
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index eaa9f4e..aed2b0b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -644,6 +644,7 @@
                                               DeoptimizationMethodType::kDefault);
 }
 
+NO_STACK_PROTECTOR
 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Ensure we don't get thread suspension until the object arguments are safely in the shadow
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 302551f..c9d2660 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -268,6 +268,7 @@
   }
 }
 
+NO_STACK_PROTECTOR
 static inline JValue Execute(
     Thread* self,
     const CodeItemDataAccessor& accessor,
@@ -631,6 +632,7 @@
   ret_val->SetJ(value.GetJ());
 }
 
+NO_STACK_PROTECTOR
 JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
                                       ShadowFrame* shadow_frame) {
   DCHECK_EQ(self, Thread::Current());
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 4e88eda..4ae7fb1 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -250,6 +250,7 @@
 
 // END DECLARATIONS.
 
+NO_STACK_PROTECTOR
 void ArtInterpreterToCompiledCodeBridge(Thread* self,
                                         ArtMethod* caller,
                                         ShadowFrame* shadow_frame,
@@ -1412,6 +1413,7 @@
 }
 
 template<bool is_range, bool do_assignability_check>
+NO_STACK_PROTECTOR
 bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
             const Instruction* inst, uint16_t inst_data, JValue* result) {
   // Argument word count.
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index ae5e470..6ebe1e2 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -160,6 +160,7 @@
 }
 
 #define MTERP_INVOKE(Name)                                                                         \
+NO_STACK_PROTECTOR                                                                                 \
 extern "C" size_t MterpInvoke##Name(Thread* self,                                                  \
                                     ShadowFrame* shadow_frame,                                     \
                                     uint16_t* dex_pc_ptr,                                          \
@@ -175,6 +176,7 @@
         self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;                          \
   }                                                                                                \
 }                                                                                                  \
+NO_STACK_PROTECTOR                                                                                 \
 extern "C" size_t MterpInvoke##Name##Range(Thread* self,                                           \
                                            ShadowFrame* shadow_frame,                              \
                                            uint16_t* dex_pc_ptr,                                   \
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2c0dd80..706f1a6 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -80,6 +80,7 @@
   }
 }
 
+NO_STACK_PROTECTOR
 static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
                              jobjectArray javaArgs) {
   ScopedFastNativeObjectAccess soa(env);
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index a03b67b..c4d2a5a 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -25,7 +25,7 @@
 #include "runtime_globals.h"
 
 // Always inline ObjPtr methods even in debug builds.
-#define OBJPTR_INLINE __attribute__ ((always_inline))
+#define OBJPTR_INLINE __attribute__ ((always_inline, no_stack_protector))
 
 namespace art {
 
diff --git a/runtime/reflection.h b/runtime/reflection.h
index b0e27da..13dc8e1 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -99,6 +99,7 @@
 
 // num_frames is number of frames we look up for access check.
 template<PointerSize pointer_size>
+NO_STACK_PROTECTOR
 jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa,
                      jobject method,
                      jobject receiver,