Remove stack protector from some interpreter functions
This CL removes stack protection from several functions in the
interpreter so that the stack protector cookie can be re-set when new
processes fork from any of the zygotes. A protected function stores the
cookie in its stack frame on entry and checks it on return, so changing
the cookie while these frames are live across the fork would trigger a
spurious stack-smashing abort.
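For context, a minimal sketch of the kind of post-fork re-initialization
this enables (not part of this CL; assumes the cookie is the single
global __stack_chk_guard exported by the C runtime, as on Bionic and
glibc; some ABIs also keep a per-thread copy in TLS):

  #include <stdint.h>
  #include <sys/random.h>

  extern "C" uintptr_t __stack_chk_guard;  // checked by -fstack-protector code

  // Hypothetical helper: refresh the cookie in the child after fork().
  // Safe only if no stack-protected frames are live across this point,
  // which is what NO_STACK_PROTECTOR guarantees for the functions below.
  static void ResetStackGuardAfterFork() {
    uintptr_t cookie;
    if (getrandom(&cookie, sizeof(cookie), /*flags=*/0) ==
        static_cast<ssize_t>(sizeof(cookie))) {
      __stack_chk_guard = cookie;
    }
  }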
Test: boot
Bug: 168258494
Change-Id: Id65d0d96b028599f27f74c25e279a03b85e52651
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index eec73cb..13e87d7 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -75,6 +75,10 @@
#define FLATTEN __attribute__ ((flatten))
#endif
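+// Disables the stack protector for the annotated function. Used on
+// functions that may be live on the stack when the cookie is re-set.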
+#define NO_STACK_PROTECTOR __attribute__ ((no_stack_protector))
+
// clang doesn't like attributes on lambda functions. It would be nice to say:
// #define ALWAYS_INLINE_LAMBDA ALWAYS_INLINE
#define ALWAYS_INLINE_LAMBDA
diff --git a/runtime/arch/arm/quick_entrypoints_cc_arm.cc b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
index 987b459..d7fef6f 100644
--- a/runtime/arch/arm/quick_entrypoints_cc_arm.cc
+++ b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
@@ -25,6 +25,7 @@
uint32_t*);
template <bool kIsStatic>
+NO_STACK_PROTECTOR
static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
// Note: We do not follow aapcs ABI in quick code for both softfp and hardfp.
@@ -96,6 +97,7 @@
// Called by art::ArtMethod::Invoke to do entry into a non-static method.
// TODO: migrate into an assembly implementation as with ARM64.
+NO_STACK_PROTECTOR
extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty);
@@ -103,6 +105,7 @@
// Called by art::ArtMethod::Invoke to do entry into a static method.
// TODO: migrate into an assembly implementation as with ARM64.
+NO_STACK_PROTECTOR
extern "C" void art_quick_invoke_static_stub(ArtMethod* method, uint32_t* args,
uint32_t args_size, Thread* self, JValue* result,
const char* shorty) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 40b7a7b..2b7c238 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -310,6 +310,7 @@
return found_dex_pc;
}
+NO_STACK_PROTECTOR
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index ea6501c..1b7c2cf 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -647,6 +647,7 @@
method_type);
}
+NO_STACK_PROTECTOR
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index ffe1dc0..8ceade8 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -255,6 +255,7 @@
}
}
+NO_STACK_PROTECTOR
static inline JValue Execute(
Thread* self,
const CodeItemDataAccessor& accessor,
@@ -570,6 +571,7 @@
ret_val->SetJ(value.GetJ());
}
+NO_STACK_PROTECTOR
JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame) {
DCHECK_EQ(self, Thread::Current());
@@ -586,6 +588,7 @@
return Execute(self, accessor, *shadow_frame, JValue());
}
+NO_STACK_PROTECTOR
void ArtInterpreterToInterpreterBridge(Thread* self,
const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame,
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index afba57e..4ee4cb5 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -254,6 +254,7 @@
// END DECLARATIONS.
+NO_STACK_PROTECTOR
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
ShadowFrame* shadow_frame,
@@ -1410,6 +1411,7 @@
}
template<bool is_range, bool do_assignability_check>
+NO_STACK_PROTECTOR
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index d95c507..5e744dc 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -1834,6 +1834,7 @@
#undef OPCODE_CASE
template<bool do_access_check, bool transaction_active>
+NO_STACK_PROTECTOR
void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
Thread* self = ctx->self;
const CodeItemDataAccessor& accessor = ctx->accessor;
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2c0dd80..706f1a6 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -80,6 +80,7 @@
}
}
+NO_STACK_PROTECTOR
static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
jobjectArray javaArgs) {
ScopedFastNativeObjectAccess soa(env);
diff --git a/runtime/reflection.h b/runtime/reflection.h
index b0e27da..13dc8e1 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -99,6 +99,7 @@
// num_frames is number of frames we look up for access check.
template<PointerSize pointer_size>
+NO_STACK_PROTECTOR
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject method,
jobject receiver,