x86: unify smp_call_function_mask
The definition is moved to the common header, and the x86_64
implementation is renamed to native_smp_call_function_mask and
registered in smp_ops.
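
For reference, a minimal standalone sketch of the smp_ops indirection
this change relies on (simplified: a plain unsigned long stands in for
cpumask_t and the IPI machinery is elided; the real code lives in
include/asm-x86/smp.h and arch/x86/kernel/smp_64.c):

    /* table of function pointers selecting an SMP implementation */
    struct smp_ops {
            int (*smp_call_function_mask)(unsigned long mask,
                                          void (*func)(void *), void *info,
                                          int wait);
    };

    /* the bare-metal implementation, registered in the table */
    static int native_smp_call_function_mask(unsigned long mask,
                                             void (*func)(void *),
                                             void *info, int wait)
    {
            /* ... send IPIs to the cpus in mask, optionally wait ... */
            return 0;
    }

    static struct smp_ops smp_ops = {
            .smp_call_function_mask = native_smp_call_function_mask,
    };

    /* common inline wrapper: callers always dispatch through the ops
     * table, so a paravirt guest can install its own implementation
     * without touching any caller */
    static inline int smp_call_function_mask(unsigned long mask,
                                             void (*func)(void *),
                                             void *info, int wait)
    {
            return smp_ops.smp_call_function_mask(mask, func, info, wait);
    }

With the wrapper in the common header, both 32-bit and 64-bit callers
dispatch through smp_ops, matching the scheme already in use on i386
(the identical wrapper is removed from smp_32.h below).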
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index fd18161..225b765 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -386,9 +386,9 @@
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
int ret;
@@ -531,5 +531,6 @@
struct smp_ops smp_ops = {
.smp_send_reschedule = native_smp_send_reschedule,
+ .smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 28f33c0..d9782f4 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -28,6 +28,13 @@
{
smp_ops.smp_send_reschedule(cpu);
}
+
+static inline int smp_call_function_mask(cpumask_t mask,
+ void (*func) (void *info), void *info,
+ int wait)
+{
+ return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
#endif
#ifdef CONFIG_X86_32
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index c60a3dd..d9337ee 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -60,12 +60,6 @@
{
smp_ops.smp_send_stop();
}
-static inline int smp_call_function_mask(cpumask_t mask,
- void (*func) (void *info), void *info,
- int wait)
-{
- return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);