| /* |
| * This program is free software; you can distribute it and/or modify it |
| * under the terms of the GNU General Public License (Version 2) as |
| * published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope it will be useful, but WITHOUT |
| * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| * for more details. |
| * |
| * You should have received a copy of the GNU General Public License along |
| * with this program; if not, write to the Free Software Foundation, Inc., |
| * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
| * |
| * Copyright (C) 2007 MIPS Technologies, Inc. |
| * Chris Dearman (chris@mips.com) |
| */ |
| |
| #undef DEBUG |
| |
| #include <linux/kernel.h> |
| #include <linux/sched.h> |
| #include <linux/smp.h> |
| #include <linux/cpumask.h> |
| #include <linux/interrupt.h> |
| #include <linux/compiler.h> |
| |
| #include <asm/atomic.h> |
| #include <asm/cacheflush.h> |
| #include <asm/cpu.h> |
| #include <asm/processor.h> |
| #include <asm/system.h> |
| #include <asm/hardirq.h> |
| #include <asm/mmu_context.h> |
| #include <asm/smp.h> |
| #include <asm/time.h> |
| #include <asm/mipsregs.h> |
| #include <asm/mipsmtregs.h> |
| #include <asm/mips_mt.h> |
| |
| /* |
* Crude manipulation of the CPU masks to control which
* CPUs are brought online during initialisation
| * |
| * Beware... this needs to be called after CPU discovery |
| * but before CPU bringup |
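*
* e.g. "allowcpus=0,2-3" on the kernel command line limits bringup to
* those CPUs; CPU 0 is always added back into the mask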
| */ |
| static int __init allowcpus(char *str) |
| { |
| cpumask_t cpu_allow_map; |
| char buf[256]; |
| int len; |
| |
| cpus_clear(cpu_allow_map); |
| if (cpulist_parse(str, &cpu_allow_map) == 0) { |
| cpu_set(0, cpu_allow_map); |
| cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map); |
| len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map); |
| buf[len] = '\0'; |
| pr_debug("Allowable CPUs: %s\n", buf); |
| return 1; |
| } else |
| return 0; |
| } |
| __setup("allowcpus=", allowcpus); |
| |
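/* Raise the GIC call-function IPI that targets the given CPU (VPE) */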
| static void ipi_call_function(unsigned int cpu) |
| { |
| unsigned int action = 0; |
| |
| pr_debug("CPU%d: %s cpu %d status %08x\n", |
| smp_processor_id(), __func__, cpu, read_c0_status()); |
| |
| switch (cpu) { |
| case 0: |
| action = GIC_IPI_EXT_INTR_CALLFNC_VPE0; |
| break; |
| case 1: |
| action = GIC_IPI_EXT_INTR_CALLFNC_VPE1; |
| break; |
| case 2: |
| action = GIC_IPI_EXT_INTR_CALLFNC_VPE2; |
| break; |
| case 3: |
| action = GIC_IPI_EXT_INTR_CALLFNC_VPE3; |
| break; |
| } |
| gic_send_ipi(action); |
| } |
| |
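/* Raise the GIC reschedule IPI that targets the given CPU (VPE) */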
| static void ipi_resched(unsigned int cpu) |
| { |
| unsigned int action = 0; |
| |
| pr_debug("CPU%d: %s cpu %d status %08x\n", |
| smp_processor_id(), __func__, cpu, read_c0_status()); |
| |
| switch (cpu) { |
| case 0: |
| action = GIC_IPI_EXT_INTR_RESCHED_VPE0; |
| break; |
| case 1: |
| action = GIC_IPI_EXT_INTR_RESCHED_VPE1; |
| break; |
| case 2: |
| action = GIC_IPI_EXT_INTR_RESCHED_VPE2; |
| break; |
| case 3: |
| action = GIC_IPI_EXT_INTR_RESCHED_VPE3; |
| break; |
| } |
| gic_send_ipi(action); |
| } |
| |
| /* |
| * FIXME: This isn't restricted to CMP |
| * The SMVP kernel could use GIC interrupts if available |
| */ |
| void cmp_send_ipi_single(int cpu, unsigned int action) |
| { |
| unsigned long flags; |
| |
| local_irq_save(flags); |
| |
| switch (action) { |
| case SMP_CALL_FUNCTION: |
| ipi_call_function(cpu); |
| break; |
| |
| case SMP_RESCHEDULE_YOURSELF: |
| ipi_resched(cpu); |
| break; |
| } |
| |
| local_irq_restore(flags); |
| } |
| |
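/* Deliver the IPI to each CPU in the mask in turn */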
| static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action) |
| { |
| unsigned int i; |
| |
| for_each_cpu_mask(i, mask) |
| cmp_send_ipi_single(i, action); |
| } |
| |
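/*
* Per-CPU setup run as each secondary starts: unmask the GIC interrupt
* lines in Status.IM and record which core/VPE/TC this CPU is
*/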
| static void cmp_init_secondary(void) |
| { |
struct cpuinfo_mips *c = &current_cpu_data;
| |
| /* Assume GIC is present */ |
| change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | |
| STATUSF_IP7); |
| |
| /* Enable per-cpu interrupts: platform specific */ |
| |
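/* EBase.CPUNum: the low bit selects the VPE, the remaining bits the core */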
| c->core = (read_c0_ebase() >> 1) & 0xff; |
| #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) |
| c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; |
| #endif |
| #ifdef CONFIG_MIPS_MT_SMTC |
| c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC; |
| #endif |
| } |
| |
| static void cmp_smp_finish(void) |
| { |
| pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); |
| |
| /* CDFIXME: remove this? */ |
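/* Arm the CP0 count/compare timer about eight ticks into the future */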
| write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); |
| |
| #ifdef CONFIG_MIPS_MT_FPAFF |
| /* If we have an FPU, enroll ourselves in the FPU-full mask */ |
| if (cpu_has_fpu) |
| cpu_set(smp_processor_id(), mt_fpu_cpumask); |
| #endif /* CONFIG_MIPS_MT_FPAFF */ |
| |
| local_irq_enable(); |
| } |
| |
| static void cmp_cpus_done(void) |
| { |
| pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); |
| } |
| |
| /* |
* Set up the PC, SP, and GP of a secondary processor and start it running
* smp_bootstrap is the place to resume from
* __KSTK_TOS(idle) is apparently the stack pointer
* task_thread_info(idle) is the gp
| */ |
| static void cmp_boot_secondary(int cpu, struct task_struct *idle) |
| { |
| struct thread_info *gp = task_thread_info(idle); |
| unsigned long sp = __KSTK_TOS(idle); |
| unsigned long pc = (unsigned long)&smp_bootstrap; |
| unsigned long a0 = 0; |
| |
| pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(), |
| __func__, cpu); |
| |
| #if 0 |
| /* Needed? */ |
| flush_icache_range((unsigned long)gp, |
| (unsigned long)(gp + sizeof(struct thread_info))); |
| #endif |
| |
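/* Pass pc/sp/gp/a0 to the AMON boot monitor, which starts the CPU there */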
| amon_cpu_start(cpu, pc, sp, gp, a0); |
| } |
| |
| /* |
| * Common setup before any secondaries are started |
| */ |
| void __init cmp_smp_setup(void) |
| { |
| int i; |
| int ncpu = 0; |
| |
| pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); |
| |
| #ifdef CONFIG_MIPS_MT_FPAFF |
| /* If we have an FPU, enroll ourselves in the FPU-full mask */ |
| if (cpu_has_fpu) |
| cpu_set(0, mt_fpu_cpumask); |
| #endif /* CONFIG_MIPS_MT_FPAFF */ |
| |
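/* Ask the AMON boot monitor which other CPUs are available and mark them possible */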
| for (i = 1; i < NR_CPUS; i++) { |
| if (amon_cpu_avail(i)) { |
| cpu_set(i, cpu_possible_map); |
| __cpu_number_map[i] = ++ncpu; |
| __cpu_logical_map[ncpu] = i; |
| } |
| } |
| |
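/* With the MT ASE, derive the sibling count from MVPConf0.PTC (number of TCs - 1) */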
| if (cpu_has_mipsmt) { |
| unsigned int nvpe, mvpconf0 = read_c0_mvpconf0(); |
| |
| nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; |
| smp_num_siblings = nvpe; |
| } |
| pr_info("Detected %i available secondary CPU(s)\n", ncpu); |
| } |
| |
| void __init cmp_prepare_cpus(unsigned int max_cpus) |
| { |
| pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", |
| smp_processor_id(), __func__, max_cpus); |
| |
| /* |
| * FIXME: some of these options are per-system, some per-core and |
| * some per-cpu |
| */ |
| mips_mt_set_cpuoptions(); |
| } |
| |
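/* The CMP flavour of the platform SMP operations */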
| struct plat_smp_ops cmp_smp_ops = { |
| .send_ipi_single = cmp_send_ipi_single, |
| .send_ipi_mask = cmp_send_ipi_mask, |
| .init_secondary = cmp_init_secondary, |
| .smp_finish = cmp_smp_finish, |
| .cpus_done = cmp_cpus_done, |
| .boot_secondary = cmp_boot_secondary, |
| .smp_setup = cmp_smp_setup, |
| .prepare_cpus = cmp_prepare_cpus, |
| }; |