#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

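/*
 * Send @vector to every CPU in @mask as a sequence of unicasts in
 * physical destination mode.
 */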
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

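/*
 * Send @vector to every CPU in @mask except the current one, again as
 * a sequence of physical-mode unicasts.
 */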
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}
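
/*
 * For illustration: an APIC driver picks among the helpers in this file
 * by filling in the corresponding struct apic callbacks.  A minimal
 * sketch (the driver name "apic_example" is hypothetical; the field
 * names are the struct apic IPI callbacks of this era):
 *
 *	static struct apic apic_example = {
 *		...
 *		.send_IPI			= default_send_IPI_single,
 *		.send_IPI_mask			= default_send_IPI_mask_logical,
 *		.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_logical,
 *		.send_IPI_allbutself		= default_send_IPI_allbutself,
 *		.send_IPI_all			= default_send_IPI_all,
 *		.send_IPI_self			= default_send_IPI_self,
 *		...
 *	};
 */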

#ifdef CONFIG_X86_32

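/*
 * 32-bit only: logical destination mode variant of the unicast
 * sequence above, using the (early) per-CPU logical APIC ID instead
 * of the physical one.
 */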
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

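/*
 * As above, but skip the current CPU.
 */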
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines: it writes the first word of
 * the cpumask directly into the logical destination field, which only
 * works in flat logical mode, where each of the (at most 8) CPUs owns
 * one bit of the destination register.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

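/*
 * Broadcast @vector to all CPUs except the sender via the APIC
 * "all but self" shorthand.
 */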
void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, so avoid sending IPIs in that case.
	 */
	if (num_online_cpus() <= 1)
		return;

	__default_local_send_IPI_allbutself(vector);
}

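/*
 * Broadcast @vector to all CPUs, including the sender, via the
 * "all including self" shorthand.
 */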
void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

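/*
 * Send @vector to the local CPU via the self shorthand.
 */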
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

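/*
 * Variant of smp_processor_id() that derives the CPU number from the
 * hardware APIC ID rather than from per-CPU data, so it can be used in
 * contexts (e.g. the crash path) where the latter may not be reliable.
 * Falls back to CPU 0 if no mapping is found.
 */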
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!cpu_has_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_X86_32 */