/* arch/x86/include/asm/hardirq.h */
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>
6
7typedef struct {
8 unsigned int __softirq_pending;
9 unsigned int __nmi_count; /* arch dependent */
Brian Gerst2de3a5f2009-01-23 11:03:32 +090010#ifdef CONFIG_X86_LOCAL_APIC
11 unsigned int apic_timer_irqs; /* arch dependent */
12 unsigned int irq_spurious_count;
Fernando Luis Vazquez Caob49d7d82011-12-15 11:32:24 +090013 unsigned int icr_read_retry_count;
Brian Gerst2de3a5f2009-01-23 11:03:32 +090014#endif
Yang Zhangd78f2662013-04-11 19:25:11 +080015#ifdef CONFIG_HAVE_KVM
16 unsigned int kvm_posted_intr_ipis;
17#endif
Dimitri Sivanich4a4de9c2009-10-14 09:22:57 -050018 unsigned int x86_platform_ipis; /* arch dependent */
Ingo Molnarbfe2a3c2009-01-23 10:20:15 +010019 unsigned int apic_perf_irqs;
Peter Zijlstrae360adb2010-10-14 14:01:34 +080020 unsigned int apic_irq_work_irqs;
Brian Gerst2de3a5f2009-01-23 11:03:32 +090021#ifdef CONFIG_SMP
Brian Gerst22da7b32009-01-23 11:03:31 +090022 unsigned int irq_resched_count;
23 unsigned int irq_call_count;
Tomoki Sekiyamafd0f5862012-09-26 11:11:28 +090024 /*
25 * irq_tlb_count is double-counted in irq_call_count, so it must be
26 * subtracted from irq_call_count when displaying irq_call_count
27 */
Brian Gerst22da7b32009-01-23 11:03:31 +090028 unsigned int irq_tlb_count;
Brian Gerst2de3a5f2009-01-23 11:03:32 +090029#endif
Jan Beulich0444c9b2009-11-20 14:03:05 +000030#ifdef CONFIG_X86_THERMAL_VECTOR
Brian Gerst22da7b32009-01-23 11:03:31 +090031 unsigned int irq_thermal_count;
Jan Beulich0444c9b2009-11-20 14:03:05 +000032#endif
33#ifdef CONFIG_X86_MCE_THRESHOLD
Brian Gerst22da7b32009-01-23 11:03:31 +090034 unsigned int irq_threshold_count;
Brian Gerst2de3a5f2009-01-23 11:03:32 +090035#endif
Brian Gerst22da7b32009-01-23 11:03:31 +090036} ____cacheline_aligned irq_cpustat_t;
37
David Howells9b8de742009-04-21 23:00:24 +010038DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
Brian Gerst22da7b32009-01-23 11:03:31 +090039
40/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
41#define MAX_HARDIRQS_PER_CPU NR_VECTORS
42
43#define __ARCH_IRQ_STAT
44
Alex Shic6ae41e2012-05-11 15:35:27 +080045#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
Brian Gerst22da7b32009-01-23 11:03:31 +090046
Alex Shic6ae41e2012-05-11 15:35:27 +080047#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
Brian Gerst22da7b32009-01-23 11:03:31 +090048
49#define __ARCH_SET_SOFTIRQ_PENDING
50
Alex Shic6ae41e2012-05-11 15:35:27 +080051#define set_softirq_pending(x) \
52 this_cpu_write(irq_stat.__softirq_pending, (x))
53#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x))
Brian Gerst22da7b32009-01-23 11:03:31 +090054
55extern void ack_bad_irq(unsigned int irq);
Jan Beulicha2eddfa2008-05-12 15:44:41 +020056
57extern u64 arch_irq_stat_cpu(unsigned int cpu);
58#define arch_irq_stat_cpu arch_irq_stat_cpu
59
60extern u64 arch_irq_stat(void);
61#define arch_irq_stat arch_irq_stat
Brian Gerst22da7b32009-01-23 11:03:31 +090062
#endif /* _ASM_X86_HARDIRQ_H */