| /* SPDX-License-Identifier: GPL-2.0 */ |
| #ifndef _LINUX_KERNEL_STAT_H |
| #define _LINUX_KERNEL_STAT_H |
| |
| #include <linux/smp.h> |
| #include <linux/threads.h> |
| #include <linux/percpu.h> |
| #include <linux/cpumask.h> |
| #include <linux/interrupt.h> |
| #include <linux/sched.h> |
| #include <linux/vtime.h> |
| #include <asm/irq.h> |
| |
| /* |
| * 'kernel_stat.h' contains the definitions needed for doing |
| * some kernel statistics (CPU usage, context switches ...), |
| * used by rstatd/perfmeter |
| */ |
| |
/*
 * Indices into kernel_cpustat.cpustat[], one per accounted category of
 * CPU time.  NR_STATS must remain the last entry: it sizes the array.
 */
enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
	NR_STATS,
};
| |
/*
 * Per-CPU cumulative counters, one u64 slot per cpu_usage_stat category
 * (filled by the account_*_time() hooks declared below).
 */
struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};
| |
/* Per-CPU interrupt statistics. */
struct kernel_stat {
	unsigned long irqs_sum;			/* total interrupts since bootup (see kstat_cpu_irqs_sum()) */
	unsigned int softirqs[NR_SOFTIRQS];	/* per-softirq-vector counts (see kstat_incr_softirqs_this_cpu()) */
};
| |
#ifdef VENDOR_EDIT
/* Hailong.Liu@TECH.Kernel.CPU, 2019/10/24, stat cpu usage on each tick. */
/*
 * Capacity of the per-CPU sample buffer: the number of ticks in a
 * 10 second window (10 * NSEC_PER_SEC / TICK_NSEC).
 */
#define MAX_CTP_WINDOW (10 * NSEC_PER_SEC / TICK_NSEC)
/* One per-tick CPU-usage sample attributed to a task. */
struct task_cpustat {
	pid_t pid;			/* sampled task's pid */
	pid_t tgid;			/* sampled task's thread-group id */
	enum cpu_usage_stat type;	/* cputime category the tick was charged to */
#ifdef CONFIG_THREAD_INFO_IN_TASK
	bool l_core;			/* NOTE(review): presumably "ran on a little core" — confirm at the fill site */
#endif
#ifdef CONFIG_MTK_UNIFY_POWER
	unsigned long cap;		/* NOTE(review): presumably CPU capacity at sample time — confirm */
#else
	int freq;			/* NOTE(review): presumably CPU frequency at sample time — confirm */
#endif
	unsigned long begin;		/* sample interval start (units set by the writer — verify) */
	unsigned long end;		/* sample interval end */
	char comm[TASK_COMM_LEN];	/* snapshot of the task's comm name */
};

/* Per-CPU buffer of task_cpustat samples; idx is presumably the next write slot, wrapping at MAX_CTP_WINDOW — confirm at the writer. */
struct kernel_task_cpustat {
	unsigned int idx;
	struct task_cpustat cpustat[MAX_CTP_WINDOW];
};

DECLARE_PER_CPU(struct kernel_task_cpustat, ktask_cpustat);
/* sysctl knob: non-zero enables per-task cpustat sampling (defined elsewhere). */
extern unsigned int sysctl_task_cpustats_enable;
#endif /* VENDOR_EDIT */
| |
DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
/* Remote-CPU accessors; these yield the per-CPU object itself (an lvalue). */
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

/* System-wide context-switch count (definition elsewhere). */
extern unsigned long long nr_context_switches(void);

/* Count of occurrences of @irq on @cpu since bootup. */
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
/* Bump the current CPU's count for @irq. */
extern void kstat_incr_irq_this_cpu(unsigned int irq);
| |
/*
 * Increment this CPU's count for softirq vector @irq.
 * Uses __this_cpu_inc(), so the caller must already be in a
 * non-preemptible context per the __this_cpu_* contract.
 */
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}
| |
| static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) |
| { |
| return kstat_cpu(cpu).softirqs[irq]; |
| } |
| |
/*
 * Number of interrupts per specific IRQ source, since bootup
 */
extern unsigned int kstat_irqs(unsigned int irq);
/* NOTE(review): _usr variant presumably adds locking for callers outside irq code (e.g. /proc) — confirm at the definition. */
extern unsigned int kstat_irqs_usr(unsigned int irq);
| |
| /* |
| * Number of interrupts per cpu, since bootup |
| */ |
| static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu) |
| { |
| return kstat_cpu(cpu).irqs_sum; |
| } |
| |
/* cputime accounting hooks: charge an amount of time (u64) to a category. */
extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
/* Like account_system_time(), but charges to an explicit cpu_usage_stat index. */
extern void account_system_index_time(struct task_struct *, u64,
				enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
| |
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * With native vtime accounting the periodic tick does no charging of
 * its own; it only flushes @tsk's pending vtime state.
 */
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_flush(tsk);
}
#else
/* Tick-based accounting: charge one tick to @tsk; @user selects user vs system. */
extern void account_process_tick(struct task_struct *, int user);
#endif

/* Account @ticks elapsed idle ticks. */
extern void account_idle_ticks(unsigned long ticks);
| |
| #endif /* _LINUX_KERNEL_STAT_H */ |