#ifndef _SCHED_SYSCTL_H
#define _SCHED_SYSCTL_H

#ifdef CONFIG_DETECT_HUNG_TASK
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern int sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
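
/*
 * Because sysctl_hung_task_timeout_secs still exists as a compile-time
 * constant (0) when CONFIG_DETECT_HUNG_TASK is off, callers can test it
 * without any #ifdef.  A minimal sketch (do_hung_task_check() is a
 * hypothetical helper, not part of this header):
 *
 *	if (sysctl_hung_task_timeout_secs)
 *		do_hung_task_check(tsk);
 */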

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this number via sysctl, but there is a
 * catch.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per vma. In ELF, the number of sections is stored as an unsigned
 * short, so it must stay below 65535 when the coredump is written. Because
 * the kernel adds a few informative sections to the program image while
 * generating the coredump, some margin is needed. Currently 1-3 extra
 * sections are added, depending on the architecture; we use 5 as a safe
 * margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
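
/*
 * Worked example: USHRT_MAX is 65535, so with the margin of 5 above the
 * default comes out to 65535 - 5 = 65530 map areas per mm.
 */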

extern int sysctl_max_map_count;

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

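/*
 * How the scheduler scales its base tunables (e.g. sysctl_sched_latency)
 * with the number of online CPUs; the exact factors are computed in
 * kernel/sched/fair.c.  Roughly: NONE leaves the values untouched, LOG
 * scales them logarithmically with the CPU count, and LINEAR scales them
 * linearly.
 */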
enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;

int sched_proc_update_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *length,
			      loff_t *ppos);

static inline unsigned int get_sysctl_timer_migration(void)
{
	return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
#endif
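
/*
 * get_sysctl_timer_migration() gives timer code a single call site for the
 * knob above: with CONFIG_SCHED_DEBUG it reports the current sysctl value,
 * without it timer migration is unconditionally reported as enabled (1).
 */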

/*
 * control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
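
/*
 * Example: with the usual defaults of sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000, realtime tasks may consume at most 95% of
 * every one-second period; writing -1 to sched_rt_runtime_us disables the
 * throttling entirely.
 */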

#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif

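/*
 * Default SCHED_RR timeslice, kept internally in jiffies; user space is
 * expected to tune it through /proc/sys/kernel/sched_rr_timeslice_ms,
 * which is wired to sched_rr_handler() below.
 */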
extern int sched_rr_timeslice;

extern int sched_rr_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos);

extern int sched_rt_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos);

extern int sysctl_numa_balancing(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);

#endif /* _SCHED_SYSCTL_H */