#ifndef _SCHED_RT_H
#define _SCHED_RT_H

#include <linux/sched/prio.h>

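/*
 * rt_prio()/rt_task() below test whether a kernel-internal priority value
 * or a task's effective priority falls in the real-time range.  Per
 * <linux/sched/prio.h>, lower numeric values mean higher priority:
 * prios 0..MAX_RT_PRIO-1 are real-time, MAX_RT_PRIO..MAX_PRIO-1 are
 * used by normal (fair-class) tasks.
 */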
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

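/*
 * Priority-inheritance (PI) hooks provided by the rt_mutex code.  When
 * CONFIG_RT_MUTEXES is disabled, the stubs in the #else branch below make
 * PI a no-op: the effective priority is simply normal_prio, there is no
 * top PI waiter, and a task is never considered PI-blocked.
 */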
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return tsk->pi_blocked_on != NULL;
}
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}

static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
	return 0;
}

static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	return NULL;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return false;
}
#endif

/*
 * Demote every real-time task back to SCHED_NORMAL; used by the magic
 * SysRq "nice all RT tasks" handler.
 */
extern void normalize_rt_tasks(void);

/*
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define RR_TIMESLICE		(100 * HZ / 1000)
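/*
 * RR_TIMESLICE is expressed in jiffies, so the value scales with HZ:
 * e.g. HZ=1000 -> 100 jiffies, HZ=250 -> 25, HZ=100 -> 10 (~100 ms each).
 */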

#endif /* _SCHED_RT_H */