/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>

struct ww_acquire_ctx;

/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
struct mutex {
	atomic_long_t		owner;
	spinlock_t		wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

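/*
 * The implementation in kernel/locking/mutex.c uses the low three bits of
 * ->owner as internal state flags (waiters pending, handoff, pickup), which
 * is why they are masked off here; the remaining bits are the task_struct
 * pointer of the current owner, or NULL when the mutex is unlocked.
 */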
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
}

/*
 * This is the control structure for tasks blocked on a mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
	struct list_head	list;
	struct task_struct	*task;
	struct ww_acquire_ctx	*ww_ctx;
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES

#define __DEBUG_MUTEX_INITIALIZER(lockname)				\
	, .magic = &lockname

extern void mutex_destroy(struct mutex *lock);

#else

# define __DEBUG_MUTEX_INITIALIZER(lockname)

static inline void mutex_destroy(struct mutex *lock) {}

#endif

/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to the unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
#define mutex_init(mutex)						\
do {									\
	static struct lock_class_key __key;				\
									\
	__mutex_init((mutex), #mutex, &__key);				\
} while (0)
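
/*
 * Illustrative sketch, not part of this header: a mutex embedded in a
 * dynamically allocated object must be set up with mutex_init() before
 * first use (never by memset or copying).  "struct my_device" and
 * my_device_setup() are hypothetical names used only for this example.
 */
struct my_device {
	struct mutex	io_lock;	/* protects users */
	int		users;
};

static inline void my_device_setup(struct my_device *dev)
{
	mutex_init(&dev->io_lock);	/* each init site gets its own lockdep class */
	dev->users = 0;
}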

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
		, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

#define __MUTEX_INITIALIZER(lockname) \
		{ .owner = ATOMIC_LONG_INIT(0) \
		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) \
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
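
/*
 * Illustrative sketch, not part of this header: a mutex at file scope is
 * normally defined and initialized in one step with DEFINE_MUTEX(), which
 * avoids a runtime mutex_init() call.  The name below is hypothetical.
 */
static DEFINE_MUTEX(example_registry_lock);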

extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);

/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline int mutex_is_locked(struct mutex *lock)
{
	/*
	 * XXX think about spin_is_locked
	 */
	return __mutex_owner(lock) != NULL;
}

/*
 * See kernel/locking/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/locking/mutex-design.txt.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);

extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
					unsigned int subclass);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)

#define mutex_lock_nest_lock(lock, nest_lock)				\
do {									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
} while (0)

#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);
extern void mutex_lock_io(struct mutex *lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
#endif
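
/*
 * Illustrative sketch, not part of this header: lockdep classes locks by
 * their init site, so holding two mutexes of the same class at once looks
 * like a self-deadlock unless the inner acquisition is annotated with a
 * distinct subclass.  The struct and function below are hypothetical;
 * callers must still impose a stable order (e.g. by address) between the
 * two objects to avoid a real ABBA deadlock.
 */
struct example_node {
	struct mutex	lock;
};

static inline void example_lock_pair(struct example_node *outer,
				     struct example_node *inner)
{
	mutex_lock(&outer->lock);				/* subclass 0 */
	mutex_lock_nested(&inner->lock, SINGLE_DEPTH_NESTING);	/* subclass 1 */
}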

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
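
/*
 * Illustrative sketch, not part of this header: mutex_trylock() returns 1
 * when the lock was acquired (spin_trylock() convention), so it can be
 * tested directly and paired with mutex_unlock() only on success.
 * example_try_update() is a hypothetical name.
 */
static inline int example_try_update(struct mutex *lock, int *counter)
{
	if (!mutex_trylock(lock))
		return 0;		/* contended: lock was not taken */

	(*counter)++;			/* critical section */
	mutex_unlock(lock);		/* only the acquiring task may unlock */
	return 1;
}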

extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
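
/*
 * Illustrative sketch, not part of this header: atomic_dec_and_mutex_lock()
 * decrements @cnt and returns non-zero, with the mutex held, only when the
 * count reached zero, so teardown of the last reference runs exactly once
 * under the lock.  The struct and function names below are hypothetical.
 */
struct example_object {
	atomic_t	refcount;
};

static inline void example_object_put(struct example_object *obj,
				      struct mutex *registry_lock)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, registry_lock))
		return;			/* other references remain */

	/* last reference dropped: unlink/free obj under registry_lock */
	mutex_unlock(registry_lock);
}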

/*
 * These values are chosen such that FAIL and SUCCESS match the
 * values of the regular mutex_trylock().
 */
enum mutex_trylock_recursive_enum {
	MUTEX_TRYLOCK_FAILED    = 0,
	MUTEX_TRYLOCK_SUCCESS   = 1,
	MUTEX_TRYLOCK_RECURSIVE,
};

/**
 * mutex_trylock_recursive - trylock variant that allows recursive locking
 * @lock: mutex to be locked
 *
 * This function should not be used, _ever_. It is purely for hysterical GEM
 * raisins, and once those are gone this will be removed.
 *
 * Returns:
 *  - MUTEX_TRYLOCK_FAILED    - trylock failed,
 *  - MUTEX_TRYLOCK_SUCCESS   - lock acquired,
 *  - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
 */
static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(__mutex_owner(lock) == current))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}

#endif /* __LINUX_MUTEX_H */