/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */
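
/*
 * These are the architecture-level fast paths behind down_read(),
 * down_write() and friends in <linux/rwsem.h>.  On contention they
 * defer to the shared slow paths (rwsem_down_read_failed() and
 * friends) built from the lib/rwsem.c code mentioned above.
 */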

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
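
/*
 * Guide to the count field for common values (32-bit case shown;
 * the 64-bit case is analogous with a 32-bit active mask):
 *
 * 0x00000000	unlocked, nobody waiting or attempting the lock
 * 0x0000000X	X readers active or attempting the lock, no waiters
 * 0xffff0001	1 reader active with waiters queued, or 1 writer
 *		active/attempting with no waiters (ACTIVE_WRITE_BIAS)
 * 0xffff0000	waiters queued, but no active or attempting lockers
 *
 * The low half counts active lockers; a negative count signals an
 * active writer or queued waiters, which is what sends the fast
 * paths below into the slow-path helpers.
 */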

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
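	/*
	 * A reader adds ACTIVE_READ_BIAS; a positive result means only
	 * readers hold the lock, while zero or negative means a writer
	 * is active or waiters are queued, so take the slow path.
	 */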
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

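/*
 * trylock for reading -- returns 1 if successful, 0 on contention
 */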
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

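	/*
	 * A writer claims the waiting and active biases in one add; the
	 * sum comes back as exactly ACTIVE_WRITE_BIAS only if the lock
	 * was previously free.
	 */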
	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

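/*
 * lock for writing, interruptible by fatal signals -- returns 0 if
 * locked, -EINTR if the sleeping writer was killed
 */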
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	return 0;
}

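/*
 * trylock for writing -- returns 1 if successful, 0 on contention
 */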
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
					  RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release(&sem->count);
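	/*
	 * A negative result with the active mask clear means waiters
	 * are queued and no lockers remain active, so wake them.
	 */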
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
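	/*
	 * Drop both biases the writer took; a negative result means
	 * waiters are still queued and must be woken.
	 */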
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

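	/*
	 * ACTIVE_WRITE_BIAS == WAITING_BIAS + ACTIVE_BIAS, so adding
	 * -RWSEM_WAITING_BIAS leaves exactly one ACTIVE_BIAS: the
	 * caller continues as a single reader.
	 */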
	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_RWSEM_H */