#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <linux/atomic.h>
#include <asm/asm.h>

typedef struct {
        atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)   { ATOMIC_LONG_INIT(i) }

#define local_read(l)   atomic_long_read(&(l)->a)
#define local_set(l, i) atomic_long_set(&(l)->a, (i))
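
/*
 * A local_t is normally embedded in per-CPU data and only ever modified
 * by its owning CPU, with preemption (or interrupts) disabled around the
 * update.  A minimal sketch, using a made-up per-CPU counter "hits":
 *
 *      static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);
 *
 *      preempt_disable();
 *      local_inc(this_cpu_ptr(&hits));
 *      preempt_enable();
 */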

static inline void local_inc(local_t *l)
{
        asm volatile(_ASM_INC "%0"
                     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
        asm volatile(_ASM_DEC "%0"
                     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
        asm volatile(_ASM_ADD "%1,%0"
                     : "+m" (l->a.counter)
                     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
        asm volatile(_ASM_SUB "%1,%0"
                     : "+m" (l->a.counter)
                     : "ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_SUB "%2,%0; sete %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : "ir" (i) : "memory");
        return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int local_dec_and_test(local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_DEC "%0; sete %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : : "memory");
        return c != 0;
}
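
/*
 * Illustrative use (the per-CPU counter "budget" and the helper
 * refill_budget() are hypothetical): consume one unit from a per-CPU
 * budget and act only when it reaches zero:
 *
 *      if (local_dec_and_test(this_cpu_ptr(&budget)))
 *              refill_budget();
 */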

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int local_inc_and_test(local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_INC "%0; sete %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : : "memory");
        return c != 0;
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int local_add_negative(long i, local_t *l)
{
        unsigned char c;

        asm volatile(_ASM_ADD "%2,%0; sets %1"
                     : "+m" (l->a.counter), "=qm" (c)
                     : "ir" (i) : "memory");
        return c;
}
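
/*
 * Illustrative use (the per-CPU counter "credit" and the helper
 * throttle_writer() are hypothetical): charge a cost against a signed
 * per-CPU credit counter and throttle once it drops below zero:
 *
 *      if (local_add_negative(-cost, this_cpu_ptr(&credit)))
 *              throttle_writer();
 */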

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
        long __i;
#ifdef CONFIG_M386
        unsigned long flags;
        if (unlikely(boot_cpu_data.x86 <= 3))
                goto no_xadd;
#endif
        /* Modern 486+ processor */
        __i = i;
        asm volatile(_ASM_XADD "%0, %1;"
                     : "+r" (i), "+m" (l->a.counter)
                     : : "memory");
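        /* xadd leaves the old counter value in "i"; old + __i is the new value */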
        return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
        local_irq_save(flags);
        __i = local_read(l);
        local_set(l, i + __i);
        local_irq_restore(flags);
        return i + __i;
#endif
}

static inline long local_sub_return(long i, local_t *l)
{
        return local_add_return(-i, l);
}

#define local_inc_return(l)  (local_add_return(1, l))
#define local_dec_return(l)  (local_sub_return(1, l))
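
/*
 * Illustrative use (the per-CPU counter "seq_ctr" is hypothetical):
 * hand out a per-CPU sequence number:
 *
 *      seq = local_inc_return(this_cpu_ptr(&seq_ctr));
 */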

#define local_cmpxchg(l, o, n) \
        (cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)                               \
({                                                              \
        long c, old;                                            \
        c = local_read((l));                                    \
        for (;;) {                                              \
                if (unlikely(c == (u)))                         \
                        break;                                  \
                old = local_cmpxchg((l), c, c + (a));           \
                if (likely(old == c))                           \
                        break;                                  \
                c = old;                                        \
        }                                                       \
        c != (u);                                               \
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)          local_inc(l)
#define __local_dec(l)          local_dec(l)
#define __local_add(i, l)       local_add((i), (l))
#define __local_sub(i, l)       local_sub((i), (l))
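
/*
 * Reading is the one operation that is safe from any CPU: a foreign CPU
 * may sum the per-CPU counters with local_read() (see
 * Documentation/local_ops.txt).  Sketch, reusing the made-up "hits"
 * counter from the example above:
 *
 *      long total = 0;
 *      int cpu;
 *
 *      for_each_online_cpu(cpu)
 *              total += local_read(&per_cpu(hits, cpu));
 */
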
#endif /* _ASM_X86_LOCAL_H */