/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define ATOMIC_INIT(i) { (i) }

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v) READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
 unsigned int val; \
 \
 __asm__ __volatile__( \
 "1: llock %[val], [%[ctr]] \n" \
 " " #asm_op " %[val], %[val], %[i] \n" \
 " scond %[val], [%[ctr]] \n" \
 " bnz 1b \n" \
 : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
 : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
 [i] "ir" (i) \
 : "cc"); \
} \

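/*
 * Illustrative sketch (not part of the real expansion machinery): with
 * ATOMIC_OP(add, +=, add) the macro above generates roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock  %[val], [%[ctr]]	\n"
 *		"	add    %[val], %[val], %[i]	\n"
 *		"	scond  %[val], [%[ctr]]	\n"
 *		"	bnz    1b			\n"
 *		...);
 *	}
 *
 * i.e. an exclusive-load / modify / conditional-store loop that retries
 * until the store succeeds without an intervening write to v->counter.
 */
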
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
 unsigned int val; \
 \
 /* \
 * Explicit full memory barrier needed before/after as \
 * LLOCK/SCOND themselves don't provide any such semantics \
 */ \
 smp_mb(); \
 \
 __asm__ __volatile__( \
 "1: llock %[val], [%[ctr]] \n" \
 " " #asm_op " %[val], %[val], %[i] \n" \
 " scond %[val], [%[ctr]] \n" \
 " bnz 1b \n" \
 : [val] "=&r" (val) \
 : [ctr] "r" (&v->counter), \
 [i] "ir" (i) \
 : "cc"); \
 \
 smp_mb(); \
 \
 return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
 unsigned int val, orig; \
 \
 /* \
 * Explicit full memory barrier needed before/after as \
 * LLOCK/SCOND themselves don't provide any such semantics \
 */ \
 smp_mb(); \
 \
 __asm__ __volatile__( \
 "1: llock %[orig], [%[ctr]] \n" \
 " " #asm_op " %[val], %[orig], %[i] \n" \
 " scond %[val], [%[ctr]] \n" \
 " bnz 1b \n" \
 : [val] "=&r" (val), \
 [orig] "=&r" (orig) \
 : [ctr] "r" (&v->counter), \
 [i] "ir" (i) \
 : "cc"); \
 \
 smp_mb(); \
 \
 return orig; \
}

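/*
 * For example (sketch, assuming the ATOMIC_OPS(add, ...) instantiation
 * further below):
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *	int old = atomic_fetch_add(2, &v);	// old == 1, v.counter == 3
 *	int new = atomic_add_return(2, &v);	// new == 5
 *
 * Only the value-returning variants above are bracketed by smp_mb() and
 * hence fully ordered; the plain atomic_##op() helpers provide no
 * ordering guarantees of their own.
 */
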
#else /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating the atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
 /*
 * Independent of hardware support, all of the atomic_xxx() APIs need
 * to follow the same locking rules to make sure that a "hardware"
 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
 * sequence.
 *
 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
 * requires the locking.
 */
 unsigned long flags;

 atomic_ops_lock(flags);
 WRITE_ONCE(v->counter, i);
 atomic_ops_unlock(flags);
}

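/*
 * Illustrative race this locking prevents (sketch): without the lock,
 * CPU0 could be mid-way through an emulated atomic_add() -- it has read
 * v->counter and is about to write back counter+i -- when CPU1 performs
 * a bare atomic_set(). CPU0's subsequent write-back would then silently
 * overwrite CPU1's store, so even the single-store atomic_set() must
 * serialize against the other emulated atomic ops via atomic_ops_lock().
 */
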
#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
 unsigned long flags; \
 \
 atomic_ops_lock(flags); \
 v->counter c_op i; \
 atomic_ops_unlock(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
 unsigned long flags; \
 unsigned long temp; \
 \
 /* \
 * spin lock/unlock provides the needed smp_mb() before/after \
 */ \
 atomic_ops_lock(flags); \
 temp = v->counter; \
 temp c_op i; \
 v->counter = temp; \
 atomic_ops_unlock(flags); \
 \
 return temp; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
 unsigned long flags; \
 unsigned long orig; \
 \
 /* \
 * spin lock/unlock provides the needed smp_mb() before/after \
 */ \
 atomic_ops_lock(flags); \
 orig = v->counter; \
 v->counter c_op i; \
 atomic_ops_unlock(flags); \
 \
 return orig; \
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op) \
 ATOMIC_OP(op, c_op, asm_op) \
 ATOMIC_OP_RETURN(op, c_op, asm_op) \
 ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
 ATOMIC_OP(op, c_op, asm_op) \
 ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

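/*
 * The ATOMIC_OPS() instantiations above generate the following helpers,
 * regardless of whether the LLSC or the spinlock backend is used:
 *
 *	atomic_add/sub/and/andnot/or/xor(i, v)
 *	atomic_add_return(i, v), atomic_sub_return(i, v)
 *	atomic_fetch_add/sub/and/andnot/or/xor(i, v)
 *
 * Example usage (sketch; cleanup() is a hypothetical callback):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);		// wrapper over atomic_add(1, v), defined further below
 *	if (atomic_dec_return(&nr_users) == 0)
 *		cleanup();
 */
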
#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
 int temp;

 __asm__ __volatile__(
 " ld.di %0, [%1]"
 : "=r"(temp)
 : "r"(&v->counter)
 : "memory");
 return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
 __asm__ __volatile__(
 " st.di %0,[%1]"
 :
 : "r"(i), "r"(&v->counter)
 : "memory");
}

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
 __asm__ __volatile__( \
 " mov r2, %0\n" \
 " mov r3, %1\n" \
 " .word %2\n" \
 : \
 : "r"(i), "r"(&v->counter), "i"(asm_op) \
 : "r2", "r3", "memory"); \
} \

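/*
 * Sketch of what the EZNPS variant above does (descriptive only): the
 * operand value and the counter address are first staged in r2 and r3,
 * then ".word %2" emits the raw encoding of the platform's dedicated
 * atomic instruction (one of the CTOP_INST_*_DI_R2_R2_R3 constants used
 * in the ATOMIC_OPS() instantiations further below), which performs the
 * read-modify-write on [r3] with r2 as the operand, apparently leaving
 * the previous counter value in r2 -- that is how the _return and
 * fetch_ variants below recover it.
 */
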
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
 unsigned int temp = i; \
 \
 /* Explicit full memory barrier needed before/after */ \
 smp_mb(); \
 \
 __asm__ __volatile__( \
 " mov r2, %0\n" \
 " mov r3, %1\n" \
 " .word %2\n" \
 " mov %0, r2" \
 : "+r"(temp) \
 : "r"(&v->counter), "i"(asm_op) \
 : "r2", "r3", "memory"); \
 \
 smp_mb(); \
 \
 temp c_op i; \
 \
 return temp; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
 unsigned int temp = i; \
 \
 /* Explicit full memory barrier needed before/after */ \
 smp_mb(); \
 \
 __asm__ __volatile__( \
 " mov r2, %0\n" \
 " mov r3, %1\n" \
 " .word %2\n" \
 " mov %0, r2" \
 : "+r"(temp) \
 : "r"(&v->counter), "i"(asm_op) \
 : "r2", "r3", "memory"); \
 \
 smp_mb(); \
 \
 return temp; \
}

#define ATOMIC_OPS(op, c_op, asm_op) \
 ATOMIC_OP(op, c_op, asm_op) \
 ATOMIC_OP_RETURN(op, c_op, asm_op) \
 ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
 ATOMIC_OP(op, c_op, asm_op) \
 ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u) \
({ \
 int c, old; \
 \
 /* \
 * Explicit full memory barrier needed before/after as \
 * LLOCK/SCOND themselves don't provide any such semantics \
 */ \
 smp_mb(); \
 \
 c = atomic_read(v); \
 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
 c = old; \
 \
 smp_mb(); \
 \
 c; \
})

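/*
 * Typical use (sketch): the generic atomic_add_unless()/atomic_inc_not_zero()
 * wrappers build on the primitive above, e.g. to take a reference only while
 * an object is still live:
 *
 *	if (atomic_inc_not_zero(&obj->refcnt))	// obj/refcnt are hypothetical
 *		use(obj);
 */
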
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)


#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else /* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 * - The address HAS to be 64-bit aligned
 * - There are 2 semantics involved here:
 * = exclusive implies no interim update between load/store to same addr
 * = both words are observed/updated together: this is guaranteed even
 * for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 * is NOT required to use LLOCKD+SCONDD; STD suffices
 */

typedef struct {
 aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

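/*
 * Example usage (sketch; the counter name is made up for illustration):
 *
 *	static atomic64_t bytes_rx = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_rx);
 *	printk("rx: %lld\n", atomic64_read(&bytes_rx));
 *
 * Per the comment above, atomic64_read()/atomic64_set() rely on LDD/STD
 * updating both words together, while the RMW ops below use LLOCKD/SCONDD.
 */
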
static inline long long atomic64_read(const atomic64_t *v)
{
 unsigned long long val;

 __asm__ __volatile__(
 " ldd %0, [%1] \n"
 : "=r"(val)
 : "r"(&v->counter));

 return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
 /*
 * This could have been a simple assignment in "C" but would need
 * explicit volatile; otherwise gcc optimizers could elide the store,
 * which borked the atomic64 self-test.
 * In the inline asm version, the memory clobber is needed for the
 * exact same reason: to tell gcc about the store.
 *
 * This however is not needed for sibling atomic64_add() etc. since
 * both load/store are explicitly done in inline asm. As long as the
 * API is used for each access, gcc has no way to optimize away any
 * load/store.
 */
 __asm__ __volatile__(
 " std %0, [%1] \n"
 :
 : "r"(a), "r"(&v->counter)
 : "memory");
}

#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(long long a, atomic64_t *v) \
{ \
 unsigned long long val; \
 \
 __asm__ __volatile__( \
 "1: \n" \
 " llockd %0, [%1] \n" \
 " " #op1 " %L0, %L0, %L2 \n" \
 " " #op2 " %H0, %H0, %H2 \n" \
 " scondd %0, [%1] \n" \
 " bnz 1b \n" \
 : "=&r"(val) \
 : "r"(&v->counter), "ir"(a) \
 : "cc"); \
} \

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long atomic64_##op##_return(long long a, atomic64_t *v) \
{ \
 unsigned long long val; \
 \
 smp_mb(); \
 \
 __asm__ __volatile__( \
 "1: \n" \
 " llockd %0, [%1] \n" \
 " " #op1 " %L0, %L0, %L2 \n" \
 " " #op2 " %H0, %H0, %H2 \n" \
 " scondd %0, [%1] \n" \
 " bnz 1b \n" \
 : [val] "=&r"(val) \
 : "r"(&v->counter), "ir"(a) \
 : "cc"); /* memory clobber comes from smp_mb() */ \
 \
 smp_mb(); \
 \
 return val; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
{ \
 unsigned long long val, orig; \
 \
 smp_mb(); \
 \
 __asm__ __volatile__( \
 "1: \n" \
 " llockd %0, [%2] \n" \
 " " #op1 " %L1, %L0, %L3 \n" \
 " " #op2 " %H1, %H0, %H3 \n" \
 " scondd %1, [%2] \n" \
 " bnz 1b \n" \
 : "=&r"(orig), "=&r"(val) \
 : "r"(&v->counter), "ir"(a) \
 : "cc"); /* memory clobber comes from smp_mb() */ \
 \
 smp_mb(); \
 \
 return orig; \
}

#define ATOMIC64_OPS(op, op1, op2) \
 ATOMIC64_OP(op, op1, op2) \
 ATOMIC64_OP_RETURN(op, op1, op2) \
 ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
 long long prev;

 smp_mb();

 __asm__ __volatile__(
 "1: llockd %0, [%1] \n"
 " brne %L0, %L2, 2f \n"
 " brne %H0, %H2, 2f \n"
 " scondd %3, [%1] \n"
 " bnz 1b \n"
 "2: \n"
 : "=&r"(prev)
 : "r"(ptr), "ir"(expected), "r"(new)
 : "cc"); /* memory clobber comes from smp_mb() */

 smp_mb();

 return prev;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
 long long prev;

 smp_mb();

 __asm__ __volatile__(
 "1: llockd %0, [%1] \n"
 " scondd %2, [%1] \n"
 " bnz 1b \n"
 "2: \n"
 : "=&r"(prev)
 : "r"(ptr), "r"(new)
 : "cc"); /* memory clobber comes from smp_mb() */

 smp_mb();

 return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
 long long val;

 smp_mb();

 __asm__ __volatile__(
 "1: llockd %0, [%1] \n"
 " sub.f %L0, %L0, 1 # w0 - 1, set C on borrow\n"
 " sub.c %H0, %H0, 1 # if C set, w1 - 1\n"
 " brlt %H0, 0, 2f \n"
 " scondd %0, [%1] \n"
 " bnz 1b \n"
 "2: \n"
 : "=&r"(val)
 : "r"(&v->counter)
 : "cc"); /* memory clobber comes from smp_mb() */

 smp_mb();

 return val;
}

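/*
 * Example use (sketch; "slots" is a made-up counter): grab a slot only if
 * one is available, relying on the "returns old value minus 1" semantics
 * documented above:
 *
 *	if (atomic64_dec_if_positive(&slots) >= 0)
 *		take_slot();	// hypothetical; counter was decremented
 *	else
 *		back_off();	// hypothetical; counter was left untouched
 */
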
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * if (v != u) { v += a; ret = 1 } else { ret = 0 }
 * Returns 1 iff @v was not @u (i.e. if add actually happened)
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
 long long val;
 int op_done;

 smp_mb();

 __asm__ __volatile__(
 "1: llockd %0, [%2] \n"
 " mov %1, 1 \n"
 " brne %L0, %L4, 2f # continue to add since v != u \n"
 " breq.d %H0, %H4, 3f # return since v == u \n"
 " mov %1, 0 \n"
 "2: \n"
 " add.f %L0, %L0, %L3 \n"
 " adc %H0, %H0, %H3 \n"
 " scondd %0, [%2] \n"
 " bnz 1b \n"
 "3: \n"
 : "=&r"(val), "=&r" (op_done)
 : "r"(&v->counter), "r"(a), "r"(u)
 : "cc"); /* memory clobber comes from smp_mb() */

 smp_mb();

 return op_done;
}

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */

#endif /* !__ASSEMBLY__ */

#endif