blob: 093eb694d9c157c717d56a685eb104dd03b0cdab [file] [log] [blame]
/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
8
9#include <linux/types.h>
10#include <linux/module.h>
11#include <linux/spinlock.h>
12#include <linux/init.h>
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -040013#include <linux/smp.h>
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070014#include <asm/io.h>
15
/* Number of busy-wait iterations before the lock loops below give up
 * and yield; tunable via the "spin_retry=" command line parameter. */
int spin_retry = 1000;

/**
 * spin_retry_setup - parse the "spin_retry=" kernel parameter
 * @str: parameter value (base auto-detected by simple_strtoul)
 *
 * Overrides the default retry count. Returns 1 to mark the option
 * as handled.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
27
/*
 * Slow path of arch_spin_lock(): spin until the lock can be taken.
 * The lock is free iff lp->owner_cpu == 0; it is acquired by storing
 * the one's complement of our cpu number (non-zero even for cpu 0).
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		/*
		 * Busy-wait only while the lock is free or the owning
		 * (virtual) cpu appears to be scheduled; spinning on a
		 * preempted owner would just burn cycles.
		 */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			/*
			 * NOTE(review): on LPAR the owner is presumably
			 * always running, so never fall through to the
			 * yield below — confirm.
			 */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* owner seems preempted (or retries exhausted): yield to it */
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070055
/*
 * Slow path of arch_spin_lock_flags(): like arch_spin_lock_wait(), but
 * interrupts are re-enabled (to the state saved in @flags, presumably by
 * the caller before it disabled irqs — confirm against callers) while
 * busy-waiting, and disabled again for every acquisition attempt so the
 * lock is always taken with irqs off.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	/* one's complement owner tag: non-zero even for cpu 0 */
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	local_irq_restore(flags);	/* spin with irqs (possibly) enabled */
	while (1) {
		owner = lp->owner_cpu;
		/* spin only if free or the owning vcpu appears scheduled */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;	/* acquired; irqs stay off */
				local_irq_restore(flags);
			}
			/* NOTE(review): LPAR owners presumably always run,
			 * so skip the yield — confirm. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* owner seems preempted (or retries exhausted): yield to it */
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;		/* acquired; irqs stay off */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
Hisashi Hifumi894cdde2008-01-26 14:11:28 +010088
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010089int arch_spin_trylock_retry(arch_spinlock_t *lp)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070090{
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -070091 unsigned int cpu = ~smp_processor_id();
92 int count;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070093
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -070094 for (count = spin_retry; count > 0; count--) {
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010095 if (arch_spin_is_locked(lp))
Christian Ehrhardt96567162006-03-09 17:33:49 -080096 continue;
Heiko Carstens3b4beb32008-01-26 14:11:03 +010097 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -070098 return 1;
99 }
100 return 0;
101}
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100102EXPORT_SYMBOL(arch_spin_trylock_retry);
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700103
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100104void arch_spin_relax(arch_spinlock_t *lock)
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -0700105{
106 unsigned int cpu = lock->owner_cpu;
Gerald Schaefer59b69782010-02-26 22:37:40 +0100107 if (cpu != 0) {
108 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
109 !smp_vcpu_scheduled(~cpu))
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -0400110 smp_yield_cpu(~cpu);
Gerald Schaefer59b69782010-02-26 22:37:40 +0100111 }
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -0700112}
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100113EXPORT_SYMBOL(arch_spin_relax);
Martin Schwidefsky3c1fcfe2006-09-30 23:27:45 -0700114
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100115void _raw_read_lock_wait(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700116{
117 unsigned int old;
118 int count = spin_retry;
119
120 while (1) {
121 if (count-- <= 0) {
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -0400122 smp_yield();
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700123 count = spin_retry;
124 }
Thomas Gleixnere5931942009-12-03 20:08:46 +0100125 if (!arch_read_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800126 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700127 old = rw->lock & 0x7fffffffU;
128 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
129 return;
130 }
131}
132EXPORT_SYMBOL(_raw_read_lock_wait);
133
/*
 * Slow path of read_lock_flags(): spin until a reader slot is free.
 * The low 31 bits of rw->lock count the readers; the CAS below bumps
 * that count while requiring the write bit (msb) to be clear.
 * @flags holds the caller's saved interrupt state: irqs are re-enabled
 * while spinning and disabled again for each acquisition attempt.
 */
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);	/* spin with irqs (possibly) enabled */
	while (1) {
		if (count-- <= 0) {
			/* retries exhausted: give other cpus a chance */
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		/* expected value: current reader count, write bit clear */
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;		/* reader acquired; irqs stay off */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
154
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100155int _raw_read_trylock_retry(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700156{
157 unsigned int old;
158 int count = spin_retry;
159
160 while (count-- > 0) {
Thomas Gleixnere5931942009-12-03 20:08:46 +0100161 if (!arch_read_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800162 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700163 old = rw->lock & 0x7fffffffU;
164 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
165 return 1;
166 }
167 return 0;
168}
169EXPORT_SYMBOL(_raw_read_trylock_retry);
170
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100171void _raw_write_lock_wait(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700172{
173 int count = spin_retry;
174
175 while (1) {
176 if (count-- <= 0) {
Martin Schwidefsky8b646bd2012-03-11 11:59:26 -0400177 smp_yield();
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700178 count = spin_retry;
179 }
Thomas Gleixnere5931942009-12-03 20:08:46 +0100180 if (!arch_write_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800181 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700182 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
183 return;
184 }
185}
186EXPORT_SYMBOL(_raw_write_lock_wait);
187
/*
 * Slow path of write_lock_flags(): spin until the rwlock is completely
 * free (no readers, no writer) and grab it by setting the msb.
 * @flags holds the caller's saved interrupt state: irqs are re-enabled
 * while spinning and disabled again for each acquisition attempt.
 */
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);	/* spin with irqs (possibly) enabled */
	while (1) {
		if (count-- <= 0) {
			/* retries exhausted: give other cpus a chance */
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		/* 0 -> 0x80000000: no readers, write bit now set */
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;		/* writer acquired; irqs stay off */
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
206
Thomas Gleixnerfb3a6bb2009-12-03 20:01:19 +0100207int _raw_write_trylock_retry(arch_rwlock_t *rw)
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700208{
209 int count = spin_retry;
210
211 while (count-- > 0) {
Thomas Gleixnere5931942009-12-03 20:08:46 +0100212 if (!arch_write_can_lock(rw))
Christian Ehrhardt96567162006-03-09 17:33:49 -0800213 continue;
Martin Schwidefsky951f22d2005-07-27 11:44:57 -0700214 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
215 return 1;
216 }
217 return 0;
218}
219EXPORT_SYMBOL(_raw_write_trylock_retry);