[PATCH] spin_unlock_bh() and preempt_check_resched()
In _spin_unlock_bh(lock):

	do { \
		_raw_spin_unlock(lock); \
		preempt_enable(); \
		local_bh_enable(); \
		__release(lock); \
	} while (0)
there is no reason to use preempt_enable() instead of a simple
preempt_enable_no_resched().

Since we know bottom halves are still disabled at this point, preempt_count
is non-zero and preempt_schedule() would always return at once, hence the
preempt_check_resched() done by preempt_enable() is useless here...
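
For reference, the macros involved expand roughly as follows (a simplified
sketch modelled on include/linux/preempt.h of that era, not the verbatim
definitions):

	#define preempt_enable_no_resched() \
	do { \
		barrier(); \
		dec_preempt_count(); \
	} while (0)

	#define preempt_check_resched() \
	do { \
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
			preempt_schedule(); \
	} while (0)

	#define preempt_enable() \
	do { \
		preempt_enable_no_resched(); \
		preempt_check_resched(); \
	} while (0)

With bottom halves still disabled, preempt_count() cannot be zero, so the
preempt_check_resched() step can never actually schedule anything here;
dropping it loses nothing.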
This fixes it by using preempt_enable_no_resched() instead of
preempt_enable(), and thus avoids the useless preempt_check_resched()
just before re-enabling bottom halves.
Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e895f3e..d6ba068 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -248,7 +248,7 @@
#define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \
_raw_spin_trylock(lock) ? \
- 1 : ({preempt_enable(); local_bh_enable(); 0;});})
+ 1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
#define _spin_lock(lock) \
do { \
@@ -383,7 +383,7 @@
#define _spin_unlock_bh(lock) \
do { \
_raw_spin_unlock(lock); \
- preempt_enable(); \
+ preempt_enable_no_resched(); \
local_bh_enable(); \
__release(lock); \
} while (0)
@@ -391,7 +391,7 @@
#define _write_unlock_bh(lock) \
do { \
_raw_write_unlock(lock); \
- preempt_enable(); \
+ preempt_enable_no_resched(); \
local_bh_enable(); \
__release(lock); \
} while (0)
@@ -423,8 +423,8 @@
#define _read_unlock_bh(lock) \
do { \
_raw_read_unlock(lock); \
+ preempt_enable_no_resched(); \
local_bh_enable(); \
- preempt_enable(); \
__release(lock); \
} while (0)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index e15ed17..0c3f9d8 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -294,7 +294,7 @@
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
_raw_spin_unlock(lock);
- preempt_enable();
+ preempt_enable_no_resched();
local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
@@ -318,7 +318,7 @@
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
_raw_read_unlock(lock);
- preempt_enable();
+ preempt_enable_no_resched();
local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);
@@ -342,7 +342,7 @@
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
_raw_write_unlock(lock);
- preempt_enable();
+ preempt_enable_no_resched();
local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
@@ -354,7 +354,7 @@
if (_raw_spin_trylock(lock))
return 1;
- preempt_enable();
+ preempt_enable_no_resched();
local_bh_enable();
return 0;
}