sched: Get rid of lock_depth

Neil Brown pointed out that lock_depth somehow escaped the BKL
removal work. Let's get rid of it now.

Note that the perf scripting utilities still have a bunch of
code for dealing with common_lock_depth in tracepoints; I have
left that in place in case anybody wants to use that code with
older kernels.
Suggested-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110422111910.456c0e84@bike.lwn.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/fork.c b/kernel/fork.c
index e7548de..aca6287 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1103,7 +1103,6 @@
posix_cpu_timers_init(p);
- p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index fe4706c..2c938e2 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -163,13 +163,6 @@
struct task_struct *owner;
/*
- * If we own the BKL, then don't spin. The owner of
- * the mutex might be waiting on us to release the BKL.
- */
- if (unlikely(current->lock_depth >= 0))
- break;
-
- /*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
diff --git a/kernel/sched.c b/kernel/sched.c
index 8cb0a57..9cde2dd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4121,12 +4121,6 @@
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
schedstat_inc(this_rq(), sched_count);
-#ifdef CONFIG_SCHEDSTATS
- if (unlikely(prev->lock_depth >= 0)) {
- schedstat_inc(this_rq(), rq_sched_info.bkl_count);
- schedstat_inc(prev, sched_info.bkl_count);
- }
-#endif
}
static void put_prev_task(struct rq *rq, struct task_struct *prev)
@@ -5852,11 +5846,8 @@
raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT)
- task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
task_thread_info(idle)->preempt_count = 0;
-#endif
+
/*
* The idle tasks have their own, simple scheduling class:
*/
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 3669bec6..a6710a1 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -296,9 +296,6 @@
P(ttwu_count);
P(ttwu_local);
- SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
- rq->rq_sched_info.bkl_count);
-
#undef P
#undef P64
#endif
@@ -441,7 +438,6 @@
P(se.statistics.wait_count);
PN(se.statistics.iowait_sum);
P(se.statistics.iowait_count);
- P(sched_info.bkl_count);
P(se.nr_migrations);
P(se.statistics.nr_migrations_cold);
P(se.statistics.nr_failed_migrations_affine);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 35d55a3..f925c45 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -53,7 +53,6 @@
"common_preempt_count",
"common_pid",
"common_tgid",
- "common_lock_depth",
FIELD_STRING_IP,
FIELD_STRING_RETIP,
FIELD_STRING_FUNC,