kernel-2.6.18-238.el5.src.rpm

From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Tue, 18 Dec 2007 12:22:03 +0100
Subject: [lockdep] sanitise CONFIG_PROVE_LOCKING
Message-id: 20071218112315.499310248@chello.nl
O-Subject: [RHEL5.2 PATCH 02/14] lockdep: sanitise CONFIG_PROVE_LOCKING
Bugzilla: 193729

Ensure that all of the lock dependency tracking code is under
CONFIG_PROVE_LOCKING.  This allows us to use the held lock tracking code for
other purposes.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
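
The pattern applied throughout the kernel/lockdep.c hunks below is plain
compile-time gating: the dependency-graph machinery (alloc_list_entry(),
the lock_chains table, the circular-dependency and deadlock checks) is
bracketed in #ifdef CONFIG_PROVE_LOCKING, while the held-lock bookkeeping
it used to be entangled with stays built unconditionally. As a minimal
standalone sketch of that split (the names below are illustrative, not
the kernel's, and the hand-written #define stands in for what Kconfig
would normally generate):

	#include <stdio.h>

	/* Illustrative stand-in for the Kconfig option; in a real kernel
	 * build this macro comes from the generated config headers, it is
	 * never defined by hand. */
	#define CONFIG_PROVE_LOCKING 1

	/* Unconditional part: cheap held-lock bookkeeping, always compiled. */
	static int held_locks;

	static void track_acquire(void) { held_locks++; }
	static void track_release(void) { held_locks--; }

	#ifdef CONFIG_PROVE_LOCKING
	/* Expensive part: dependency validation, compiled out entirely
	 * when the proving machinery is disabled. */
	static void validate_dependencies(void)
	{
		printf("checking dependency graph, %d lock(s) held\n",
		       held_locks);
	}
	#else
	/* No-op stub keeps call sites free of #ifdefs. */
	static void validate_dependencies(void) { }
	#endif

	int main(void)
	{
		track_acquire();
		validate_dependencies();
		track_release();
		return 0;
	}

The patch itself needs no such stubs, because the callers of the gated
functions (add_lock_to_list(), check_noncircular() and friends) end up
inside CONFIG_PROVE_LOCKING regions as well; the stub above only keeps
the sketch self-contained.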

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 003dab5..daf5905 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -56,6 +56,7 @@ static int lockdep_initialized;
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Allocate a lockdep entry. (assumes hash_lock held, returns
  * with NULL on failure)
@@ -71,6 +72,7 @@ static struct lock_list *alloc_list_entry(void)
 	}
 	return list_entries + nr_list_entries++;
 }
+#endif
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -100,7 +102,9 @@ LIST_HEAD(all_lock_classes);
 static struct list_head classhash_table[CLASSHASH_SIZE];
 
 unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -458,6 +462,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 	}
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Add a new dependency to the head of the list:
  */
@@ -515,6 +520,7 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 
 	return 0;
 }
+#endif
 
 static void print_kernel_version(void)
 {
@@ -523,6 +529,7 @@ static void print_kernel_version(void)
 		system_utsname.version);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -611,6 +618,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	}
 	return 1;
 }
+#endif
 
 static int very_verbose(struct lock_class *class)
 {
@@ -794,6 +802,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
@@ -1055,7 +1064,7 @@ out_bug:
 
 	return 0;
 }
-
+#endif
 
 /*
  * Is this the address of a static object:
@@ -1259,6 +1268,7 @@ out_unlock_set:
 	return class;
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 0 - in this case the new dependency chain is
@@ -1329,6 +1339,7 @@ cache_hit:
 
 	return 1;
 }
+#endif
 
 /*
  * We are building curr_chain_key incrementally, so double-check
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index fb524b0..e005940 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -94,7 +94,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
 	_raw_spin_lock(lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
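
The kernel/spinlock.c hunk widens the guard from CONFIG_PROVE_LOCKING to
CONFIG_LOCKDEP because the assumption named in the comment, that
interrupts stay disabled for the whole lock-acquire, is made by the
lockdep core as a whole (including the held-lock tracking this series
wants to reuse), not only by the dependency-proving code. The hand-coded
_raw_spin_lock_flags() path may transiently re-enable interrupts while
spinning, which lockdep's irq-state tracking would never see. A rough
standalone sketch of that slowpath shape (the irq helpers here are
hypothetical no-op stand-ins, not kernel APIs, so the sketch compiles on
its own):

	#include <stdatomic.h>

	/* Hypothetical stand-ins for the arch irq helpers; in the kernel
	 * these really mask and unmask interrupts. */
	static void irq_restore(unsigned long flags) { (void)flags; }
	static void irq_disable(void) { }

	struct sketch_spinlock { atomic_flag locked; };

	/* The shape of an arch _raw_spin_lock_flags() slowpath: while
	 * waiting for the lock it may drop back to the caller's saved irq
	 * state, i.e. re-enable interrupts, to keep irq latency down.
	 * Lockdep assumes the irq state cannot change during acquire,
	 * hence the #ifdef above takes the plain _raw_spin_lock() path
	 * whenever lockdep is built in. */
	static void sketch_spin_lock_flags(struct sketch_spinlock *lock,
					   unsigned long flags)
	{
		while (atomic_flag_test_and_set(&lock->locked)) {
			irq_restore(flags);	/* irqs may come back on here */
			irq_disable();		/* off again before retrying */
		}
	}

	int main(void)
	{
		struct sketch_spinlock lock = { ATOMIC_FLAG_INIT };

		sketch_spin_lock_flags(&lock, 0);
		return 0;
	}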