[NET]: prot_inuse cleanups and optimizations

1) Cleanups (all functions are now prefixed with sock_prot_inuse)

sock_prot_inc_use(prot) -> sock_prot_inuse_add(prot, 1)
sock_prot_dec_use(prot) -> sock_prot_inuse_add(prot, -1)
sock_prot_inuse()       -> sock_prot_inuse_get()
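
For example, an increment site converts like this (an illustrative sketch;
the matching decrement conversions appear in the hunks below):

	/* before */
	sock_prot_inc_use(sk->sk_prot);
	/* after: one helper for both directions, taking a signed delta */
	sock_prot_inuse_add(sk->sk_prot, 1);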

New functions:

sock_prot_inuse_init() and sock_prot_inuse_free(), which abstract the
underlying pcounter allocation and release.
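
These give registration code one place to allocate and release the
per-protocol storage. A minimal sketch of the intended call sites, assuming
the proto_register()/proto_unregister() paths in net/core/sock.c (error
handling abbreviated, not part of the hunks below):

	int proto_register(struct proto *prot, int alloc_slab)
	{
		/* Allocate the per-protocol 'inuse' pcounter; compiles to a
		 * 0-returning no-op when CONFIG_PROC_FS=n. */
		if (sock_prot_inuse_init(prot) != 0)
			return -ENOBUFS;
		/* ... existing slab and protocol list setup ... */
		return 0;
	}

	void proto_unregister(struct proto *prot)
	{
		/* ... existing teardown ... */
		sock_prot_inuse_free(prot);	/* no-op when CONFIG_PROC_FS=n */
	}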

2) If CONFIG_PROC_FS=n, we can zap the 'inuse' member from struct proto,
since there is no reader of the inuse value in that configuration.

This saves 1372 bytes on i386/SMP and some CPU cycles.
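
The only consumer is procfs itself. As an illustrative sketch (assuming the
/proc/net/sockstat seq_file handler in net/ipv4/proc.c), the read side
becomes:

	/* sockstat consumer; already compiled out when CONFIG_PROC_FS=n */
	seq_printf(seq, "TCP: inuse %d\n", sock_prot_inuse_get(&tcp_prot));
	seq_printf(seq, "UDP: inuse %d\n", sock_prot_inuse_get(&udp_prot));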

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 65ddb25..761bdc0 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -293,7 +293,7 @@
 	}
 
 	if (__sk_del_node_init(sk))
-		sock_prot_dec_use(sk->sk_prot);
+		sock_prot_inuse_add(sk->sk_prot, -1);
 	write_unlock_bh(lock);
 out:
 	if (sk->sk_state == TCP_LISTEN)
diff --git a/include/net/sock.h b/include/net/sock.h
index 3d938f6..786fae8 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -548,7 +548,9 @@
 	int			(*get_port)(struct sock *sk, unsigned short snum);
 
 	/* Keeping track of sockets in use */
+#ifdef CONFIG_PROC_FS
 	struct pcounter		inuse;
+#endif
 
 	/* Memory pressure */
 	void			(*enter_memory_pressure)(void);
@@ -584,9 +586,6 @@
 #endif
 };
 
-#define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
-#define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)
-
 extern int proto_register(struct proto *prot, int alloc_slab);
 extern void proto_unregister(struct proto *prot);
 
@@ -615,21 +614,42 @@
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
+
+#ifdef CONFIG_PROC_FS
+# define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
+# define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)
 /* Called with local bh disabled */
-static __inline__ void sock_prot_inc_use(struct proto *prot)
+static inline void sock_prot_inuse_add(struct proto *prot, int inc)
 {
-	pcounter_add(&prot->inuse, 1);
+	pcounter_add(&prot->inuse, inc);
 }
-
-static __inline__ void sock_prot_dec_use(struct proto *prot)
+static inline int sock_prot_inuse_init(struct proto *proto)
 {
-	pcounter_add(&prot->inuse, -1);
+	return pcounter_alloc(&proto->inuse);
 }
-
-static __inline__ int sock_prot_inuse(struct proto *proto)
+static inline int sock_prot_inuse_get(struct proto *proto)
 {
 	return pcounter_getval(&proto->inuse);
 }
+static inline void sock_prot_inuse_free(struct proto *proto)
+{
+	pcounter_free(&proto->inuse);
+}
+#else
+# define DEFINE_PROTO_INUSE(NAME)
+# define REF_PROTO_INUSE(NAME)
+static inline void sock_prot_inuse_add(struct proto *prot, int inc)
+{
+}
+static inline int sock_prot_inuse_init(struct proto *proto)
+{
+	return 0;
+}
+static inline void sock_prot_inuse_free(struct proto *proto)
+{
+}
+#endif
+
 
 /* With per-bucket locks this operation is not-atomic, so that
  * this version is not worse.
diff --git a/include/net/udp.h b/include/net/udp.h
index 93796be..c6669c0 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -115,7 +115,7 @@
 	write_lock_bh(&udp_hash_lock);
 	if (sk_del_node_init(sk)) {
 		inet_sk(sk)->num = 0;
-		sock_prot_dec_use(sk->sk_prot);
+		sock_prot_inuse_add(sk->sk_prot, -1);
 	}
 	write_unlock_bh(&udp_hash_lock);
 }