mm/sl[aou]b: Move freeing of kmem_cache structure to common code
The code that frees the kmem_cache structure is basically the same in
all slab allocators. Move it into the common kmem_cache_destroy()
function.
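
For reference, the success path of the common kmem_cache_destroy() now
ends up roughly as below. This is a simplified sketch based on the hunk
in mm/slab_common.c, not the verbatim code; the slab_mutex locking,
refcounting and the exact conditions around rcu_barrier() are elided:

	/* cache is off the slab_caches list and has no remaining users */
	rcu_barrier();
	__kmem_cache_destroy(s);	/* allocator-specific teardown */
	kmem_cache_free(kmem_cache, s);	/* release struct kmem_cache itself,
					   done here once instead of in each
					   of slab, slob and slub */

The per-allocator frees (slab's and slob's __kmem_cache_destroy(), and
slub's sysfs release paths) drop their local kmem_cache_free()/slob_free()
calls in favour of this single call in common code.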
Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
diff --git a/mm/slab.c b/mm/slab.c
index ef94799..8ca6ec6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2225,7 +2225,6 @@
kfree(l3);
}
}
- kmem_cache_free(kmem_cache, cachep);
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 5374150..d6deae9 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -154,6 +154,7 @@
rcu_barrier();
__kmem_cache_destroy(s);
+ kmem_cache_free(kmem_cache, s);
} else {
list_add(&s->list, &slab_caches);
printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
diff --git a/mm/slob.c b/mm/slob.c
index 7d272c3..cb4ab96 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -540,8 +540,6 @@
void __kmem_cache_destroy(struct kmem_cache *c)
{
- kmemleak_free(c);
- slob_free(c, sizeof(struct kmem_cache));
}
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
diff --git a/mm/slub.c b/mm/slub.c
index e0d1e04..6f932f7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -213,7 +213,6 @@
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
kfree(s->name);
- kmem_cache_free(kmem_cache, s);
}
#endif
@@ -5206,7 +5205,6 @@
struct kmem_cache *s = to_slab(kobj);
kfree(s->name);
- kmem_cache_free(kmem_cache, s);
}
static const struct sysfs_ops slab_sysfs_ops = {