-rw-r--r--  mm/memcontrol.c   | 23
-rw-r--r--  mm/slab_common.c  | 14
2 files changed, 27 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 014a4f1acf1c..d2da65c4cd84 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3264,6 +3264,12 @@ void memcg_register_cache(struct kmem_cache *s)
 	if (is_root_cache(s))
 		return;
 
+	/*
+	 * Holding the slab_mutex assures nobody will touch the memcg_caches
+	 * array while we are modifying it.
+	 */
+	lockdep_assert_held(&slab_mutex);
+
 	root = s->memcg_params->root_cache;
 	memcg = s->memcg_params->memcg;
 	id = memcg_cache_id(memcg);
@@ -3283,6 +3289,7 @@ void memcg_register_cache(struct kmem_cache *s)
 	 * before adding it to the memcg_slab_caches list, otherwise we can
 	 * fail to convert memcg_params_to_cache() while traversing the list.
 	 */
+	VM_BUG_ON(root->memcg_params->memcg_caches[id]);
 	root->memcg_params->memcg_caches[id] = s;
 
 	mutex_lock(&memcg->slab_caches_mutex);
@@ -3299,6 +3306,12 @@ void memcg_unregister_cache(struct kmem_cache *s)
 	if (is_root_cache(s))
 		return;
 
+	/*
+	 * Holding the slab_mutex assures nobody will touch the memcg_caches
+	 * array while we are modifying it.
+	 */
+	lockdep_assert_held(&slab_mutex);
+
 	root = s->memcg_params->root_cache;
 	memcg = s->memcg_params->memcg;
 	id = memcg_cache_id(memcg);
@@ -3312,6 +3325,7 @@ void memcg_unregister_cache(struct kmem_cache *s)
 	 * after removing it from the memcg_slab_caches list, otherwise we can
 	 * fail to convert memcg_params_to_cache() while traversing the list.
 	 */
+	VM_BUG_ON(!root->memcg_params->memcg_caches[id]);
 	root->memcg_params->memcg_caches[id] = NULL;
 
 	css_put(&memcg->css);
@@ -3464,22 +3478,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 						  struct kmem_cache *cachep)
 {
 	struct kmem_cache *new_cachep;
-	int idx;
 
 	BUG_ON(!memcg_can_account_kmem(memcg));
 
-	idx = memcg_cache_id(memcg);
-
 	mutex_lock(&memcg_cache_mutex);
-	new_cachep = cache_from_memcg_idx(cachep, idx);
-	if (new_cachep)
-		goto out;
-
 	new_cachep = kmem_cache_dup(memcg, cachep);
 	if (new_cachep == NULL)
 		new_cachep = cachep;
-
-out:
 	mutex_unlock(&memcg_cache_mutex);
 	return new_cachep;
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index db24ec48b946..f34707eeacc7 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -180,6 +180,18 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 	if (err)
 		goto out_unlock;
 
+	if (memcg) {
+		/*
+		 * Since per-memcg caches are created asynchronously on first
+		 * allocation (see memcg_kmem_get_cache()), several threads can
+		 * try to create the same cache, but only one of them may
+		 * succeed. Therefore if we get here and see the cache has
+		 * already been created, we silently return NULL.
+		 */
+		if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
+			goto out_unlock;
+	}
+
 	/*
 	 * Some allocators will constraint the set of valid flags to a subset
 	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
@@ -261,11 +273,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		list_del(&s->list);
 
 		if (!__kmem_cache_shutdown(s)) {
+			memcg_unregister_cache(s);
 			mutex_unlock(&slab_mutex);
 			if (s->flags & SLAB_DESTROY_BY_RCU)
 				rcu_barrier();
 
-			memcg_unregister_cache(s);
 			memcg_free_cache_params(s);
 			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
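
For readers outside the kernel tree: the slab_common.c hunk above closes a check-then-create race by moving the existence check for the per-memcg cache under the same slab_mutex that serializes cache registration. A minimal user-space analogue of that pattern, using pthreads and hypothetical names (cache_slot, create_cache, get_cache) standing in for the kernel's slab_mutex and memcg_caches[id], might look like this sketch:

/*
 * Illustrative user-space analogue, not part of the patch: the existence
 * check and the slot assignment both happen under one mutex, so two racing
 * threads cannot both install a cache. All names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t slot_mutex = PTHREAD_MUTEX_INITIALIZER; /* plays the role of slab_mutex */
static void *cache_slot;                                       /* plays the role of memcg_caches[id] */

static void *create_cache(void)
{
	return malloc(64); /* stand-in for kmem_cache_dup() */
}

/* Returns the installed cache; only one racing caller actually creates it. */
static void *get_cache(void)
{
	void *c;

	pthread_mutex_lock(&slot_mutex);
	if (cache_slot) {               /* check under the lock, as the patch does */
		c = cache_slot;
		pthread_mutex_unlock(&slot_mutex);
		return c;
	}
	c = create_cache();
	if (c)
		cache_slot = c;         /* "register" while still holding the lock */
	pthread_mutex_unlock(&slot_mutex);
	return c;
}

static void *worker(void *arg)
{
	printf("thread %ld got cache %p\n", (long)arg, get_cache());
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	free(cache_slot);
	return 0;
}

Built with gcc -pthread, every thread prints the same pointer because only the first caller to take the mutex installs a cache, which mirrors why the kernel's check must now run with slab_mutex held rather than before taking it.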