diff --git a/mm/slub.c b/mm/slub.c
index 7611f148ee81e1b7f4f86c19358e1fff07c75f9a..fe6d7be22ef09b30c33dff7c57665b750b0610bf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3237,8 +3237,9 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 
 	if (!rc) {
 		/*
-		 * We do the same lock strategy around sysfs_slab_add, see
-		 * __kmem_cache_create. Because this is pretty much the last
+		 * Since slab_attr_store may take the slab_mutex, we should
+		 * release the lock while removing the sysfs entry in order to
+		 * avoid a deadlock. Because this is pretty much the last
 		 * operation we do and the lock will be released shortly after
 		 * that in slab_common.c, we could just move sysfs_slab_remove
 		 * to a later point in common code. We should do that when we
@@ -3778,10 +3779,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 		return 0;
 
 	memcg_propagate_slab_attrs(s);
-	mutex_unlock(&slab_mutex);
 	err = sysfs_slab_add(s);
-	mutex_lock(&slab_mutex);
-
 	if (err)
 		kmem_cache_close(s);
 
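
The comment added in the first hunk describes a lock-order problem: shutdown holds
slab_mutex while tearing down the sysfs entry, and an in-flight attribute store
handler may itself try to take slab_mutex. Below is a minimal userspace sketch of
that interleaving, written with pthreads as a stand-in; it is an analogy under
assumed semantics, not kernel code, and every name in it is illustrative rather
than taken from mm/slub.c.

/*
 * Userspace analogy of the deadlock the patch comment describes:
 * "shutdown" holds the big mutex and waits for the in-flight "store"
 * handler to drain, while the "store" handler blocks trying to take
 * the same mutex. Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t big_mutex  = PTHREAD_MUTEX_INITIALIZER; /* analogue of slab_mutex */
static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER; /* protects active_stores */
static pthread_cond_t  drained    = PTHREAD_COND_INITIALIZER;
static int active_stores = 1;   /* one attribute store handler is already in flight */

/* analogue of slab_attr_store(): an in-flight sysfs write that needs the big mutex */
static void *attr_store(void *arg)
{
	sleep(1);                          /* force the bad interleaving: shutdown locks first */
	pthread_mutex_lock(&big_mutex);    /* blocks forever: shutdown already holds it */
	pthread_mutex_unlock(&big_mutex);

	pthread_mutex_lock(&drain_lock);
	if (--active_stores == 0)
		pthread_cond_signal(&drained);
	pthread_mutex_unlock(&drain_lock);
	return NULL;
}

/* analogue of a shutdown path that removes the sysfs entry without dropping the lock */
static void *cache_shutdown(void *arg)
{
	pthread_mutex_lock(&big_mutex);    /* held across the whole shutdown */

	/* removing the sysfs entry waits for active handlers to finish */
	pthread_mutex_lock(&drain_lock);
	while (active_stores)
		pthread_cond_wait(&drained, &drain_lock);  /* never signalled: deadlock */
	pthread_mutex_unlock(&drain_lock);

	pthread_mutex_unlock(&big_mutex);
	return NULL;
}

int main(void)
{
	pthread_t store, teardown;

	pthread_create(&store, NULL, attr_store, NULL);
	pthread_create(&teardown, NULL, cache_shutdown, NULL);
	pthread_join(teardown, NULL);      /* hangs; dropping the big mutex around the drain avoids this */
	pthread_join(store, NULL);
	puts("no deadlock");
	return 0;
}

Dropping the big mutex before waiting for the handler to drain (as the shutdown
path keeps doing around sysfs_slab_remove) breaks the cycle; the second hunk can
remove the equivalent unlock/relock dance around sysfs_slab_add because, per the
updated comment, only the remove side needs it.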