Commit 060807f8 authored by Vlastimil Babka, committed by Linus Torvalds

mm, slub: make remaining slub_debug related attributes read-only

SLUB_DEBUG creates several files under /sys/kernel/slab/<cache>/ that can
be read to check if the respective debugging options are enabled for given
cache.  Some options, namely sanity_checks, trace, and failslab, can also be
enabled and disabled at runtime by writing to the files.

The runtime toggling is racy.  Some options disable __CMPXCHG_DOUBLE when
enabled, which means that in case of concurrent allocations, some can
still use __CMPXCHG_DOUBLE and some not, leading to potential corruption.
The s->flags field is also not updated or checked atomically.  The
simplest solution is to remove the runtime toggling.  The extended
slub_debug boot parameter syntax introduced by an earlier patch in this
series should allow the debugging configuration to be fine-tuned during
boot with the same granularity.
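
For reference, debug options are selected with single-letter flags on the
kernel command line (F = sanity checks, Z = red zoning, T = trace,
A = failslab), and the extended syntax lets ';'-separated blocks target
specific caches. A rough sketch of what boot-time fine-tuning can look like
(cache names chosen purely for illustration, semantics paraphrased from the
slub documentation rather than quoted from it):

    slub_debug=F                  # sanity checks for all caches
    slub_debug=F,dentry           # sanity checks for the dentry cache only
    slub_debug=FZ;T,kmalloc-64    # global flags in one block, a separate
                                  # block enabling trace for kmalloc-64
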
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Jann Horn <jannh@google.com>
Cc: Vijayanand Jitta <vjitta@codeaurora.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Link: http://lkml.kernel.org/r/20200610163135.17364-5-vbabka@suse.cz
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 32a6f409
@@ -116,11 +116,8 @@ options from the ``slub_debug`` parameter translate to the following files::
 	T	trace
 	A	failslab
 
-The sanity_checks, trace and failslab files are writable, so writing 1 or 0
-will enable or disable the option at runtime. The writes to trace and failslab
-may return -EINVAL if the cache is subject to slab merging. Careful with
-tracing: It may spew out lots of information and never stop if used on the
-wrong slab.
+Careful with tracing: It may spew out lots of information and never stop if
+used on the wrong slab.
 
 Slab merging
 ============
@@ -5040,20 +5040,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	return x + sprintf(buf + x, "\n");
 }
 
-#ifdef CONFIG_SLUB_DEBUG
-static int any_slab_objects(struct kmem_cache *s)
-{
-	int node;
-	struct kmem_cache_node *n;
-
-	for_each_kmem_cache_node(s, node, n)
-		if (atomic_long_read(&n->total_objects))
-			return 1;
-
-	return 0;
-}
-#endif
-
 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
@@ -5275,43 +5261,13 @@ static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
 }
-
-static ssize_t sanity_checks_store(struct kmem_cache *s,
-				const char *buf, size_t length)
-{
-	s->flags &= ~SLAB_CONSISTENCY_CHECKS;
-	if (buf[0] == '1') {
-		s->flags &= ~__CMPXCHG_DOUBLE;
-		s->flags |= SLAB_CONSISTENCY_CHECKS;
-	}
-	return length;
-}
-SLAB_ATTR(sanity_checks);
+SLAB_ATTR_RO(sanity_checks);
 
 static ssize_t trace_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
 }
-
-static ssize_t trace_store(struct kmem_cache *s, const char *buf,
-			   size_t length)
-{
-	/*
-	 * Tracing a merged cache is going to give confusing results
-	 * as well as cause other issues like converting a mergeable
-	 * cache into an umergeable one.
-	 */
-	if (s->refcount > 1)
-		return -EINVAL;
-
-	s->flags &= ~SLAB_TRACE;
-	if (buf[0] == '1') {
-		s->flags &= ~__CMPXCHG_DOUBLE;
-		s->flags |= SLAB_TRACE;
-	}
-	return length;
-}
-SLAB_ATTR(trace);
+SLAB_ATTR_RO(trace);
 
 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
 {
@@ -5375,19 +5331,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
 }
-
-static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
-							size_t length)
-{
-	if (s->refcount > 1)
-		return -EINVAL;
-
-	s->flags &= ~SLAB_FAILSLAB;
-	if (buf[0] == '1')
-		s->flags |= SLAB_FAILSLAB;
-	return length;
-}
-SLAB_ATTR(failslab);
+SLAB_ATTR_RO(failslab);
 #endif
 
 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
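
The behavioural change above hinges on the two attribute macros in
mm/slub.c: SLAB_ATTR() registers both a _show and a _store callback with a
writable mode, while SLAB_ATTR_RO() registers only the _show callback with
a read-only mode, so switching an attribute to SLAB_ATTR_RO() removes the
write path entirely. A paraphrased sketch of how those helpers are shaped
(not part of this diff; exact modes and wrappers may differ):

	struct slab_attribute {
		struct attribute attr;
		ssize_t (*show)(struct kmem_cache *s, char *buf);
		ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
	};

	/* read-only: no store callback, not writable from userspace */
	#define SLAB_ATTR_RO(_name) \
		static struct slab_attribute _name##_attr = \
			__ATTR(_name, 0400, _name##_show, NULL)

	/* read-write: both show and store callbacks are wired up */
	#define SLAB_ATTR(_name) \
		static struct slab_attribute _name##_attr = \
			__ATTR(_name, 0600, _name##_show, _name##_store)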