Commit 58b999d7 authored by Andrey Konovalov, committed by Linus Torvalds

kasan: adopt KUNIT tests to SW_TAGS mode

Now that we have the KASAN-KUnit test integration, it's easy to see that
some KASAN tests are not adapted to the SW_TAGS mode and are failing.

Adjust the allocation size for kasan_memchr() and kasan_memcmp() by
rounding it up to OOB_TAG_OFF so the bad access ends up in a separate
memory granule.
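
As an illustration of why the rounding matters (a standalone C sketch, not part
of this patch; the 16-byte granule size and the 24-byte example size are
assumptions used only for the demonstration), the first out-of-bounds byte only
lands in a differently tagged granule once the size is a multiple of the
granule size:

/*
 * Tag-based KASAN assigns one tag per 16-byte granule, so an out-of-bounds
 * byte that still lies inside the object's last, partially used granule
 * carries the object's tag and is not reported.  Rounding the size up to
 * the granule boundary pushes ptr[size] into the next granule.
 */
#include <stdio.h>

#define GRANULE 16      /* assumed SW_TAGS granule size */

static unsigned long granule_of(unsigned long byte_offset)
{
        return byte_offset / GRANULE;
}

int main(void)
{
        unsigned long size = 24;        /* assumed example allocation size */
        unsigned long rounded = (size + GRANULE - 1) & ~(GRANULE - 1UL);

        /* Unrounded size: print which granules hold the last byte and ptr[size]. */
        printf("size=%2lu: object ends in granule %lu, ptr[size] in granule %lu\n",
               size, granule_of(size - 1), granule_of(size));

        /* Rounded size: ptr[size] now falls in the following granule. */
        printf("size=%2lu: object ends in granule %lu, ptr[size] in granule %lu\n",
               rounded, granule_of(rounded - 1), granule_of(rounded));
        return 0;
}

With size = 24 both offsets fall into granule 1, so the bad access would share
the object's tag and go unreported under SW_TAGS; after rounding to 32 the
access crosses into granule 2, which carries a different tag.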

Add a new kmalloc_uaf_16() test that relies on a use-after-free, and a new
kasan_bitops_tags() test that is tailored to the tag-based mode, as it's
hard to adapt the existing kmalloc_oob_16() and kasan_bitops_generic()
(renamed from kasan_bitops()) tests without losing precision.

Disable kasan_global_oob() and kasan_alloca_oob_left/right() as the SW_TAGS
mode doesn't instrument globals or dynamic allocas.
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: David Gow <davidgow@google.com>
Link: https://lkml.kernel.org/r/76eee17b6531ca8b3ca92b240cb2fd23204aaff7.1603129942.git.andreyknvl@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 8de15e92
@@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test)
                 u64 words[2];
         } *ptr1, *ptr2;

+        /* This test is specifically crafted for the generic mode. */
+        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+                kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+                return;
+        }
+
         ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test)
         kfree(ptr2);
 }

+static void kmalloc_uaf_16(struct kunit *test)
+{
+        struct {
+                u64 words[2];
+        } *ptr1, *ptr2;
+
+        ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
+        ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+        kfree(ptr2);
+
+        KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
+        kfree(ptr1);
+}
+
 static void kmalloc_oob_memset_2(struct kunit *test)
 {
         char *ptr;
@@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test)
         volatile int i = 3;
         char *p = &global_array[ARRAY_SIZE(global_array) + i];

+        /* Only generic mode instruments globals. */
+        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+                kunit_info(test, "CONFIG_KASAN_GENERIC required");
+                return;
+        }
+
         KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
@@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test)
         char alloca_array[i];
         char *p = alloca_array - 1;

+        /* Only generic mode instruments dynamic allocas. */
+        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+                kunit_info(test, "CONFIG_KASAN_GENERIC required");
+                return;
+        }
+
         if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
                 kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
                 return;
@@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test)
         char alloca_array[i];
         char *p = alloca_array + i;

+        /* Only generic mode instruments dynamic allocas. */
+        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+                kunit_info(test, "CONFIG_KASAN_GENERIC required");
+                return;
+        }
+
         if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
                 kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
                 return;
@@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test)
                 return;
         }

+        if (OOB_TAG_OFF)
+                size = round_up(size, OOB_TAG_OFF);
+
         ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test)
                 return;
         }

+        if (OOB_TAG_OFF)
+                size = round_up(size, OOB_TAG_OFF);
+
         ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
         memset(arr, 0, sizeof(arr));
@@ -619,13 +666,50 @@ static void kasan_strings(struct kunit *test)
         KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
 }

-static void kasan_bitops(struct kunit *test)
+static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
+{
+        KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
+}
+
+static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
+{
+        KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
+        KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
+
+#if defined(clear_bit_unlock_is_negative_byte)
+        KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
+                                clear_bit_unlock_is_negative_byte(nr, addr));
+#endif
+}
+
+static void kasan_bitops_generic(struct kunit *test)
 {
+        long *bits;
+
+        /* This test is specifically crafted for the generic mode. */
+        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+                kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+                return;
+        }
+
         /*
          * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
          * this way we do not actually corrupt other memory.
          */
-        long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+        bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

         /*
@@ -633,55 +717,34 @@ static void kasan_bitops(struct kunit *test)
          * below accesses are still out-of-bounds, since bitops are defined to
          * operate on the whole long the bit is in.
          */
-        KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
+        kasan_bitops_modify(test, BITS_PER_LONG, bits);

         /*
          * Below calls try to access bit beyond allocated memory.
          */
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                kasan_int_result =
-                        test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
-#if defined(clear_bit_unlock_is_negative_byte)
-        KUNIT_EXPECT_KASAN_FAIL(test,
-                kasan_int_result = clear_bit_unlock_is_negative_byte(
-                        BITS_PER_LONG + BITS_PER_BYTE, bits));
-#endif
+        kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+        kfree(bits);
+}
+
+static void kasan_bitops_tags(struct kunit *test)
+{
+        long *bits;
+
+        /* This test is specifically crafted for the tag-based mode. */
+        if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+                kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
+                return;
+        }
+
+        /* Allocation size will be rounded to up granule size, which is 16. */
+        bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
+
+        /* Do the accesses past the 16 allocated bytes. */
+        kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
+        kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);

         kfree(bits);
 }
@@ -728,6 +791,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
         KUNIT_CASE(kmalloc_oob_krealloc_more),
         KUNIT_CASE(kmalloc_oob_krealloc_less),
         KUNIT_CASE(kmalloc_oob_16),
+        KUNIT_CASE(kmalloc_uaf_16),
         KUNIT_CASE(kmalloc_oob_in_memset),
         KUNIT_CASE(kmalloc_oob_memset_2),
         KUNIT_CASE(kmalloc_oob_memset_4),
@@ -751,7 +815,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
         KUNIT_CASE(kasan_memchr),
         KUNIT_CASE(kasan_memcmp),
         KUNIT_CASE(kasan_strings),
-        KUNIT_CASE(kasan_bitops),
+        KUNIT_CASE(kasan_bitops_generic),
+        KUNIT_CASE(kasan_bitops_tags),
         KUNIT_CASE(kmalloc_double_kzfree),
         KUNIT_CASE(vmalloc_oob),
         {}