提交 7ca33333 编写于 作者: Andrey Konovalov 提交者: Zheng Zengkai

kasan: rename get_alloc/free_info

mainline inclusion
from mainline-v5.11-rc1
commit 6476792f
category: bugfix
bugzilla: 187796, https://gitee.com/openeuler/kernel/issues/I5W6YV
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=6476792f1015a356e6864076c210b328b64d08cc

--------------------------------

Rename get_alloc_info() and get_free_info() to kasan_get_alloc_meta() and
kasan_get_free_meta() to better reflect what those do and avoid confusion
with kasan_set_free_info().

No functional changes.

Link: https://lkml.kernel.org/r/27b7c036b754af15a2839e945f6d8bfce32b4c2f.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/Ib6e4ba61c8b12112b403d3479a9799ac8fff8de1
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/kasan/generic.c
	mm/kasan/quarantine.c
	mm/kasan/report.c
	mm/kasan/report_sw_tags.c
	mm/kasan/sw_tags.c
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 f43965ff
......@@ -282,14 +282,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache)
sizeof(struct kasan_free_meta) : 0);
}
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
const void *object)
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object)
{
return (void *)object + cache->kasan_info.alloc_meta_offset;
}
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
const void *object)
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object)
{
BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
return (void *)object + cache->kasan_info.free_meta_offset;
......@@ -366,13 +366,13 @@ static u8 assign_tag(struct kmem_cache *cache, const void *object,
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
struct kasan_alloc_meta *alloc_info;
struct kasan_alloc_meta *alloc_meta;
if (!(cache->flags & SLAB_KASAN))
return (void *)object;
alloc_info = get_alloc_info(cache, object);
__memset(alloc_info, 0, sizeof(*alloc_info));
alloc_meta = kasan_get_alloc_meta(cache, object);
__memset(alloc_meta, 0, sizeof(*alloc_meta));
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
object = set_tag(object,
......@@ -476,7 +476,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
KASAN_KMALLOC_REDZONE);
if (cache->flags & SLAB_KASAN)
kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);
kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
return set_tag(object, tag);
}
......
......@@ -330,7 +330,7 @@ void kasan_record_aux_stack(void *addr)
{
struct page *page = kasan_addr_to_page(addr);
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_info;
struct kasan_alloc_meta *alloc_meta;
void *object;
if (is_kfence_address(addr) || !(page && PageSlab(page)))
......@@ -338,15 +338,15 @@ void kasan_record_aux_stack(void *addr)
cache = page->slab_cache;
object = nearest_obj(cache, page, addr);
alloc_info = get_alloc_info(cache, object);
if (!alloc_info)
alloc_meta = kasan_get_alloc_meta(cache, object);
if (!alloc_meta)
return;
/*
* record the last two call_rcu() call stacks.
*/
alloc_info->aux_stack[1] = alloc_info->aux_stack[0];
alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}
void kasan_set_free_info(struct kmem_cache *cache,
......@@ -354,7 +354,7 @@ void kasan_set_free_info(struct kmem_cache *cache,
{
struct kasan_free_meta *free_meta;
free_meta = get_free_info(cache, object);
free_meta = kasan_get_free_meta(cache, object);
kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
/*
......@@ -368,5 +368,5 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
{
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
return NULL;
return &get_free_info(cache, object)->free_track;
return &kasan_get_free_meta(cache, object)->free_track;
}
......@@ -134,10 +134,10 @@ struct kasan_free_meta {
#endif
};
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
const void *object);
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
const void *object);
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object);
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
......
......@@ -178,7 +178,7 @@ void quarantine_put(struct kmem_cache *cache, void *object)
unsigned long flags;
struct qlist_head *q;
struct qlist_head temp = QLIST_INIT;
struct kasan_free_meta *info = get_free_info(cache, object);
struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
/*
* Note: irq must be disabled until after we move the batch to the
......@@ -195,7 +195,7 @@ void quarantine_put(struct kmem_cache *cache, void *object)
local_irq_restore(flags);
return;
}
qlist_put(q, &info->quarantine_link, cache->size);
qlist_put(q, &meta->quarantine_link, cache->size);
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp);
......
......@@ -172,12 +172,12 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
static void describe_object(struct kmem_cache *cache, void *object,
const void *addr, u8 tag)
{
struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object);
if (cache->flags & SLAB_KASAN) {
struct kasan_track *free_track;
print_track(&alloc_info->alloc_track, "Allocated");
print_track(&alloc_meta->alloc_track, "Allocated");
pr_err("\n");
free_track = kasan_get_free_track(cache, object, tag);
if (free_track) {
......@@ -186,14 +186,14 @@ static void describe_object(struct kmem_cache *cache, void *object,
}
#ifdef CONFIG_KASAN_GENERIC
if (alloc_info->aux_stack[0]) {
if (alloc_meta->aux_stack[0]) {
pr_err("Last call_rcu():\n");
print_stack(alloc_info->aux_stack[0]);
print_stack(alloc_meta->aux_stack[0]);
pr_err("\n");
}
if (alloc_info->aux_stack[1]) {
if (alloc_meta->aux_stack[1]) {
pr_err("Second to last call_rcu():\n");
print_stack(alloc_info->aux_stack[1]);
print_stack(alloc_meta->aux_stack[1]);
pr_err("\n");
}
#endif
......
......@@ -168,7 +168,7 @@ void kasan_set_free_info(struct kmem_cache *cache,
struct kasan_alloc_meta *alloc_meta;
u8 idx = 0;
alloc_meta = get_alloc_info(cache, object);
alloc_meta = kasan_get_alloc_meta(cache, object);
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
idx = alloc_meta->free_track_idx;
......@@ -185,7 +185,7 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
struct kasan_alloc_meta *alloc_meta;
int i = 0;
alloc_meta = get_alloc_info(cache, object);
alloc_meta = kasan_get_alloc_meta(cache, object);
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
......
......@@ -51,7 +51,7 @@ const char *get_bug_type(struct kasan_access_info *info)
if (page && PageSlab(page)) {
cache = page->slab_cache;
object = nearest_obj(cache, page, (void *)addr);
alloc_meta = get_alloc_info(cache, object);
alloc_meta = kasan_get_alloc_meta(cache, object);
for (i = 0; i < KASAN_NR_FREE_STACKS; i++)
if (alloc_meta->free_pointer_tag[i] == tag)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册