Commit 6a126704 authored by Dennis Zhou, committed by Zheng Zengkai

percpu: flush tlb in pcpu_reclaim_populated()

mainline inclusion
from mainline-v5.14-rc1
commit 93274f1d
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BE79
CVE: NA

-------------------------------------------------
Prior to "percpu: implement partial chunk depopulation",
pcpu_depopulate_chunk() was called only on the destruction path. This
meant the virtual address range was on its way back to vmalloc which
will handle flushing the tlbs for us.

However, with pcpu_reclaim_populated(), we are now calling
pcpu_depopulate_chunk() during the active lifecycle of a chunk.
Therefore, we need to flush the tlb as well otherwise we can end up
accessing the wrong page through an invalid tlb mapping as reported in
[1].

[1] https://lore.kernel.org/lkml/20210702191140.GA3166599@roeck-us.net/
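
For context, the flush primitive wired up here is not new on the vmalloc-backed
side: mm/percpu-vm.c already implements pcpu_post_unmap_tlb_flush() on top of
flush_tlb_kernel_range(), which is why the diff below only adds a forward
declaration in mm/percpu.c and a no-op stub in mm/percpu-km.c (the km backend
uses the kernel linear mapping, so there are no vmalloc mappings to flush).
A sketch of that pre-existing helper, shown for reference only (it is not a
hunk of this backport):

    static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
					  int page_start, int page_end)
    {
	    /* shoot down stale translations across every unit's copy of the range */
	    flush_tlb_kernel_range(
		    pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		    pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
    }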

Fixes: f1833241 ("percpu: implement partial chunk depopulation")
Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
(cherry picked from commit 93274f1d)
Conflicts:
	mm/percpu.c
Small content conflicts because mainline-v5.14-rc1 commit
faf65dde reworked memcg accounting.
Signed-off-by: Yuanzheng Song <songyuanzheng@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 67d02b4d
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -32,6 +32,12 @@
 
 #include <linux/log2.h>
 
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
+				      int page_start, int page_end)
+{
+	/* nothing */
+}
+
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
 			       int page_start, int page_end, gfp_t gfp)
 {
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -302,6 +302,9 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk.
  *
+ * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the
+ * region back to vmalloc() which will lazily flush the tlb.
+ *
  * CONTEXT:
  * pcpu_alloc_mutex.
  */
@@ -323,8 +326,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
 
 	pcpu_unmap_pages(chunk, pages, page_start, page_end);
 
-	/* no need to flush tlb, vmalloc will handle it lazily */
-
 	pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1579,6 +1579,7 @@ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
  *
  * pcpu_populate_chunk		- populate the specified range of a chunk
  * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
+ * pcpu_post_unmap_tlb_flush	- flush tlb for the specified range of a chunk
  * pcpu_create_chunk		- create a new chunk
  * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
  * pcpu_addr_to_page		- translate address to physical address
...@@ -1590,6 +1591,8 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, ...@@ -1590,6 +1591,8 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
int page_start, int page_end); int page_start, int page_end);
static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type, static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
gfp_t gfp); gfp_t gfp);
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
int page_start, int page_end);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr); static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
@@ -2137,11 +2140,12 @@ static void pcpu_reclaim_populated(enum pcpu_chunk_type type)
 	struct list_head *pcpu_slot = pcpu_chunk_list(type);
 	struct pcpu_chunk *chunk;
 	struct pcpu_block_md *block;
+	int freed_page_start, freed_page_end;
 	int i, end;
+	bool reintegrate;
 
 	spin_lock_irq(&pcpu_lock);
 
-restart:
 	/*
 	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
 	 * longer discoverable to allocations whom may populate pages. The only
@@ -2157,6 +2161,9 @@ static void pcpu_reclaim_populated(enum pcpu_chunk_type type)
 		 * Scan chunk's pages in the reverse order to keep populated
 		 * pages close to the beginning of the chunk.
 		 */
+		freed_page_start = chunk->nr_pages;
+		freed_page_end = 0;
+		reintegrate = false;
 		for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
 			/* no more work to do */
 			if (chunk->nr_empty_pop_pages == 0)
@@ -2165,8 +2172,8 @@ static void pcpu_reclaim_populated(enum pcpu_chunk_type type)
 			/* reintegrate chunk to prevent atomic alloc failures */
 			if (pcpu_nr_empty_pop_pages[type] <
 			    PCPU_EMPTY_POP_PAGES_HIGH) {
-				pcpu_reintegrate_chunk(chunk);
-				goto restart;
+				reintegrate = true;
+				goto end_chunk;
 			}
 
 			/*
@@ -2195,16 +2202,29 @@ static void pcpu_reclaim_populated(enum pcpu_chunk_type type)
 			spin_lock_irq(&pcpu_lock);
 			pcpu_chunk_depopulated(chunk, i + 1, end + 1);
+			freed_page_start = min(freed_page_start, i + 1);
+			freed_page_end = max(freed_page_end, end + 1);
 
 			/* reset the range and continue */
 			end = -1;
 		}
 
-		if (chunk->free_bytes == pcpu_unit_size)
+end_chunk:
+		/* batch tlb flush per chunk to amortize cost */
+		if (freed_page_start < freed_page_end) {
+			spin_unlock_irq(&pcpu_lock);
+			pcpu_post_unmap_tlb_flush(chunk,
+						  freed_page_start,
+						  freed_page_end);
+			cond_resched();
+			spin_lock_irq(&pcpu_lock);
+		}
+
+		if (reintegrate || chunk->free_bytes == pcpu_unit_size)
 			pcpu_reintegrate_chunk(chunk);
 		else
-			list_move(&chunk->list,
+			list_move_tail(&chunk->list,
 				  &pcpu_slot[pcpu_sidelined_slot]);
 	}
 
 	spin_unlock_irq(&pcpu_lock);
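
The core trick in the pcpu_reclaim_populated() hunks above is to accumulate the
union of all ranges freed from a chunk as one running interval
[freed_page_start, freed_page_end) and then pay for a single flush per chunk,
instead of flushing after every depopulated range. A minimal, self-contained
userspace sketch of that batching pattern (the hypothetical flush_range()
stands in for pcpu_post_unmap_tlb_flush(); this illustrates the idea and is not
kernel code):

    #include <stdio.h>

    #define NR_PAGES 16

    /* hypothetical stand-in for the per-chunk tlb flush */
    static void flush_range(int start, int end)
    {
	    printf("one batched flush of pages [%d, %d)\n", start, end);
    }

    int main(void)
    {
	    /* [start, end) ranges depopulated by one reverse scan of a chunk */
	    struct { int start, end; } freed[] = {
		    { 11, 13 }, { 5, 8 }, { 2, 3 },
	    };
	    int freed_page_start = NR_PAGES;	/* empty interval: start > end */
	    int freed_page_end = 0;

	    /* widen the running interval instead of flushing each range */
	    for (unsigned i = 0; i < sizeof(freed) / sizeof(freed[0]); i++) {
		    if (freed[i].start < freed_page_start)
			    freed_page_start = freed[i].start;
		    if (freed[i].end > freed_page_end)
			    freed_page_end = freed[i].end;
	    }

	    /* batch tlb flush per chunk to amortize cost */
	    if (freed_page_start < freed_page_end)
		    flush_range(freed_page_start, freed_page_end);

	    return 0;
    }

The trade-off is over-flushing (pages inside the interval that were never
freed get flushed too) in exchange for a bounded number of expensive flush
operations, which is the same amortization the kernel comment "batch tlb flush
per chunk to amortize cost" describes.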