Commit cd5eaf37 authored by Matthew Wilcox (Oracle); committed by Zheng Zengkai

mm: fix struct page layout on 32-bit systems

stable inclusion
from stable-5.10.38
commit cfddf6a685e3bbdba0c9976563810ecb118fa516
bugzilla: 51875
CVE: NA

--------------------------------

commit 9ddb3c14 upstream.

32-bit architectures which expect 8-byte alignment for 8-byte integers and
need 64-bit DMA addresses (arm, mips, ppc) had their struct page
inadvertently expanded in 2019.  When the dma_addr_t was added, it forced
the alignment of the union to 8 bytes, which inserted a 4 byte gap between
'flags' and the union.
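
To make the gap concrete, a minimal userspace model (all names below are
invented for the demo, not kernel types; build for a 32-bit target whose
ABI aligns 64-bit integers to 8 bytes, as arm, mips and ppc do, which
_Alignas(8) forces on any ILP32 toolchain):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct page_2019 {			/* layout after c25fff71 */
	unsigned long flags;		/* 4 bytes on ILP32 */
	union {
		_Alignas(8) uint64_t dma_addr;	/* stand-in for dma_addr_t */
		unsigned long words[5];
	};
};

struct page_fixed {			/* layout after this patch */
	unsigned long flags;
	union {
		unsigned long dma_addr[2];	/* natural long alignment */
		unsigned long words[5];
	};
};

int main(void)
{
	/* The 8-byte union alignment pushes the union to offset 8,
	 * leaving a 4-byte hole after 'flags'. */
	printf("2019:  union at %zu, sizeof %zu\n",
	       offsetof(struct page_2019, dma_addr),
	       sizeof(struct page_2019));
	/* Two unsigned longs pack flush against 'flags' at offset 4. */
	printf("fixed: union at %zu, sizeof %zu\n",
	       offsetof(struct page_fixed, dma_addr),
	       sizeof(struct page_fixed));
	return 0;
}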

Fix this by storing the dma_addr_t in one or two adjacent unsigned longs.
This restores the alignment to that of an unsigned long.  We always
store the low bits in the first word to prevent the PageTail bit from
being inadvertently set on a big endian platform.  If that happened,
get_user_pages_fast() racing against a page which was freed and
reallocated to the page_pool could dereference a bogus compound_head(),
which would be hard to trace back to this cause.
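
As a hypothetical sketch of that precondition (plain C, modelling the
big-endian word order by hand; words[0] stands in for the union word that
overlays compound_head, where bit 0 set means "tail page"):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma = 0x100001000ULL;	/* >4GB, page-aligned, bit 32 set */
	uint32_t words[2];

	/* A single 64-bit store on big endian puts the high word first,
	 * so bit 32 of the address lands in bit 0 of the word overlaying
	 * compound_head: the page looks like a compound tail. */
	words[0] = (uint32_t)(dma >> 32);
	words[1] = (uint32_t)dma;
	assert(words[0] & 1);

	/* Storing the low bits in the first word avoids this: DMA
	 * addresses are page-aligned, so bit 0 is always clear. */
	words[0] = (uint32_t)dma;
	words[1] = (uint32_t)(dma >> 32);
	assert(!(words[0] & 1));
	return 0;
}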

Link: https://lkml.kernel.org/r/20210510153211.1504886-1-willy@infradead.org
Fixes: c25fff71 ("mm: add dma_addr_t to struct page")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Matteo Croce <mcroce@linux.microsoft.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Acked-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent: 42e147e0
@@ -97,10 +97,10 @@ struct page {
 		};
 		struct {	/* page_pool used by netstack */
 			/**
-			 * @dma_addr: might require a 64-bit value even on
+			 * @dma_addr: might require a 64-bit value on
 			 * 32-bit architectures.
 			 */
-			dma_addr_t dma_addr;
+			unsigned long dma_addr[2];
 		};
 		struct {	/* slab, slob and slub */
 			union {
......
@@ -191,7 +191,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	return page->dma_addr;
+	dma_addr_t ret = page->dma_addr[0];
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+	return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	page->dma_addr[0] = addr;
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		page->dma_addr[1] = upper_32_bits(addr);
 }
 
 static inline bool is_page_pool_compiled_in(void)
......
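
A note on the '<< 16 << 16' in page_pool_get_dma_addr(): when dma_addr_t
is only 32 bits wide, the sizeof() test makes the branch dead code, but a
literal '<< 32' would still draw a shift-count-overflow warning at compile
time; two 16-bit shifts say the same thing warning-free, and the kernel's
upper_32_bits() is defined with the matching '>> 16 >> 16'. A hypothetical
userspace round-trip check of the scheme (get_dma()/set_dma() are demo
stand-ins for the helpers above):

#include <assert.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* as with CONFIG_ARCH_DMA_ADDR_T_64BIT */

static dma_addr_t get_dma(const unsigned long a[2])
{
	dma_addr_t ret = a[0];

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)a[1] << 16 << 16;	/* no '<< 32' warning */
	return ret;
}

static void set_dma(unsigned long a[2], dma_addr_t addr)
{
	a[0] = (unsigned long)addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		a[1] = (unsigned long)(addr >> 16 >> 16);
}

int main(void)
{
	unsigned long a[2] = { 0, 0 };

	/* Round-trips whether unsigned long is 32 or 64 bits. */
	set_dma(a, 0x123456789000ULL);
	assert(get_dma(a) == 0x123456789000ULL);
	return 0;
}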
@@ -172,8 +172,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  struct page *page,
 					  unsigned int dma_sync_size)
 {
+	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
 					 pool->p.offset, dma_sync_size,
 					 pool->p.dma_dir);
 }
@@ -224,7 +226,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 		put_page(page);
 		return NULL;
 	}
-	page->dma_addr = dma;
+	page_pool_set_dma_addr(page, dma);
 
 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -292,13 +294,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 		 */
 		goto skip_dma_unmap;
 
-	dma = page->dma_addr;
+	dma = page_pool_get_dma_addr(page);
 
-	/* When page is unmapped, it cannot be returned our pool */
+	/* When page is unmapped, it cannot be returned to our pool */
 	dma_unmap_page_attrs(pool->p.dev, dma,
 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
-	page->dma_addr = 0;
+	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
......