Commit eb5cf1cc authored by Dave Chinner, committed by Zheng Zengkai

xfs: remove kmem_alloc_io()

mainline-inclusion
from mainline-v5.14-rc4
commit 98fe2c3c
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4KIAO
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=98fe2c3cef21b784e2efd1d9d891430d95b4f073

-------------------------------------------------

Since commit 59bb4798 ("mm, sl[aou]b: guarantee natural alignment
for kmalloc(power-of-two)"), the core slab code guarantees slab
alignment in all situations sufficient for IO purposes (i.e. a
minimum of 512 byte alignment for >= 512 byte sized heap
allocations), so we no longer need the workaround in the XFS code
to provide this guarantee.

Replace the use of kmem_alloc_io() with kmem_alloc() or
kmem_alloc_large() appropriately, and remove the kmem_alloc_io()
interface altogether.
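
Concretely, the property being relied on is that a heap allocation of at
least 512 bytes now comes back at least 512 byte aligned, which is all
that sector-granularity IO needs. A minimal sketch of what callers are
left with (illustrative only; alloc_sector_buf() is a made-up name, not
part of this patch):

/*
 * Illustrative sketch, not part of the patch.  Relies on the natural
 * alignment guarantee from commit 59bb4798: heap allocations of
 * >= 512 bytes are at least 512 byte aligned, which is sufficient for
 * sector-sized IO, so no align_mask, no alignment check and no
 * vmalloc fallback are needed for sub-page buffers.
 */
static void *
alloc_sector_buf(size_t size, xfs_km_flags_t flags)
{
        ASSERT(size >= 512 && size < PAGE_SIZE);

        return kmem_alloc(size, flags);
}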
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Guo Xuenan <guoxuenan@huawei.com>
Reviewed-by: Lihong Kou <koulihong@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 97316767
fs/xfs/kmem.c
@@ -56,31 +56,6 @@ __kmem_vmalloc(size_t size, xfs_km_flags_t flags)
 	return ptr;
 }
 
-/*
- * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
- * to the @align_mask. We only guarantee alignment up to page size, we'll clamp
- * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
- * aligned region.
- */
-void *
-kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
-{
-	void	*ptr;
-
-	trace_kmem_alloc_io(size, flags, _RET_IP_);
-
-	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
-		align_mask = PAGE_SIZE - 1;
-
-	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
-	if (ptr) {
-		if (!((uintptr_t)ptr & align_mask))
-			return ptr;
-		kfree(ptr);
-	}
-	return __kmem_vmalloc(size, flags);
-}
-
 void *
 kmem_alloc_large(size_t size, xfs_km_flags_t flags)
 {
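
The hunk above ends at the head of kmem_alloc_large(), the interface the
log code below is switched to. For context, a sketch of its behaviour,
reconstructed from fs/xfs/kmem.c of the same era (details may differ):
try the regular heap first and, only if that fails, fall back to
__kmem_vmalloc(), whose result is always PAGE_SIZE aligned.

/*
 * Reconstructed sketch of kmem_alloc_large() around this series; the
 * in-tree version may differ in detail.
 */
void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
        void	*ptr;

        trace_kmem_alloc_large(size, flags, _RET_IP_);

        /* Try a regular heap allocation first... */
        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
        if (ptr)
                return ptr;
        /* ...and fall back to vmalloc for large or fragmented cases. */
        return __kmem_vmalloc(size, flags);
}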
fs/xfs/kmem.h
@@ -57,7 +57,6 @@ kmem_flags_convert(xfs_km_flags_t flags)
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
 extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
 static inline void kmem_free(const void *ptr)
 {
fs/xfs/xfs_buf.c
@@ -361,7 +361,7 @@ xfs_buf_allocate_memory(
 	unsigned short		page_count, i;
 	xfs_off_t		start, end;
 	int			error;
-	xfs_km_flags_t		kmflag_mask = 0;
+	xfs_km_flags_t		kmflag_mask = KM_NOFS;
 
 	/*
 	 * assure zeroed buffer for non-read cases.
@@ -378,9 +378,7 @@
 	 */
 	size = BBTOB(bp->b_length);
 	if (size < PAGE_SIZE) {
-		int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-		bp->b_addr = kmem_alloc_io(size, align_mask,
-					   KM_NOFS | kmflag_mask);
+		bp->b_addr = kmem_alloc(size, kmflag_mask);
 		if (!bp->b_addr) {
 			/* low memory - use alloc_page loop instead */
 			goto use_alloc_page;
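
The initializer change above folds KM_NOFS into kmflag_mask from the
start, so the sub-page kmem_alloc() call never recurses into filesystem
reclaim. A partial paraphrase of the flag translation from
fs/xfs/kmem.h, trimmed to the flags used here (the real helper handles
more cases):

/*
 * Partial paraphrase of kmem_flags_convert(), illustrative only; the
 * real helper in fs/xfs/kmem.h handles additional flags.
 */
static inline gfp_t
kmem_flags_convert_sketch(xfs_km_flags_t flags)
{
        gfp_t	lflags = GFP_KERNEL | __GFP_NOWARN;

        if (flags & KM_NOFS)		/* don't recurse into fs reclaim */
                lflags &= ~__GFP_FS;
        if (flags & KM_ZERO)		/* return a zeroed buffer */
                lflags |= __GFP_ZERO;
        return lflags;
}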
fs/xfs/xfs_buf.h
@@ -353,12 +353,6 @@ extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
 #define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
 #define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
 
-static inline int
-xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
-{
-	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
-}
-
 int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
 bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
 bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);
fs/xfs/xfs_log.c
@@ -1372,7 +1372,6 @@ xlog_alloc_log(
 	 */
 	ASSERT(log->l_iclog_size >= 4096);
 	for (i = 0; i < log->l_iclog_bufs; i++) {
-		int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp);
 		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
 				sizeof(struct bio_vec);
 
@@ -1384,7 +1383,7 @@
 		iclog->ic_prev = prev_iclog;
 		prev_iclog = iclog;
 
-		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
+		iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
 						KM_MAYFAIL | KM_ZERO);
 		if (!iclog->ic_data)
 			goto out_free_iclog;
fs/xfs/xfs_log_recover.c
@@ -78,8 +78,6 @@ xlog_alloc_buffer(
 	struct xlog	*log,
 	int		nbblks)
 {
-	int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
-
 	/*
 	 * Pass log block 0 since we don't have an addr yet, buffer will be
 	 * verified on read.
@@ -107,7 +105,7 @@
 	if (nbblks > 1 && log->l_sectBBsize > 1)
 		nbblks += log->l_sectBBsize;
 	nbblks = round_up(nbblks, log->l_sectBBsize);
-	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
+	return kmem_alloc_large(BBTOB(nbblks), KM_MAYFAIL | KM_ZERO);
 }
 
 /*
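
As a unit reminder for the arithmetic above: BBTOB() converts 512-byte
basic blocks to bytes, and l_sectBBsize is the log sector size in basic
blocks. A worked example with illustrative values (not taken from the
patch):

/*
 * Illustrative values only: a log device with 4096-byte sectors has
 * l_sectBBsize = 8, and the caller asks for 5 basic blocks.
 */
int nbblks = 5;
int sectBBsize = 8;			/* stands in for log->l_sectBBsize */

if (nbblks > 1 && sectBBsize > 1)
        nbblks += sectBBsize;		/* 13 BBs: one sector of slack for an unaligned start */
nbblks = round_up(nbblks, sectBBsize);	/* 16 BBs: a whole number of sectors */
/* BBTOB(16) == 16 << BBSHIFT == 8192 bytes requested from kmem_alloc_large() */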
fs/xfs/xfs_trace.h
@@ -3675,7 +3675,6 @@ DEFINE_EVENT(xfs_kmem_class, name, \
 	TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
 	TP_ARGS(size, flags, caller_ip))
 DEFINE_KMEM_EVENT(kmem_alloc);
-DEFINE_KMEM_EVENT(kmem_alloc_io);
 DEFINE_KMEM_EVENT(kmem_alloc_large);
 
 TRACE_EVENT(xfs_check_new_dalign,