Commit d634525d authored by Dave Chinner, committed by Darrick J. Wong

xfs: replace kmem_alloc_large() with kvmalloc()

There is no reason for this wrapper to exist anymore. All the places
that use KM_NOFS allocation are within transaction contexts and
hence covered by memalloc_nofs_save/restore contexts. Hence we don't
need any special handling of vmalloc for large IOs anymore, so
special-casing this code isn't necessary.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Parent 98fe2c3c
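The rationale above hinges on scoped NOFS allocation: once a task enters a memalloc_nofs_save() section, every GFP_KERNEL allocation it makes is treated as GFP_NOFS by the page allocator, so reclaim cannot recurse back into the filesystem. The sketch below is illustrative only and is not part of this patch; the helper name is made up, and it merely models what the transaction code already does around its allocations.

#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical helper: model of an allocation made inside an XFS transaction. */
static void *alloc_inside_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *buf;

	/* The transaction lifecycle brackets its work with a NOFS scope. */
	nofs_flag = memalloc_nofs_save();

	/*
	 * GFP_KERNEL is safe here: the scope above demotes it to NOFS for
	 * reclaim purposes, and kvmalloc() still falls back to vmalloc()
	 * when a physically contiguous allocation fails.
	 */
	buf = kvmalloc(size, GFP_KERNEL);

	memalloc_nofs_restore(nofs_flag);
	return buf;
}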
@@ -29,42 +29,3 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	} while (1);
 }
-/*
- * __vmalloc() will allocate data pages and auxiliary structures (e.g.
- * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
- * we need to tell memory reclaim that we are in such a context via
- * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
- * and potentially deadlocking.
- */
-static void *
-__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
-{
-	unsigned nofs_flag = 0;
-	void	*ptr;
-	gfp_t	lflags = kmem_flags_convert(flags);
-	if (flags & KM_NOFS)
-		nofs_flag = memalloc_nofs_save();
-	ptr = __vmalloc(size, lflags);
-	if (flags & KM_NOFS)
-		memalloc_nofs_restore(nofs_flag);
-	return ptr;
-}
-void *
-kmem_alloc_large(size_t size, xfs_km_flags_t flags)
-{
-	void	*ptr;
-	trace_kmem_alloc_large(size, flags, _RET_IP_);
-	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
-	if (ptr)
-		return ptr;
-	return __kmem_vmalloc(size, flags);
-}
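For reference, the flag combinations the removed kmem_alloc_large() callers used map onto plain GFP flags at the new call sites in the hunks that follow. A minimal sketch of that mapping, assuming nothing beyond the standard kvmalloc()/kvzalloc() API (the function name is made up for illustration):

#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative only: how the old KM_* flag combinations are expressed now. */
static void flag_mapping_examples(size_t size)
{
	void *a, *b, *c;

	/* KM_MAYFAIL | KM_ZERO: zeroed buffer, fail rather than retry forever. */
	a = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);

	/* KM_NOLOCKDEP: skip lockdep's reclaim tracking for this allocation. */
	b = kvmalloc(size, GFP_KERNEL | __GFP_NOLOCKDEP);

	/* KM_NOFS: rely on the caller's memalloc_nofs_save() scope instead. */
	c = kvmalloc(size, GFP_KERNEL);

	kvfree(a);
	kvfree(b);
	kvfree(c);
}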
@@ -57,7 +57,6 @@ kmem_flags_convert(xfs_km_flags_t flags)
 }
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
 static inline void kmem_free(const void *ptr)
 {
 	kvfree(ptr);
@@ -489,7 +489,7 @@ xfs_attr_copy_value(
 	}
 	if (!args->value) {
-		args->value = kmem_alloc_large(valuelen, KM_NOLOCKDEP);
+		args->value = kvmalloc(valuelen, GFP_KERNEL | __GFP_NOLOCKDEP);
 		if (!args->value)
 			return -ENOMEM;
 	}
@@ -25,11 +25,11 @@
  * reallocating the buffer if necessary. Buffer contents are not preserved
  * across a reallocation.
  */
-int
+static int
 xchk_setup_xattr_buf(
 	struct xfs_scrub	*sc,
 	size_t			value_size,
-	xfs_km_flags_t		flags)
+	gfp_t			flags)
 {
 	size_t			sz;
 	struct xchk_xattr_buf	*ab = sc->buf;
@@ -57,7 +57,7 @@ xchk_setup_xattr_buf(
 	 * Don't zero the buffer upon allocation to avoid runtime overhead.
	 * All users must be careful never to read uninitialized contents.
	 */
-	ab = kmem_alloc_large(sizeof(*ab) + sz, flags);
+	ab = kvmalloc(sizeof(*ab) + sz, flags);
 	if (!ab)
 		return -ENOMEM;
@@ -79,7 +79,7 @@ xchk_setup_xattr(
 	 * without the inode lock held, which means we can sleep.
	 */
 	if (sc->flags & XCHK_TRY_HARDER) {
-		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
+		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL);
 		if (error)
 			return error;
 	}
@@ -138,7 +138,8 @@ xchk_xattr_listent(
 	 * doesn't work, we overload the seen_enough variable to convey
	 * the error message back to the main scrub function.
	 */
-	error = xchk_setup_xattr_buf(sx->sc, valuelen, KM_MAYFAIL);
+	error = xchk_setup_xattr_buf(sx->sc, valuelen,
+			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 	if (error == -ENOMEM)
 		error = -EDEADLOCK;
 	if (error) {
@@ -323,7 +324,8 @@ xchk_xattr_block(
 		return 0;
 	/* Allocate memory for block usage checking. */
-	error = xchk_setup_xattr_buf(ds->sc, 0, KM_MAYFAIL);
+	error = xchk_setup_xattr_buf(ds->sc, 0,
+			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 	if (error == -ENOMEM)
 		return -EDEADLOCK;
 	if (error)
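The scrub hunks above implement the retry convention the in-code comment refers to: a mid-walk allocation uses __GFP_RETRY_MAYFAIL and reports failure as -EDEADLOCK, which tells the scrub core to restart the whole check with XCHK_TRY_HARDER set; on that pass the worst-case buffer is preallocated with plain GFP_KERNEL before any locks are taken. A condensed, hypothetical sketch of that control flow, where walk_xattrs() is a made-up stand-in for the real attribute-list walk:

static int scrub_xattrs_with_retry(struct xfs_scrub *sc)
{
	int error;

	/* Retry pass only: preallocate the worst case while we can still sleep. */
	if (sc->flags & XCHK_TRY_HARDER) {
		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL);
		if (error)
			return error;
	}

	/* First pass: buffers are grown opportunistically during the walk. */
	error = walk_xattrs(sc);	/* hypothetical stand-in for the attr walk */
	if (error == -ENOMEM)
		return -EDEADLOCK;	/* scrub core retries with XCHK_TRY_HARDER */
	return error;
}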
@@ -65,7 +65,4 @@ xchk_xattr_dstmap(
 			BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
 }
-int xchk_setup_xattr_buf(struct xfs_scrub *sc, size_t value_size,
-		xfs_km_flags_t flags);
 #endif /* __XFS_SCRUB_ATTR_H__ */
@@ -1487,8 +1487,8 @@ xlog_alloc_log(
 		iclog->ic_prev = prev_iclog;
 		prev_iclog = iclog;
-		iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
-						KM_MAYFAIL | KM_ZERO);
+		iclog->ic_data = kvzalloc(log->l_iclog_size,
+				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 		if (!iclog->ic_data)
 			goto out_free_iclog;
 #ifdef DEBUG
@@ -185,7 +185,15 @@ xlog_cil_alloc_shadow_bufs(
 			 */
 			kmem_free(lip->li_lv_shadow);
-			lv = kmem_alloc_large(buf_size, KM_NOFS);
+			/*
+			 * We are in transaction context, which means this
+			 * allocation will pick up GFP_NOFS from the
+			 * memalloc_nofs_save/restore context the transaction
+			 * holds. This means we can use GFP_KERNEL here so the
+			 * generic kvmalloc() code will run vmalloc on
+			 * contiguous page allocation failure as we require.
+			 */
+			lv = kvmalloc(buf_size, GFP_KERNEL);
 			memset(lv, 0, xlog_cil_iovec_space(niovecs));
 			lv->lv_item = lip;
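The new comment in this hunk leans on kvmalloc()'s fallback behaviour: for GFP_KERNEL-style requests it first attempts a physically contiguous allocation and only then falls back to vmalloc(). A rough, simplified sketch of that behaviour (an assumption about the generic allocator, not code from this patch):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Simplified model of kvmalloc(); the real implementation has more policy. */
static void *kvmalloc_sketch(size_t size, gfp_t flags)
{
	void *p;

	/* Try kmalloc first, but don't retry hard: vmalloc is the fallback. */
	p = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);
	if (p)
		return p;

	return __vmalloc(size, flags);
}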
@@ -106,7 +106,7 @@ xlog_alloc_buffer(
 	if (nbblks > 1 && log->l_sectBBsize > 1)
 		nbblks += log->l_sectBBsize;
 	nbblks = round_up(nbblks, log->l_sectBBsize);
-	return kmem_alloc_large(BBTOB(nbblks), KM_MAYFAIL | KM_ZERO);
+	return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 }
 /*
@@ -3774,7 +3774,6 @@ DEFINE_EVENT(xfs_kmem_class, name, \
 	TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
 	TP_ARGS(size, flags, caller_ip))
 DEFINE_KMEM_EVENT(kmem_alloc);
-DEFINE_KMEM_EVENT(kmem_alloc_large);
 TRACE_EVENT(xfs_check_new_dalign,
 	TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),