commit 8d60ecd9
Author:    NeilBrown <neilb@suse.com>
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

staging: lustre: replace LIBCFS_CPT_ALLOC()

LIBCFS_CPT_ALLOC() calls kvmalloc_node() with GFP_NOFS,
which is not permitted.
Mostly, a kmalloc_node(GFP_NOFS) is appropriate, though occasionally
the allocation is large and GFP_KERNEL is acceptable, so
kvmalloc_node() can be used.

This patch introduces 4 alternatives to LIBCFS_CPT_ALLOC():
 kmalloc_cpt()
 kzalloc_cpt()
 kvmalloc_cpt()
 kvzalloc_cpt().

Each takes a size, gfp flags, and cpt number.

Almost every call to LIBCFS_CPT_ALLOC() passes lnet_cpt_table()
as the table.  This patch embeds that choice in the k*alloc_cpt()
macros, and opencodes kzalloc_node(..., cfs_cpt_spread_node(..))
in the one case that lnet_cpt_table() isn't used.

When LIBCFS_CPT_ALLOC() is replaced, the matching LIBCFS_FREE()
is also replaced, with kfree() or kvfree() as appropriate.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: d0157f0c
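The conversion pattern, distilled from the kiblnd_create_conn()/
kiblnd_destroy_conn() hunks below (an illustrative excerpt of this
patch, not an additional change):

	/* Before: the CPT table and the size are passed explicitly, and
	 * the matching free must recompute the allocation size. */
	LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
			 IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
	...
	LIBCFS_FREE(conn->ibc_rxs,
		    IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));

	/* After: lnet_cpt_table() is implied by the macro, the gfp flags
	 * are explicit, and the free is a plain kfree() with no size. */
	conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx),
				    GFP_NOFS, cpt);
	...
	kfree(conn->ibc_rxs);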
@@ -126,6 +126,25 @@ do { \
 	kvfree(ptr); \
 } while (0)
 
+/*
+ * Use #define rather than inline, as lnet_cpt_table() might
+ * not be defined yet
+ */
+#define kmalloc_cpt(size, flags, cpt) \
+	kmalloc_node(size, flags, cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
+#define kzalloc_cpt(size, flags, cpt) \
+	kmalloc_node(size, flags | __GFP_ZERO, \
+		     cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
+#define kvmalloc_cpt(size, flags, cpt) \
+	kvmalloc_node(size, flags, \
+		      cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
+#define kvzalloc_cpt(size, flags, cpt) \
+	kvmalloc_node(size, flags | __GFP_ZERO, \
+		      cfs_cpt_spread_node(lnet_cpt_table(), cpt))
+
 /******************************************************************************/
 
 void libcfs_debug_dumplog(void);
...
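Before the per-file hunks, a note on how a call site picks a variant.
This is a sketch distilled from the commit message and the hunks below;
both snippets appear later in this patch:

	/* Small per-object allocation, possibly under filesystem I/O:
	 * stay physically contiguous with GFP_NOFS; free with kfree(). */
	peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
	...
	kfree(peer);

	/* Large hash table allocated at setup time: kvmalloc_cpt() may
	 * fall back to vmalloc, which requires GFP_KERNEL; the matching
	 * free must then be kvfree(). */
	hash = kvmalloc_cpt(LNET_PEER_HASH_SIZE * sizeof(*hash),
			    GFP_KERNEL, i);
	...
	kvfree(hash);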
@@ -325,7 +325,7 @@ int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
 	LASSERT(net);
 	LASSERT(nid != LNET_NID_ANY);
 
-	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+	peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
 	if (!peer) {
 		CERROR("Cannot allocate peer\n");
 		return -ENOMEM;
@@ -656,15 +656,14 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
 	LASSERT(sched->ibs_nthreads > 0);
 
-	LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
-			 sizeof(*init_qp_attr));
+	init_qp_attr = kzalloc_cpt(sizeof(*init_qp_attr), GFP_NOFS, cpt);
 	if (!init_qp_attr) {
 		CERROR("Can't allocate qp_attr for %s\n",
 		       libcfs_nid2str(peer->ibp_nid));
 		goto failed_0;
 	}
 
-	LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
+	conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt);
 	if (!conn) {
 		CERROR("Can't allocate connection for %s\n",
 		       libcfs_nid2str(peer->ibp_nid));
@@ -687,8 +686,7 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
 	INIT_LIST_HEAD(&conn->ibc_active_txs);
 	spin_lock_init(&conn->ibc_lock);
 
-	LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
-			 sizeof(*conn->ibc_connvars));
+	conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars), GFP_NOFS, cpt);
 	if (!conn->ibc_connvars) {
 		CERROR("Can't allocate in-progress connection state\n");
 		goto failed_2;
@@ -722,8 +720,8 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
 	write_unlock_irqrestore(glock, flags);
 
-	LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
-			 IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
+	conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx),
+				    GFP_NOFS, cpt);
 	if (!conn->ibc_rxs) {
 		CERROR("Cannot allocate RX buffers\n");
 		goto failed_2;
@@ -877,11 +875,7 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
 	if (conn->ibc_rx_pages)
 		kiblnd_unmap_rx_descs(conn);
 
-	if (conn->ibc_rxs) {
-		LIBCFS_FREE(conn->ibc_rxs,
-			    IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
-	}
+	kfree(conn->ibc_rxs);
 
 	kfree(conn->ibc_connvars);
 
 	if (conn->ibc_hdev)
@@ -1088,7 +1082,7 @@ static void kiblnd_free_pages(struct kib_pages *p)
 			__free_page(p->ibp_pages[i]);
 	}
 
-	LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
+	kfree(p);
 }
 
 int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
@@ -1096,14 +1090,13 @@ int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
 	struct kib_pages *p;
 	int i;
 
-	LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
-			 offsetof(struct kib_pages, ibp_pages[npages]));
+	p = kzalloc_cpt(offsetof(struct kib_pages, ibp_pages[npages]),
+			GFP_NOFS, cpt);
 	if (!p) {
 		CERROR("Can't allocate descriptor for %d pages\n", npages);
 		return -ENOMEM;
 	}
 
-	memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
 	p->ibp_npages = npages;
 
 	for (i = 0; i < npages; i++) {
@@ -1375,8 +1368,7 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_po
 	INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
 	fpo->fast_reg.fpo_pool_size = 0;
 	for (i = 0; i < fps->fps_pool_size; i++) {
-		LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
-				 sizeof(*frd));
+		frd = kzalloc_cpt(sizeof(*frd), GFP_NOFS, fps->fps_cpt);
 		if (!frd) {
 			CERROR("Failed to allocate a new fast_reg descriptor\n");
 			rc = -ENOMEM;
@@ -1425,7 +1417,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
 	struct kib_fmr_pool *fpo;
 	int rc;
 
-	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
+	fpo = kzalloc_cpt(sizeof(*fpo), GFP_NOFS, fps->fps_cpt);
 	if (!fpo)
 		return -ENOMEM;
@@ -1984,30 +1976,14 @@ static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
 		struct kib_tx *tx = &tpo->tpo_tx_descs[i];
 
 		list_del(&tx->tx_list);
-		if (tx->tx_pages)
-			LIBCFS_FREE(tx->tx_pages,
-				    LNET_MAX_IOV *
-				    sizeof(*tx->tx_pages));
-		if (tx->tx_frags)
-			LIBCFS_FREE(tx->tx_frags,
-				    (1 + IBLND_MAX_RDMA_FRAGS) *
-				    sizeof(*tx->tx_frags));
-		if (tx->tx_wrq)
-			LIBCFS_FREE(tx->tx_wrq,
-				    (1 + IBLND_MAX_RDMA_FRAGS) *
-				    sizeof(*tx->tx_wrq));
-		if (tx->tx_sge)
-			LIBCFS_FREE(tx->tx_sge,
-				    (1 + IBLND_MAX_RDMA_FRAGS) *
-				    sizeof(*tx->tx_sge));
-		if (tx->tx_rd)
-			LIBCFS_FREE(tx->tx_rd,
-				    offsetof(struct kib_rdma_desc,
-					     rd_frags[IBLND_MAX_RDMA_FRAGS]));
+		kfree(tx->tx_pages);
+		kfree(tx->tx_frags);
+		kfree(tx->tx_wrq);
+		kfree(tx->tx_sge);
+		kfree(tx->tx_rd);
 	}
 
-	LIBCFS_FREE(tpo->tpo_tx_descs,
-		    pool->po_size * sizeof(struct kib_tx));
+	kfree(tpo->tpo_tx_descs);
 out:
 	kiblnd_fini_pool(pool);
 	kfree(tpo);
@@ -2028,7 +2004,7 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
 	struct kib_pool *pool;
 	struct kib_tx_pool *tpo;
 
-	LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
+	tpo = kzalloc_cpt(sizeof(*tpo), GFP_NOFS, ps->ps_cpt);
 	if (!tpo) {
 		CERROR("Failed to allocate TX pool\n");
 		return -ENOMEM;
@@ -2046,8 +2022,8 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
 		return -ENOMEM;
 	}
 
-	LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
-			 size * sizeof(struct kib_tx));
+	tpo->tpo_tx_descs = kzalloc_cpt(size * sizeof(struct kib_tx),
+					GFP_NOFS, ps->ps_cpt);
 	if (!tpo->tpo_tx_descs) {
 		CERROR("Can't allocate %d tx descriptors\n", size);
 		ps->ps_pool_destroy(pool);
@@ -2061,36 +2037,35 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
 		tx->tx_pool = tpo;
 		if (ps->ps_net->ibn_fmr_ps) {
-			LIBCFS_CPT_ALLOC(tx->tx_pages,
-					 lnet_cpt_table(), ps->ps_cpt,
-					 LNET_MAX_IOV * sizeof(*tx->tx_pages));
+			tx->tx_pages = kzalloc_cpt(LNET_MAX_IOV * sizeof(*tx->tx_pages),
+						   GFP_NOFS, ps->ps_cpt);
 			if (!tx->tx_pages)
 				break;
 		}
 
-		LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
-				 (1 + IBLND_MAX_RDMA_FRAGS) *
-				 sizeof(*tx->tx_frags));
+		tx->tx_frags = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+					   sizeof(*tx->tx_frags),
+					   GFP_NOFS, ps->ps_cpt);
 		if (!tx->tx_frags)
 			break;
 
 		sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
 
-		LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
-				 (1 + IBLND_MAX_RDMA_FRAGS) *
-				 sizeof(*tx->tx_wrq));
+		tx->tx_wrq = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+					 sizeof(*tx->tx_wrq),
+					 GFP_NOFS, ps->ps_cpt);
 		if (!tx->tx_wrq)
 			break;
 
-		LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
-				 (1 + IBLND_MAX_RDMA_FRAGS) *
-				 sizeof(*tx->tx_sge));
+		tx->tx_sge = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+					 sizeof(*tx->tx_sge),
+					 GFP_NOFS, ps->ps_cpt);
 		if (!tx->tx_sge)
 			break;
 
-		LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
-				 offsetof(struct kib_rdma_desc,
-					  rd_frags[IBLND_MAX_RDMA_FRAGS]));
+		tx->tx_rd = kzalloc_cpt(offsetof(struct kib_rdma_desc,
+						 rd_frags[IBLND_MAX_RDMA_FRAGS]),
+					GFP_NOFS, ps->ps_cpt);
 		if (!tx->tx_rd)
 			break;
 	}
...
@@ -108,7 +108,7 @@ ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni,
 	LASSERT(id.pid != LNET_PID_ANY);
 	LASSERT(!in_interrupt());
 
-	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+	peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
 	if (!peer)
 		return -ENOMEM;
@@ -2257,13 +2257,8 @@ ksocknal_free_buffers(void)
 		struct ksock_sched_info *info;
 		int i;
 
-		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-			if (info->ksi_scheds) {
-				LIBCFS_FREE(info->ksi_scheds,
-					    info->ksi_nthreads_max *
-					    sizeof(info->ksi_scheds[0]));
-			}
-		}
+		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info)
+			kfree(info->ksi_scheds);
 		cfs_percpt_free(ksocknal_data.ksnd_sched_info);
 	}
@@ -2452,8 +2447,8 @@ ksocknal_base_startup(void)
 		info->ksi_nthreads_max = nthrs;
 		info->ksi_cpt = i;
 
-		LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
-				 info->ksi_nthreads_max * sizeof(*sched));
+		info->ksi_scheds = kzalloc_cpt(info->ksi_nthreads_max * sizeof(*sched),
+					       GFP_NOFS, i);
 		if (!info->ksi_scheds)
 			goto failed;
...
@@ -49,10 +49,8 @@ cfs_percpt_free(void *vars)
 	arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
 
-	for (i = 0; i < arr->va_count; i++) {
-		if (arr->va_ptrs[i])
-			LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
-	}
+	for (i = 0; i < arr->va_count; i++)
+		kfree(arr->va_ptrs[i]);
 
 	kvfree(arr);
 }
@@ -89,7 +87,8 @@ cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
 	arr->va_cptab = cptab;
 
 	for (i = 0; i < count; i++) {
-		LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
+		arr->va_ptrs[i] = kzalloc_node(size, GFP_KERNEL,
+					       cfs_cpt_spread_node(cptab, i));
 		if (!arr->va_ptrs[i]) {
 			cfs_percpt_free((void *)&arr->va_ptrs[0]);
 			return NULL;
...
@@ -404,11 +404,8 @@ lnet_res_container_cleanup(struct lnet_res_container *rec)
 		       count, lnet_res_type2str(rec->rec_type));
 	}
 
-	if (rec->rec_lh_hash) {
-		LIBCFS_FREE(rec->rec_lh_hash,
-			    LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
-		rec->rec_lh_hash = NULL;
-	}
+	kvfree(rec->rec_lh_hash);
+	rec->rec_lh_hash = NULL;
 
 	rec->rec_type = 0; /* mark it as finalized */
 }
@@ -426,8 +423,8 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
 	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
 
 	/* Arbitrary choice of hash table size */
-	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
-			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
+	rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]),
+					GFP_KERNEL, cpt);
 	if (!rec->rec_lh_hash) {
 		rc = -ENOMEM;
 		goto out;
...
@@ -553,12 +553,8 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
 	if (count > 0)
 		CERROR("%d active msg on exit\n", count);
 
-	if (container->msc_finalizers) {
-		LIBCFS_FREE(container->msc_finalizers,
-			    container->msc_nfinalizers *
-			    sizeof(*container->msc_finalizers));
-		container->msc_finalizers = NULL;
-	}
+	kvfree(container->msc_finalizers);
+	container->msc_finalizers = NULL;
 	container->msc_init = 0;
 }
@@ -573,9 +569,9 @@ lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
 	/* number of CPUs */
 	container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
 
-	LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
-			 container->msc_nfinalizers *
-			 sizeof(*container->msc_finalizers));
+	container->msc_finalizers = kvzalloc_cpt(container->msc_nfinalizers *
+						 sizeof(*container->msc_finalizers),
+						 GFP_KERNEL, cpt);
 	if (!container->msc_finalizers) {
 		CERROR("Failed to allocate message finalizers\n");
...
@@ -775,7 +775,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
 			}
 		}
 		/* the extra entry is for MEs with ignore bits */
-		LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+		kvfree(mhash);
 	}
 
 	cfs_percpt_free(ptl->ptl_mtables);
@@ -803,8 +803,8 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
 	spin_lock_init(&ptl->ptl_lock);
 	cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
 		/* the extra entry is for MEs with ignore bits */
-		LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
-				 sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+		mhash = kvzalloc_cpt(sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1),
+				     GFP_KERNEL, i);
 		if (!mhash) {
 			CERROR("Failed to create match hash for portal %d\n",
 			       index);
...
@@ -56,8 +56,8 @@ lnet_peer_tables_create(void)
 	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
 		INIT_LIST_HEAD(&ptable->pt_deathrow);
 
-		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
-				 LNET_PEER_HASH_SIZE * sizeof(*hash));
+		hash = kvmalloc_cpt(LNET_PEER_HASH_SIZE * sizeof(*hash),
+				    GFP_KERNEL, i);
 		if (!hash) {
 			CERROR("Failed to create peer hash table\n");
 			lnet_peer_tables_destroy();
@@ -94,7 +94,7 @@ lnet_peer_tables_destroy(void)
 		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
 			LASSERT(list_empty(&hash[j]));
 
-		LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+		kvfree(hash);
 	}
 
 	cfs_percpt_free(the_lnet.ln_peer_tables);
@@ -297,7 +297,7 @@ lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt)
 	if (lp)
 		memset(lp, 0, sizeof(*lp));
 	else
-		LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
+		lp = kzalloc_cpt(sizeof(*lp), GFP_NOFS, cpt2);
 
 	if (!lp) {
 		rc = -ENOMEM;
...
@@ -1296,12 +1296,10 @@ lnet_router_checker(void *arg)
 void
 lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
 {
-	int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
-
 	while (--npages >= 0)
 		__free_page(rb->rb_kiov[npages].bv_page);
 
-	LIBCFS_FREE(rb, sz);
+	kfree(rb);
 }
 
 static struct lnet_rtrbuf *
@@ -1313,7 +1311,7 @@ lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
 	struct lnet_rtrbuf *rb;
 	int i;
 
-	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
+	rb = kzalloc_cpt(sz, GFP_NOFS, cpt);
 	if (!rb)
 		return NULL;
@@ -1327,7 +1325,7 @@ lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
 		while (--i >= 0)
 			__free_page(rb->rb_kiov[i].bv_page);
 
-		LIBCFS_FREE(rb, sz);
+		kfree(rb);
 		return NULL;
 	}
...
@@ -113,7 +113,7 @@ srpc_free_bulk(struct srpc_bulk *bk)
 		__free_page(pg);
 	}
 
-	LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
+	kfree(bk);
 }
 
 struct srpc_bulk *
@@ -125,8 +125,8 @@ srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
 	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
 
-	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
-			 offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
+	bk = kzalloc_cpt(offsetof(struct srpc_bulk, bk_iovs[bulk_npg]),
+			 GFP_KERNEL, cpt);
 	if (!bk) {
 		CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
 		return NULL;
@@ -294,8 +294,7 @@ srpc_service_init(struct srpc_service *svc)
 		}
 
 		for (j = 0; j < nrpcs; j++) {
-			LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
-					 i, sizeof(*rpc));
+			rpc = kzalloc_cpt(sizeof(*rpc), GFP_NOFS, i);
 			if (!rpc) {
 				srpc_service_fini(svc);
 				return -ENOMEM;
...