Commit 2e84522c authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Allocate each struct rpcrdma_mw separately

Currently rpcrdma_buffer_create() allocates struct rpcrdma_mw's as
a single contiguous area of memory. It amounts to quite a bit of
memory, and there's no requirement for these to be carved from a
single piece of contiguous memory.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Parent f590e878
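The pattern the patch adopts is worth spelling out before reading the diff: instead of carving every struct rpcrdma_mw out of one large kzalloc() region, each tracking structure is allocated individually and threaded onto a list, so teardown can simply walk the list and kfree() each entry. Below is a minimal sketch of that pattern; the demo_entry, demo_alloc_entries, and demo_free_entries names are hypothetical stand-ins, not the real xprtrdma structures.

#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative only: demo_entry stands in for struct rpcrdma_mw. */
struct demo_entry {
	struct list_head link;
	/* per-entry registration state would live here */
};

/* Allocate @count entries one at a time and thread them onto @pool. */
static int demo_alloc_entries(struct list_head *pool, int count)
{
	struct demo_entry *e;

	while (count--) {
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;	/* caller unwinds via demo_free_entries() */
		list_add(&e->link, pool);
	}
	return 0;
}

/* Teardown walks the list, so a partially built pool frees cleanly. */
static void demo_free_entries(struct list_head *pool)
{
	struct demo_entry *e;

	while (!list_empty(pool)) {
		e = list_first_entry(pool, struct demo_entry, link);
		list_del(&e->link);
		kfree(e);
	}
}

The trade-off is a little per-object allocator overhead in exchange for dropping the large contiguous allocation, and a uniform unwind path that works even for a partially initialized pool.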
@@ -1005,9 +1005,91 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
return rc;
}
/*
* Initialize buffer memory
*/
static int
rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
struct ib_fmr_attr fmr_attr = {
.max_pages = RPCRDMA_MAX_DATA_SEGS,
.max_maps = 1,
.page_shift = PAGE_SHIFT
};
struct rpcrdma_mw *r;
int i, rc;
i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i);
while (i--) {
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
r->r.fmr = ib_alloc_fmr(ia->ri_pd, mr_access_flags, &fmr_attr);
if (IS_ERR(r->r.fmr)) {
rc = PTR_ERR(r->r.fmr);
dprintk("RPC: %s: ib_alloc_fmr failed %i\n",
__func__, rc);
goto out_free;
}
list_add(&r->mw_list, &buf->rb_mws);
list_add(&r->mw_all, &buf->rb_all);
}
return 0;
out_free:
kfree(r);
return rc;
}
static int
rpcrdma_init_frmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
struct rpcrdma_frmr *f;
struct rpcrdma_mw *r;
int i, rc;
i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
dprintk("RPC: %s: initalizing %d FRMRs\n", __func__, i);
while (i--) {
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
f = &r->r.frmr;
f->fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
ia->ri_max_frmr_depth);
if (IS_ERR(f->fr_mr)) {
rc = PTR_ERR(f->fr_mr);
dprintk("RPC: %s: ib_alloc_fast_reg_mr "
"failed %i\n", __func__, rc);
goto out_free;
}
f->fr_pgl = ib_alloc_fast_reg_page_list(ia->ri_id->device,
ia->ri_max_frmr_depth);
if (IS_ERR(f->fr_pgl)) {
rc = PTR_ERR(f->fr_pgl);
dprintk("RPC: %s: ib_alloc_fast_reg_page_list "
"failed %i\n", __func__, rc);
ib_dereg_mr(f->fr_mr);
goto out_free;
}
list_add(&r->mw_list, &buf->rb_mws);
list_add(&r->mw_all, &buf->rb_all);
}
return 0;
out_free:
kfree(r);
return rc;
}
int
rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata)
@@ -1015,7 +1097,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
char *p;
size_t len, rlen, wlen;
int i, rc;
struct rpcrdma_mw *r;
buf->rb_max_requests = cdata->max_requests;
spin_lock_init(&buf->rb_lock);
@@ -1026,28 +1107,12 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
* 2. arrays of struct rpcrdma_req to fill in pointers
* 3. array of struct rpcrdma_rep for replies
* 4. padding, if any
* 5. mw's, fmr's or frmr's, if any
* Send/recv buffers in req/rep need to be registered
*/
len = buf->rb_max_requests *
(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
len += cdata->padding;
switch (ia->ri_memreg_strategy) {
case RPCRDMA_FRMR:
len += buf->rb_max_requests * RPCRDMA_MAX_SEGS *
sizeof(struct rpcrdma_mw);
break;
case RPCRDMA_MTHCAFMR:
/* TBD we are perhaps overallocating here */
len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
sizeof(struct rpcrdma_mw);
break;
default:
break;
}
/* allocate 1, 4 and 5 in one shot */
p = kzalloc(len, GFP_KERNEL);
if (p == NULL) {
dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
@@ -1075,53 +1140,16 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
INIT_LIST_HEAD(&buf->rb_mws);
INIT_LIST_HEAD(&buf->rb_all);
r = (struct rpcrdma_mw *)p;
switch (ia->ri_memreg_strategy) {
case RPCRDMA_FRMR:
for (i = buf->rb_max_requests * RPCRDMA_MAX_SEGS; i; i--) {
r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
ia->ri_max_frmr_depth);
if (IS_ERR(r->r.frmr.fr_mr)) {
rc = PTR_ERR(r->r.frmr.fr_mr);
dprintk("RPC: %s: ib_alloc_fast_reg_mr"
" failed %i\n", __func__, rc);
goto out;
}
r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
ia->ri_id->device,
ia->ri_max_frmr_depth);
if (IS_ERR(r->r.frmr.fr_pgl)) {
rc = PTR_ERR(r->r.frmr.fr_pgl);
dprintk("RPC: %s: "
"ib_alloc_fast_reg_page_list "
"failed %i\n", __func__, rc);
ib_dereg_mr(r->r.frmr.fr_mr);
goto out;
}
list_add(&r->mw_all, &buf->rb_all);
list_add(&r->mw_list, &buf->rb_mws);
++r;
}
rc = rpcrdma_init_frmrs(ia, buf);
if (rc)
goto out;
break;
case RPCRDMA_MTHCAFMR:
/* TBD we are perhaps overallocating here */
for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
static struct ib_fmr_attr fa =
{ RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT };
r->r.fmr = ib_alloc_fmr(ia->ri_pd,
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
&fa);
if (IS_ERR(r->r.fmr)) {
rc = PTR_ERR(r->r.fmr);
dprintk("RPC: %s: ib_alloc_fmr"
" failed %i\n", __func__, rc);
goto out;
}
list_add(&r->mw_all, &buf->rb_all);
list_add(&r->mw_list, &buf->rb_mws);
++r;
}
rc = rpcrdma_init_fmrs(ia, buf);
if (rc)
goto out;
break;
default:
break;
@@ -1189,24 +1217,57 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
return rc;
}
/*
* Unregister and destroy buffer memory. Need to deal with
* partial initialization, so it's callable from failed create.
* Must be called before destroying endpoint, as registrations
* reference it.
*/
static void
rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf)
{
struct rpcrdma_mw *r;
int rc;
while (!list_empty(&buf->rb_all)) {
r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
list_del(&r->mw_all);
list_del(&r->mw_list);
rc = ib_dealloc_fmr(r->r.fmr);
if (rc)
dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
__func__, rc);
kfree(r);
}
}
static void
rpcrdma_destroy_frmrs(struct rpcrdma_buffer *buf)
{
struct rpcrdma_mw *r;
int rc;
while (!list_empty(&buf->rb_all)) {
r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
list_del(&r->mw_all);
list_del(&r->mw_list);
rc = ib_dereg_mr(r->r.frmr.fr_mr);
if (rc)
dprintk("RPC: %s: ib_dereg_mr failed %i\n",
__func__, rc);
ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
kfree(r);
}
}
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
int rc, i;
struct rpcrdma_ia *ia = rdmab_to_ia(buf);
struct rpcrdma_mw *r;
int i;
/* clean up in reverse order from create
* 1. recv mr memory (mr free, then kfree)
* 2. send mr memory (mr free, then kfree)
* 3. padding (if any) [moved to rpcrdma_ep_destroy]
* 4. arrays
* 3. MWs
*/
dprintk("RPC: %s: entering\n", __func__);
@@ -1225,32 +1286,15 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
}
}
while (!list_empty(&buf->rb_mws)) {
r = list_entry(buf->rb_mws.next,
struct rpcrdma_mw, mw_list);
list_del(&r->mw_all);
list_del(&r->mw_list);
switch (ia->ri_memreg_strategy) {
case RPCRDMA_FRMR:
rc = ib_dereg_mr(r->r.frmr.fr_mr);
if (rc)
dprintk("RPC: %s:"
" ib_dereg_mr"
" failed %i\n",
__func__, rc);
ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
break;
case RPCRDMA_MTHCAFMR:
rc = ib_dealloc_fmr(r->r.fmr);
if (rc)
dprintk("RPC: %s:"
" ib_dealloc_fmr"
" failed %i\n",
__func__, rc);
break;
default:
break;
}
switch (ia->ri_memreg_strategy) {
case RPCRDMA_FRMR:
rpcrdma_destroy_frmrs(buf);
break;
case RPCRDMA_MTHCAFMR:
rpcrdma_destroy_fmrs(buf);
break;
default:
break;
}
kfree(buf->rb_pool);