Commit 385ad87d authored by Sagi Grimberg, committed by Doug Ledford

IB/iser: Introduce iser registration pool struct

Instead of having it as part of the connection structure,
have it under a dedicated (embedded) structure in the
connection. This provides a logical separation between the
registration pool and the connection structure.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Adir Lev <adirl@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent eb6ea8c3
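In short, the patch replaces the open-coded lock-plus-union inside struct ib_conn with an embedded struct iser_fr_pool, and call sites take a local fr_pool pointer instead of dereferencing ib_conn directly. Below is a minimal before/after sketch using only names that appear in the diff; the _before/_after struct names are illustrative only, unrelated ib_conn members are elided, and forward declarations stand in for the real definitions.

#include <linux/spinlock.h>
#include <linux/list.h>

struct ib_fmr_pool;			/* opaque here; defined by the IB core */
struct iser_page_vec;			/* opaque here; defined by iser */

/* Before: registration state lived directly in the connection. */
struct ib_conn_before {			/* illustrative name, not a real identifier */
	/* ... cma_id, qp, device, beacon, flush_comp ... */
	spinlock_t lock;		/* protects fmr/fastreg pool */
	union {
		struct {
			struct ib_fmr_pool *pool;
			struct iser_page_vec *page_vec;
		} fmr;
		struct {
			struct list_head pool;
			int pool_size;
		} fastreg;
	};
};

/* After: the same state is grouped in a dedicated, embedded pool struct. */
struct iser_fr_pool {
	spinlock_t lock;		/* protects fmr/fastreg pool */
	union {
		struct {
			struct ib_fmr_pool *pool;
			struct iser_page_vec *page_vec;
		} fmr;
		struct {
			struct list_head pool;
			int pool_size;
		} fastreg;
	};
};

struct ib_conn_after {			/* illustrative name, not a real identifier */
	/* ... same connection members as before ... */
	struct iser_fr_pool fr_pool;	/* registration pool, now self-contained */
};

Call sites then take a local alias, e.g. struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;, and lock fr_pool->lock instead of ib_conn->lock, as the hunks below show.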
@@ -417,6 +417,33 @@ struct iser_fr_desc {
struct iser_pi_context *pi_ctx;
};
/**
* struct iser_fr_pool: connection fast registration pool
*
* @lock: protects fmr/fastreg pool
* @union.fmr:
* @pool: FMR pool for fast registrations
* @page_vec: fast reg page list to hold mapped command pages
* used for registration
* @union.fastreg:
* @pool: Fast registration descriptors pool for fast
* registrations
* @pool_size: Size of pool
*/
struct iser_fr_pool {
spinlock_t lock;
union {
struct {
struct ib_fmr_pool *pool;
struct iser_page_vec *page_vec;
} fmr;
struct {
struct list_head pool;
int pool_size;
} fastreg;
};
};
/**
* struct ib_conn - Infiniband related objects
*
@@ -430,15 +457,7 @@ struct iser_fr_desc {
* @pi_support: Indicate device T10-PI support
* @beacon: beacon send wr to signal all flush errors were drained
* @flush_comp: completes when all connection completions consumed
* @lock: protects fmr/fastreg pool
* @union.fmr:
* @pool: FMR pool for fast registrations
* @page_vec: page vector to hold mapped command pages
* used for registration
* @union.fastreg:
* @pool: Fast registration descriptors pool for fast
* registrations
* @pool_size: Size of pool
* @fr_pool: connection fast registration pool
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -451,17 +470,7 @@ struct ib_conn {
bool pi_support;
struct ib_send_wr beacon;
struct completion flush_comp;
spinlock_t lock;
union {
struct {
struct ib_fmr_pool *pool;
struct iser_page_vec *page_vec;
} fmr;
struct {
struct list_head pool;
int pool_size;
} fastreg;
};
struct iser_fr_pool fr_pool;
};
/**
@@ -184,14 +184,15 @@ iser_copy_to_bounce(struct iser_data_buf *data)
struct iser_fr_desc *
iser_reg_desc_get(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
unsigned long flags;
spin_lock_irqsave(&ib_conn->lock, flags);
desc = list_first_entry(&ib_conn->fastreg.pool,
spin_lock_irqsave(&fr_pool->lock, flags);
desc = list_first_entry(&fr_pool->fastreg.pool,
struct iser_fr_desc, list);
list_del(&desc->list);
spin_unlock_irqrestore(&ib_conn->lock, flags);
spin_unlock_irqrestore(&fr_pool->lock, flags);
return desc;
}
@@ -200,11 +201,12 @@ void
iser_reg_desc_put(struct ib_conn *ib_conn,
struct iser_fr_desc *desc)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
spin_lock_irqsave(&ib_conn->lock, flags);
list_add(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_irqrestore(&ib_conn->lock, flags);
spin_lock_irqsave(&fr_pool->lock, flags);
list_add(&desc->list, &fr_pool->fastreg.pool);
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
/**
@@ -480,6 +482,7 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
struct iser_mem_reg *mem_reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_device *device = ib_conn->device;
struct ib_pool_fmr *fmr;
int ret, plen;
@@ -496,7 +499,7 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
return -EINVAL;
}
fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
fmr = ib_fmr_pool_map_phys(fr_pool->fmr.pool,
page_vec->pages,
page_vec->length,
page_vec->pages[0]);
@@ -560,6 +563,7 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
@@ -583,20 +587,20 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
if (mem->dma_nents == 1) {
return iser_reg_dma(device, mem, mem_reg);
} else { /* use FMR for multiple dma entries */
err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
mem_reg);
err = iser_reg_page_vec(iser_task, mem,
fr_pool->fmr.page_vec, mem_reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
ib_conn->fmr.page_vec->data_size,
ib_conn->fmr.page_vec->length,
ib_conn->fmr.page_vec->offset);
for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
fr_pool->fmr.page_vec->data_size,
fr_pool->fmr.page_vec->length,
fr_pool->fmr.page_vec->offset);
for (i = 0; i < fr_pool->fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
(unsigned long long)ib_conn->fmr.page_vec->pages[i]);
(unsigned long long)fr_pool->fmr.page_vec->pages[i]);
}
if (err)
return err;
@@ -202,16 +202,21 @@ static void iser_free_device_ib_res(struct iser_device *device)
int iser_alloc_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
struct iser_device *device = ib_conn->device;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_page_vec *page_vec;
struct ib_fmr_pool *fmr_pool;
struct ib_fmr_pool_param params;
int ret = -ENOMEM;
ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
GFP_KERNEL);
if (!ib_conn->fmr.page_vec)
spin_lock_init(&fr_pool->lock);
page_vec = kmalloc(sizeof(*page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
GFP_KERNEL);
if (!page_vec)
return ret;
ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
page_vec->pages = (u64 *)(page_vec + 1);
params.page_shift = SHIFT_4K;
/* when the first/last SG element are not start/end *
@@ -227,18 +232,20 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
if (IS_ERR(ib_conn->fmr.pool)) {
ret = PTR_ERR(ib_conn->fmr.pool);
fmr_pool = ib_create_fmr_pool(device->pd, &params);
if (IS_ERR(fmr_pool)) {
ret = PTR_ERR(fmr_pool);
iser_err("FMR allocation failed, err %d\n", ret);
goto err;
}
fr_pool->fmr.page_vec = page_vec;
fr_pool->fmr.pool = fmr_pool;
return 0;
err:
kfree(ib_conn->fmr.page_vec);
ib_conn->fmr.page_vec = NULL;
kfree(page_vec);
return ret;
}
@@ -247,14 +254,15 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
*/
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
iser_info("freeing conn %p fmr pool %p\n",
ib_conn, ib_conn->fmr.pool);
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
ib_destroy_fmr_pool(ib_conn->fmr.pool);
ib_conn->fmr.pool = NULL;
iser_info("freeing conn %p fmr pool %p\n",
ib_conn, fr_pool->fmr.pool);
kfree(ib_conn->fmr.page_vec);
ib_conn->fmr.page_vec = NULL;
ib_destroy_fmr_pool(fr_pool->fmr.pool);
fr_pool->fmr.pool = NULL;
kfree(fr_pool->fmr.page_vec);
fr_pool->fmr.page_vec = NULL;
}
static int
@@ -380,11 +388,13 @@ iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
struct iser_device *device = ib_conn->device;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
int i, ret;
INIT_LIST_HEAD(&ib_conn->fastreg.pool);
ib_conn->fastreg.pool_size = 0;
INIT_LIST_HEAD(&fr_pool->fastreg.pool);
spin_lock_init(&fr_pool->lock);
fr_pool->fastreg.pool_size = 0;
for (i = 0; i < cmds_max; i++) {
desc = iser_create_fastreg_desc(device->ib_device, device->pd,
ib_conn->pi_support);
@@ -393,8 +403,8 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
goto err;
}
list_add_tail(&desc->list, &ib_conn->fastreg.pool);
ib_conn->fastreg.pool_size++;
list_add_tail(&desc->list, &fr_pool->fastreg.pool);
fr_pool->fastreg.pool_size++;
}
return 0;
@@ -409,15 +419,16 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc, *tmp;
int i = 0;
if (list_empty(&ib_conn->fastreg.pool))
if (list_empty(&fr_pool->fastreg.pool))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
list_for_each_entry_safe(desc, tmp, &fr_pool->fastreg.pool, list) {
list_del(&desc->list);
iser_free_reg_res(&desc->rsc);
if (desc->pi_ctx)
@@ -426,9 +437,9 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
++i;
}
if (i < ib_conn->fastreg.pool_size)
if (i < fr_pool->fastreg.pool_size)
iser_warn("pool still has %d regions registered\n",
ib_conn->fastreg.pool_size - i);
fr_pool->fastreg.pool_size - i);
}
/**
@@ -924,7 +935,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
init_completion(&iser_conn->ib_completion);
init_completion(&iser_conn->up_completion);
INIT_LIST_HEAD(&iser_conn->conn_list);
spin_lock_init(&iser_conn->ib_conn.lock);
mutex_init(&iser_conn->state_mutex);
}