Commit fea511a6 authored by Eric Van Hensbergen

9p: move request management to client code

The virtio transport uses a simplified request management system
that I want to use for all transports.  This patch adapts and moves the
existing code for managing requests to the client common code.
Later patches will apply these mechanisms to the other transports.
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
Parent 044c7768
@@ -26,6 +26,9 @@
#ifndef NET_9P_CLIENT_H
#define NET_9P_CLIENT_H
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
/**
* enum p9_trans_status - different states of underlying transports
* @Connected: transport is connected and healthy
@@ -42,6 +45,62 @@ enum p9_trans_status {
Hung,
};
/**
* enum p9_req_status_t - status of a request
* @REQ_STATUS_IDLE: request slot unused
* @REQ_STATUS_ALLOC: request has been allocated but not sent
* @REQ_STATUS_SENT: request sent to server
* @REQ_STATUS_FLSH: a flush has been sent for this request
* @REQ_STATUS_RCVD: response received from server
* @REQ_STATUS_FLSHD: request has been flushed
* @REQ_STATUS_ERROR: request encountered an error on the client side
*
* The @REQ_STATUS_IDLE state is used to mark a request slot as unused,
* but actual use is tracked by the idpool structure, which handles tag
* id allocation.
*
*/
enum p9_req_status_t {
REQ_STATUS_IDLE,
REQ_STATUS_ALLOC,
REQ_STATUS_SENT,
REQ_STATUS_FLSH,
REQ_STATUS_RCVD,
REQ_STATUS_FLSHD,
REQ_STATUS_ERROR,
};
/**
* struct p9_req_t - request slots
* @status: status of this request slot
* @t_err: transport error
* @wq: wait_queue for the client to block on for this request
* @tc: the request fcall structure
* @rc: the response fcall structure
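* @flush_tag: tag of the flush request associated with this request
*             (P9_NOTAG when no flush is pending)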
* @aux: transport specific data (provided for trans_fd migration)
*
* Transports use an array to track outstanding requests
* instead of a list. While this may incur overhead during initial
* allocation or expansion, it makes request lookup much easier as the
* tag id is an index into an array. (We use tag+1 so that we can
* accommodate the -1 tag for the T_VERSION request.)
* This also has the nice effect of only having to allocate wait_queues
* once, instead of constantly allocating and freeing them. It's possible
* other resources could benefit from this scheme as well.
*
*/
struct p9_req_t {
int status;
int t_err;
wait_queue_head_t *wq;
struct p9_fcall *tc;
struct p9_fcall *rc;
u16 flush_tag;
void *aux;
};
/**
* struct p9_client - per client instance state
* @lock: protect @fidlist
@@ -52,9 +111,20 @@ enum p9_trans_status {
* @conn: connection state information used by trans_fd
* @fidpool: fid handle accounting for session
* @fidlist: List of active fid handles
* @tagpool: transaction id accounting for session
* @reqs: 2D array of requests
* @max_tag: current maximum tag id allocated
*
* The client structure is used to keep track of various per-client
* state that has been instantiated.
* In order to minimize per-transaction overhead we use a
* simple array to look up requests instead of a hash table
* or linked list. In order to support a larger number of
* transactions, we make this a 2D array, allocating new rows
* when we need to grow the total number of transactions.
*
* Each row holds P9_ROW_MAXTAG (255) requests and up to P9_ROW_MAXTAG rows
* are supported, for roughly 64k concurrent requests per session.
* (An illustrative sketch of the tag-to-slot mapping follows the structure
* definition below.)
*
* Bugs: duplicated data and potentially unnecessary elements.
*/
@@ -70,6 +140,10 @@ struct p9_client {
struct p9_idpool *fidpool;
struct list_head fidlist;
struct p9_idpool *tagpool;
struct p9_req_t *reqs[P9_ROW_MAXTAG];
int max_tag;
};
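/*
 * Illustrative sketch (not part of this patch): how a wire tag maps onto
 * the reqs[][] slots described above.  p9_example_slot() is a hypothetical
 * helper; the real mapping is performed inside p9_tag_alloc() and
 * p9_tag_lookup().  The tag is shifted by one so that the P9_NOTAG (~0)
 * tag used for T_VERSION wraps around to slot 0; the row is the quotient
 * and the column the remainder with respect to P9_ROW_MAXTAG.
 */
static inline struct p9_req_t *p9_example_slot(struct p9_client *c, u16 tag)
{
	u16 slot = tag + 1;		/* P9_NOTAG (0xffff) wraps to 0 */
	int row = slot / P9_ROW_MAXTAG;	/* which lazily-allocated row */
	int col = slot % P9_ROW_MAXTAG;	/* index within that row */

	return &c->reqs[row][col];
}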
/**
@@ -131,4 +205,7 @@ struct p9_stat *p9_client_stat(struct p9_fid *fid);
int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset);
struct p9_req_t *p9_tag_alloc(struct p9_client *, u16);
struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
#endif /* NET_9P_CLIENT_H */
@@ -120,6 +120,154 @@ static int parse_opts(char *opts, struct p9_client *clnt)
return ret;
}
/**
* p9_tag_alloc - lookup/allocate a request by tag
* @c: client session to lookup tag within
* @tag: numeric id for transaction
*
* this is a simple array lookup, but will grow the
* request_slots as necessary to accommodate transaction
* ids which did not previously have a slot.
*
* this code relies on the client spinlock to manage locks; it's
* possible we should switch to something else, but I'd rather
* stick with something low-overhead for the common case.
*
*/
struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
{
unsigned long flags;
int row, col;
/* tag+1 is used as the slot index so that the P9_NOTAG (-1) tag
* used for T_VERSION maps to slot 0 */
tag++;
if (tag >= c->max_tag) {
spin_lock_irqsave(&c->lock, flags);
/* check again since original check was outside of lock */
while (tag >= c->max_tag) {
row = (tag / P9_ROW_MAXTAG);
c->reqs[row] = kcalloc(P9_ROW_MAXTAG,
sizeof(struct p9_req_t), GFP_ATOMIC);
if (!c->reqs[row]) {
printk(KERN_ERR "Couldn't grow tag array\n");
BUG();
}
for (col = 0; col < P9_ROW_MAXTAG; col++) {
c->reqs[row][col].status = REQ_STATUS_IDLE;
c->reqs[row][col].flush_tag = P9_NOTAG;
c->reqs[row][col].wq = kmalloc(
sizeof(wait_queue_head_t), GFP_ATOMIC);
if (!c->reqs[row][col].wq) {
printk(KERN_ERR
"Couldn't grow tag array\n");
BUG();
}
init_waitqueue_head(c->reqs[row][col].wq);
}
c->max_tag += P9_ROW_MAXTAG;
}
spin_unlock_irqrestore(&c->lock, flags);
}
row = tag / P9_ROW_MAXTAG;
col = tag % P9_ROW_MAXTAG;
c->reqs[row][col].status = REQ_STATUS_ALLOC;
c->reqs[row][col].flush_tag = P9_NOTAG;
return &c->reqs[row][col];
}
EXPORT_SYMBOL(p9_tag_alloc);
/**
* p9_tag_lookup - lookup a request by tag
* @c: client session to lookup tag within
* @tag: numeric id for transaction
*
*/
struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
{
int row, col;
/* This looks up the original request by tag so we know which
* buffer to read the data into */
tag++;
BUG_ON(tag >= c->max_tag);
row = tag / P9_ROW_MAXTAG;
col = tag % P9_ROW_MAXTAG;
return &c->reqs[row][col];
}
EXPORT_SYMBOL(p9_tag_lookup);
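/*
 * Illustrative sketch (not part of this patch): the calling pattern a
 * transport is expected to follow with request management moved into the
 * client code.  example_send() and example_complete() are hypothetical
 * names; the trans_virtio changes further down in this patch follow the
 * same pattern (p9_virtio_rpc() on the send side, req_done() on the
 * completion side), with p9_tag_alloc() serialized under a transport lock.
 */
static int example_send(struct p9_client *c, struct p9_fcall *tc)
{
	struct p9_req_t *req;
	int tag = P9_NOTAG;

	if (tc->id != P9_TVERSION) {
		tag = p9_idpool_get(c->tagpool);	/* reserve a tag id */
		if (tag < 0)
			return -ENOMEM;
	}

	req = p9_tag_alloc(c, tag);	/* slot is now REQ_STATUS_ALLOC */
	p9_set_tag(tc, tag);
	req->tc = tc;
	req->status = REQ_STATUS_SENT;
	/* ... hand tc to the transport and sleep on req->wq ... */
	return 0;
}

static void example_complete(struct p9_client *c, struct p9_fcall *rc)
{
	struct p9_req_t *req = p9_tag_lookup(c, rc->tag);

	req->rc = rc;
	req->status = REQ_STATUS_RCVD;
	wake_up(req->wq);	/* unblock the caller waiting in example_send() */
}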
/**
* p9_tag_init - setup tags structure and contents
* @c: client instance whose tag structures are being set up
*
* This initializes the tags structure for each client instance.
*
*/
static int p9_tag_init(struct p9_client *c)
{
int err = 0;
c->tagpool = p9_idpool_create();
if (IS_ERR(c->tagpool)) {
err = PTR_ERR(c->tagpool);
c->tagpool = NULL;
goto error;
}
p9_idpool_get(c->tagpool); /* reserve tag 0 */
c->max_tag = 0;
error:
return err;
}
/**
* p9_tag_cleanup - cleans up tags structure and reclaims resources
* @c: client instance whose tag structures are being reclaimed
*
* This frees resources associated with the tags structure
*
*/
static void p9_tag_cleanup(struct p9_client *c)
{
int row, col;
/* check to ensure all requests are idle */
for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
for (col = 0; col < P9_ROW_MAXTAG; col++) {
if (c->reqs[row][col].status != REQ_STATUS_IDLE) {
P9_DPRINTK(P9_DEBUG_MUX,
"Attempting to cleanup non-free tag %d,%d\n",
row, col);
/* TODO: delay execution of cleanup */
return;
}
}
}
if (c->tagpool)
p9_idpool_destroy(c->tagpool);
/* free requests associated with tags */
for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
for (col = 0; col < P9_ROW_MAXTAG; col++)
kfree(c->reqs[row][col].wq);
kfree(c->reqs[row]);
}
c->max_tag = 0;
}
static struct p9_fid *p9_fid_create(struct p9_client *clnt)
{
int err;
@@ -209,6 +357,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
goto error;
}
p9_tag_init(clnt);
err = parse_opts(options, clnt);
if (err < 0)
goto error;
@@ -285,6 +435,8 @@ void p9_client_destroy(struct p9_client *clnt)
if (clnt->fidpool)
p9_idpool_destroy(clnt->fidpool);
p9_tag_cleanup(clnt);
kfree(clnt);
}
EXPORT_SYMBOL(p9_client_destroy);
/*
* The Guest 9p transport driver
* The Virtio 9p transport driver
*
* This is a block based transport driver based on the lguest block driver
* code.
*
*/
/*
* Copyright (C) 2007 Eric Van Hensbergen, IBM Corporation
* Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
*
* Based on virtio console driver
* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
@@ -54,49 +52,6 @@ static DEFINE_MUTEX(virtio_9p_lock);
/* global which tracks highest initialized channel */
static int chan_index;
#define P9_INIT_MAXTAG 16
/**
* enum p9_req_status_t - virtio request status
* @REQ_STATUS_IDLE: request slot unused
* @REQ_STATUS_SENT: request sent to server
* @REQ_STATUS_RCVD: response received from server
* @REQ_STATUS_FLSH: request has been flushed
*
* The @REQ_STATUS_IDLE state is used to mark a request slot as unused
* but use is actually tracked by the idpool structure which handles tag
* id allocation.
*
*/
enum p9_req_status_t {
REQ_STATUS_IDLE,
REQ_STATUS_SENT,
REQ_STATUS_RCVD,
REQ_STATUS_FLSH,
};
/**
* struct p9_req_t - virtio request slots
* @status: status of this request slot
* @wq: wait_queue for the client to block on for this request
*
* The virtio transport uses an array to track outstanding requests
* instead of a list. While this may incur overhead during initial
* allocation or expansion, it makes request lookup much easier as the
* tag id is an index into an array. (We use tag+1 so that we can
* accommodate the -1 tag for the T_VERSION request.)
* This also has the nice effect of only having to allocate wait_queues
* once, instead of constantly allocating and freeing them. It's possible
* other resources could benefit from this scheme as well.
*
*/
struct p9_req_t {
int status;
wait_queue_head_t *wq;
};
/**
* struct virtio_chan - per-instance transport information
* @initialized: whether the channel is initialized
@@ -121,67 +76,14 @@ static struct virtio_chan {
spinlock_t lock;
struct p9_client *client;
struct virtio_device *vdev;
struct virtqueue *vq;
struct p9_idpool *tagpool;
struct p9_req_t *reqs;
int max_tag;
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[VIRTQUEUE_NUM];
} channels[MAX_9P_CHAN];
/**
* p9_lookup_tag - Lookup requests by tag
* @c: virtio channel to lookup tag within
* @tag: numeric id for transaction
*
* this is a simple array lookup, but will grow the
* request_slots as necessary to accommodate transaction
* ids which did not previously have a slot.
*
* Bugs: there is currently no upper limit on request slots set
* here, but that should be constrained by the id accounting.
*/
static struct p9_req_t *p9_lookup_tag(struct virtio_chan *c, u16 tag)
{
/* This looks up the original request by tag so we know which
* buffer to read the data into */
tag++;
while (tag >= c->max_tag) {
int old_max = c->max_tag;
int count;
if (c->max_tag)
c->max_tag *= 2;
else
c->max_tag = P9_INIT_MAXTAG;
c->reqs = krealloc(c->reqs, sizeof(struct p9_req_t)*c->max_tag,
GFP_ATOMIC);
if (!c->reqs) {
printk(KERN_ERR "Couldn't grow tag array\n");
BUG();
}
for (count = old_max; count < c->max_tag; count++) {
c->reqs[count].status = REQ_STATUS_IDLE;
c->reqs[count].wq = kmalloc(sizeof(wait_queue_head_t),
GFP_ATOMIC);
if (!c->reqs[count].wq) {
printk(KERN_ERR "Couldn't grow tag array\n");
BUG();
}
init_waitqueue_head(c->reqs[count].wq);
}
}
return &c->reqs[tag];
}
/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
{
@@ -200,22 +102,10 @@ static unsigned int rest_of_page(void *data)
static void p9_virtio_close(struct p9_client *client)
{
struct virtio_chan *chan = client->trans;
int count;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
p9_idpool_destroy(chan->tagpool);
for (count = 0; count < chan->max_tag; count++)
kfree(chan->reqs[count].wq);
kfree(chan->reqs);
chan->max_tag = 0;
spin_unlock_irqrestore(&chan->lock, flags);
mutex_lock(&virtio_9p_lock);
chan->inuse = false;
mutex_unlock(&virtio_9p_lock);
client->trans = NULL;
}
/**
@@ -241,7 +131,7 @@ static void req_done(struct virtqueue *vq)
spin_lock_irqsave(&chan->lock, flags);
while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
req = p9_lookup_tag(chan, rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
req->status = REQ_STATUS_RCVD;
wake_up(req->wq);
}
@@ -311,13 +201,13 @@ p9_virtio_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc)
n = P9_NOTAG;
if (tc->id != P9_TVERSION) {
n = p9_idpool_get(chan->tagpool);
n = p9_idpool_get(c->tagpool);
if (n < 0)
return -ENOMEM;
}
spin_lock_irqsave(&chan->lock, flags);
req = p9_lookup_tag(chan, n);
req = p9_tag_alloc(c, n);
spin_unlock_irqrestore(&chan->lock, flags);
p9_set_tag(tc, n);
@@ -357,8 +247,8 @@ p9_virtio_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc)
}
#endif
if (n != P9_NOTAG && p9_idpool_check(n, chan->tagpool))
p9_idpool_put(n, chan->tagpool);
if (n != P9_NOTAG && p9_idpool_check(n, c->tagpool))
p9_idpool_put(n, c->tagpool);
req->status = REQ_STATUS_IDLE;
@@ -463,16 +353,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
return -ENODEV;
}
chan->tagpool = p9_idpool_create();
if (IS_ERR(chan->tagpool)) {
printk(KERN_ERR "9p: couldn't allocate tagpool\n");
return -ENOMEM;
}
p9_idpool_get(chan->tagpool); /* reserve tag 0 */
chan->max_tag = 0;
chan->reqs = NULL;
client->trans = (void *)chan;
chan->client = client;
return 0;
}