提交 673d62cd 编写于 作者: Eric Van Hensbergen

9p: apply common request code to trans_fd

Apply the now common p9_req_t structure to the fd transport.
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
上级 ff683452
...@@ -49,11 +49,12 @@ enum p9_trans_status { ...@@ -49,11 +49,12 @@ enum p9_trans_status {
* enum p9_req_status_t - virtio request status * enum p9_req_status_t - virtio request status
* @REQ_STATUS_IDLE: request slot unused * @REQ_STATUS_IDLE: request slot unused
* @REQ_STATUS_ALLOC: request has been allocated but not sent * @REQ_STATUS_ALLOC: request has been allocated but not sent
* @REQ_STATUS_UNSENT: request waiting to be sent
* @REQ_STATUS_SENT: request sent to server * @REQ_STATUS_SENT: request sent to server
* @REQ_STATUS_FLSH: a flush has been sent for this request * @REQ_STATUS_FLSH: a flush has been sent for this request
* @REQ_STATUS_RCVD: response received from server * @REQ_STATUS_RCVD: response received from server
* @REQ_STATUS_FLSHD: request has been flushed * @REQ_STATUS_FLSHD: request has been flushed
* @REQ_STATUS_ERR: request encountered an error on the client side * @REQ_STATUS_ERROR: request encountered an error on the client side
* *
* The @REQ_STATUS_IDLE state is used to mark a request slot as unused * The @REQ_STATUS_IDLE state is used to mark a request slot as unused
* but use is actually tracked by the idpool structure which handles tag * but use is actually tracked by the idpool structure which handles tag
...@@ -64,6 +65,7 @@ enum p9_trans_status { ...@@ -64,6 +65,7 @@ enum p9_trans_status {
enum p9_req_status_t { enum p9_req_status_t {
REQ_STATUS_IDLE, REQ_STATUS_IDLE,
REQ_STATUS_ALLOC, REQ_STATUS_ALLOC,
REQ_STATUS_UNSENT,
REQ_STATUS_SENT, REQ_STATUS_SENT,
REQ_STATUS_FLSH, REQ_STATUS_FLSH,
REQ_STATUS_RCVD, REQ_STATUS_RCVD,
...@@ -79,6 +81,8 @@ enum p9_req_status_t { ...@@ -79,6 +81,8 @@ enum p9_req_status_t {
* @tc: the request fcall structure * @tc: the request fcall structure
* @rc: the response fcall structure * @rc: the response fcall structure
* @aux: transport specific data (provided for trans_fd migration) * @aux: transport specific data (provided for trans_fd migration)
* @tag: tag on request (BUG: redundant)
* @req_list: link for higher level objects to chain requests
* *
* Transport use an array to track outstanding requests * Transport use an array to track outstanding requests
* instead of a list. While this may incurr overhead during initial * instead of a list. While this may incurr overhead during initial
...@@ -99,6 +103,9 @@ struct p9_req_t { ...@@ -99,6 +103,9 @@ struct p9_req_t {
struct p9_fcall *rc; struct p9_fcall *rc;
u16 flush_tag; u16 flush_tag;
void *aux; void *aux;
int tag;
struct list_head req_list;
}; };
/** /**
...@@ -207,5 +214,6 @@ struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset); ...@@ -207,5 +214,6 @@ struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset);
struct p9_req_t *p9_tag_alloc(struct p9_client *, u16); struct p9_req_t *p9_tag_alloc(struct p9_client *, u16);
struct p9_req_t *p9_tag_lookup(struct p9_client *, u16); struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
void p9_free_req(struct p9_client *, struct p9_req_t *);
#endif /* NET_9P_CLIENT_H */ #endif /* NET_9P_CLIENT_H */
...@@ -268,6 +268,27 @@ static void p9_tag_cleanup(struct p9_client *c) ...@@ -268,6 +268,27 @@ static void p9_tag_cleanup(struct p9_client *c)
c->max_tag = 0; c->max_tag = 0;
} }
/**
 * p9_free_req - free a request and clean-up as necessary
 * @c: client state
 * @r: request to release
 *
 * Marks the request slot idle and returns its tag to the client's tag
 * pool.  The P9_NOTAG sentinel (used e.g. by TVERSION) is never drawn
 * from the pool, so it is skipped; p9_idpool_check() guards against
 * releasing a tag the pool does not own.
 */
void p9_free_req(struct p9_client *c, struct p9_req_t *r)
{
r->flush_tag = P9_NOTAG;
r->status = REQ_STATUS_IDLE;
if (r->tc->tag != P9_NOTAG && p9_idpool_check(r->tc->tag, c->tagpool))
p9_idpool_put(r->tc->tag, c->tagpool);
/* if this was a flush request we have to free response fcall */
if (r->tc->id == P9_TFLUSH) {
kfree(r->tc);
kfree(r->rc);
}
}
static struct p9_fid *p9_fid_create(struct p9_client *clnt) static struct p9_fid *p9_fid_create(struct p9_client *clnt)
{ {
int err; int err;
......
...@@ -44,7 +44,6 @@ ...@@ -44,7 +44,6 @@
#define P9_PORT 564 #define P9_PORT 564
#define MAX_SOCK_BUF (64*1024) #define MAX_SOCK_BUF (64*1024)
#define ERREQFLUSH 1
#define MAXPOLLWADDR 2 #define MAXPOLLWADDR 2
/** /**
...@@ -99,38 +98,6 @@ enum { ...@@ -99,38 +98,6 @@ enum {
Wpending = 8, /* can write */ Wpending = 8, /* can write */
}; };
/*
 * Per-request flush progression for the fd transport:
 * None - no flush in progress, Flushing - a TFLUSH has been issued
 * for this request, Flushed - the flush has completed.
 * (Superseded by the REQ_STATUS_FLSH/REQ_STATUS_FLSHD client states.)
 */
enum {
None,
Flushing,
Flushed,
};
/**
 * struct p9_req - fd mux encoding of an rpc transaction
 * @lock: protects req_list
 * @tag: numeric tag for rpc transaction
 * @tcall: request &p9_fcall structure
 * @rcall: response &p9_fcall structure
 * @err: error state
 * @flush: flag to indicate RPC has been flushed
 * @req_list: list link for higher level objects to chain requests
 * @m: connection this request was issued on
 * @wqueue: wait queue that client is blocked on for this rpc
 *
 * Legacy fd-transport per-request bookkeeping; replaced by the common
 * &p9_req_t structure shared across transports.
 */
struct p9_req {
spinlock_t lock;
int tag;
struct p9_fcall *tcall;
struct p9_fcall *rcall;
int err;
int flush;
struct list_head req_list;
struct p9_conn *m;
wait_queue_head_t wqueue;
};
struct p9_poll_wait { struct p9_poll_wait {
struct p9_conn *conn; struct p9_conn *conn;
wait_queue_t wait; wait_queue_t wait;
...@@ -139,7 +106,6 @@ struct p9_poll_wait { ...@@ -139,7 +106,6 @@ struct p9_poll_wait {
/** /**
* struct p9_conn - fd mux connection state information * struct p9_conn - fd mux connection state information
* @lock: protects mux_list (?)
* @mux_list: list link for mux to manage multiple connections (?) * @mux_list: list link for mux to manage multiple connections (?)
* @client: reference to client instance for this connection * @client: reference to client instance for this connection
* @err: error state * @err: error state
...@@ -161,7 +127,6 @@ struct p9_poll_wait { ...@@ -161,7 +127,6 @@ struct p9_poll_wait {
*/ */
struct p9_conn { struct p9_conn {
spinlock_t lock; /* protect lock structure */
struct list_head mux_list; struct list_head mux_list;
struct p9_client *client; struct p9_client *client;
int err; int err;
...@@ -205,64 +170,41 @@ static void p9_mux_poll_stop(struct p9_conn *m) ...@@ -205,64 +170,41 @@ static void p9_mux_poll_stop(struct p9_conn *m)
spin_unlock_irqrestore(&p9_poll_lock, flags); spin_unlock_irqrestore(&p9_poll_lock, flags);
} }
static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req) static void p9_conn_rpc_cb(struct p9_client *, struct p9_req_t *);
{
if (req->tag != P9_NOTAG &&
p9_idpool_check(req->tag, m->client->tagpool))
p9_idpool_put(req->tag, m->client->tagpool);
kfree(req);
}
static void p9_conn_rpc_cb(struct p9_req *req);
static void p9_mux_flush_cb(struct p9_req *freq) static void p9_mux_flush_cb(struct p9_client *client, struct p9_req_t *freq)
{ {
int tag; struct p9_conn *m = client->trans;
struct p9_conn *m = freq->m; struct p9_req_t *req;
struct p9_req *req, *rreq, *rptr;
P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
freq->tcall, freq->rcall, freq->err, freq->tc, freq->rc, freq->t_err,
freq->tcall->params.tflush.oldtag); freq->tc->params.tflush.oldtag);
spin_lock(&m->lock);
tag = freq->tcall->params.tflush.oldtag;
req = NULL;
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
if (rreq->tag == tag) {
req = rreq;
list_del(&req->req_list);
break;
}
}
spin_unlock(&m->lock);
req = p9_tag_lookup(client, freq->tc->params.tflush.oldtag);
if (req) { if (req) {
spin_lock(&req->lock); req->status = REQ_STATUS_FLSHD;
req->flush = Flushed; list_del(&req->req_list);
spin_unlock(&req->lock); p9_conn_rpc_cb(client, req);
p9_conn_rpc_cb(req);
} }
kfree(freq->tcall); p9_free_req(client, freq);
kfree(freq->rcall);
p9_mux_free_request(m, freq);
} }
static void p9_conn_rpc_cb(struct p9_req *req) static void p9_conn_rpc_cb(struct p9_client *client, struct p9_req_t *req)
{ {
P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req); P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req);
if (req->tcall->id == P9_TFLUSH) { /* flush callback */ if (req->tc->id == P9_TFLUSH) { /* flush callback */
P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req); P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req);
p9_mux_flush_cb(req); p9_mux_flush_cb(client, req);
} else { /* normal wakeup path */ } else { /* normal wakeup path */
P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req); P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req);
if (req->flush != None && !req->err) if (!req->t_err && (req->status == REQ_STATUS_FLSHD ||
req->err = -ERESTARTSYS; req->status == REQ_STATUS_FLSH))
req->t_err = -ERESTARTSYS;
wake_up(&req->wqueue); wake_up(req->wq);
} }
} }
...@@ -275,59 +217,62 @@ static void p9_conn_rpc_cb(struct p9_req *req) ...@@ -275,59 +217,62 @@ static void p9_conn_rpc_cb(struct p9_req *req)
void p9_conn_cancel(struct p9_conn *m, int err) void p9_conn_cancel(struct p9_conn *m, int err)
{ {
struct p9_req *req, *rtmp; struct p9_req_t *req, *rtmp;
LIST_HEAD(cancel_list); LIST_HEAD(cancel_list);
P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
m->err = err; m->err = err;
spin_lock(&m->lock); spin_lock(&m->client->lock);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
req->status = REQ_STATUS_ERROR;
if (!req->t_err)
req->t_err = err;
list_move(&req->req_list, &cancel_list); list_move(&req->req_list, &cancel_list);
} }
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
req->status = REQ_STATUS_ERROR;
if (!req->t_err)
req->t_err = err;
list_move(&req->req_list, &cancel_list); list_move(&req->req_list, &cancel_list);
} }
spin_unlock(&m->lock); spin_unlock(&m->client->lock);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
list_del(&req->req_list); list_del(&req->req_list);
if (!req->err) p9_conn_rpc_cb(m->client, req);
req->err = err;
p9_conn_rpc_cb(req);
} }
} }
static void process_request(struct p9_conn *m, struct p9_req *req) static void process_request(struct p9_conn *m, struct p9_req_t *req)
{ {
int ecode; int ecode;
struct p9_str *ename; struct p9_str *ename;
if (!req->err && req->rcall->id == P9_RERROR) { if (!req->t_err && req->rc->id == P9_RERROR) {
ecode = req->rcall->params.rerror.errno; ecode = req->rc->params.rerror.errno;
ename = &req->rcall->params.rerror.error; ename = &req->rc->params.rerror.error;
P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len, P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
ename->str); ename->str);
if (m->client->dotu) if (m->client->dotu)
req->err = -ecode; req->t_err = -ecode;
if (!req->err) { if (!req->t_err) {
req->err = p9_errstr2errno(ename->str, ename->len); req->t_err = p9_errstr2errno(ename->str, ename->len);
/* string match failed */ /* string match failed */
if (!req->err) { if (!req->t_err) {
PRINT_FCALL_ERROR("unknown error", req->rcall); PRINT_FCALL_ERROR("unknown error", req->rc);
req->err = -ESERVERFAULT; req->t_err = -ESERVERFAULT;
} }
} }
} else if (req->tcall && req->rcall->id != req->tcall->id + 1) { } else if (req->tc && req->rc->id != req->tc->id + 1) {
P9_DPRINTK(P9_DEBUG_ERROR, P9_DPRINTK(P9_DEBUG_ERROR,
"fcall mismatch: expected %d, got %d\n", "fcall mismatch: expected %d, got %d\n",
req->tcall->id + 1, req->rcall->id); req->tc->id + 1, req->rc->id);
if (!req->err) if (!req->t_err)
req->err = -EIO; req->t_err = -EIO;
} }
} }
...@@ -401,7 +346,7 @@ static void p9_read_work(struct work_struct *work) ...@@ -401,7 +346,7 @@ static void p9_read_work(struct work_struct *work)
{ {
int n, err; int n, err;
struct p9_conn *m; struct p9_conn *m;
struct p9_req *req, *rptr, *rreq; struct p9_req_t *req;
struct p9_fcall *rcall; struct p9_fcall *rcall;
char *rbuf; char *rbuf;
...@@ -488,24 +433,19 @@ static void p9_read_work(struct work_struct *work) ...@@ -488,24 +433,19 @@ static void p9_read_work(struct work_struct *work)
P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
rcall->id, rcall->tag); rcall->id, rcall->tag);
req = NULL; req = p9_tag_lookup(m->client, rcall->tag);
spin_lock(&m->lock);
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
if (rreq->tag == rcall->tag) {
req = rreq;
if (req->flush != Flushing)
list_del(&req->req_list);
break;
}
}
spin_unlock(&m->lock);
if (req) { if (req) {
req->rcall = rcall; if (req->status != REQ_STATUS_FLSH) {
list_del(&req->req_list);
req->status = REQ_STATUS_RCVD;
}
req->rc = rcall;
process_request(m, req); process_request(m, req);
if (req->flush != Flushing) if (req->status != REQ_STATUS_FLSH)
p9_conn_rpc_cb(req); p9_conn_rpc_cb(m->client, req);
} else { } else {
if (err >= 0 && rcall->id != P9_RFLUSH) if (err >= 0 && rcall->id != P9_RFLUSH)
P9_DPRINTK(P9_DEBUG_ERROR, P9_DPRINTK(P9_DEBUG_ERROR,
...@@ -580,7 +520,7 @@ static void p9_write_work(struct work_struct *work) ...@@ -580,7 +520,7 @@ static void p9_write_work(struct work_struct *work)
{ {
int n, err; int n, err;
struct p9_conn *m; struct p9_conn *m;
struct p9_req *req; struct p9_req_t *req;
m = container_of(work, struct p9_conn, wq); m = container_of(work, struct p9_conn, wq);
...@@ -595,18 +535,16 @@ static void p9_write_work(struct work_struct *work) ...@@ -595,18 +535,16 @@ static void p9_write_work(struct work_struct *work)
return; return;
} }
spin_lock(&m->lock); spin_lock(&m->client->lock);
again: req = list_entry(m->unsent_req_list.next, struct p9_req_t,
req = list_entry(m->unsent_req_list.next, struct p9_req,
req_list); req_list);
req->status = REQ_STATUS_SENT;
list_move_tail(&req->req_list, &m->req_list); list_move_tail(&req->req_list, &m->req_list);
if (req->err == ERREQFLUSH)
goto again;
m->wbuf = req->tcall->sdata; m->wbuf = req->tc->sdata;
m->wsize = req->tcall->size; m->wsize = req->tc->size;
m->wpos = 0; m->wpos = 0;
spin_unlock(&m->lock); spin_unlock(&m->client->lock);
} }
P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
...@@ -725,7 +663,6 @@ static struct p9_conn *p9_conn_create(struct p9_client *client) ...@@ -725,7 +663,6 @@ static struct p9_conn *p9_conn_create(struct p9_client *client)
if (!m) if (!m)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
spin_lock_init(&m->lock);
INIT_LIST_HEAD(&m->mux_list); INIT_LIST_HEAD(&m->mux_list);
m->client = client; m->client = client;
...@@ -812,30 +749,27 @@ static void p9_poll_mux(struct p9_conn *m) ...@@ -812,30 +749,27 @@ static void p9_poll_mux(struct p9_conn *m)
* *
*/ */
static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc) static struct p9_req_t *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
{ {
int tag;
int n; int n;
struct p9_req *req; struct p9_req_t *req;
P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current, P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
tc, tc->id); tc, tc->id);
if (m->err < 0) if (m->err < 0)
return ERR_PTR(m->err); return ERR_PTR(m->err);
req = kmalloc(sizeof(struct p9_req), GFP_KERNEL); tag = P9_NOTAG;
if (!req)
return ERR_PTR(-ENOMEM);
n = P9_NOTAG;
if (tc->id != P9_TVERSION) { if (tc->id != P9_TVERSION) {
n = p9_idpool_get(m->client->tagpool); tag = p9_idpool_get(m->client->tagpool);
if (n < 0) { if (tag < 0)
kfree(req);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
}
} }
p9_set_tag(tc, n); p9_set_tag(tc, tag);
req = p9_tag_alloc(m->client, tag);
#ifdef CONFIG_NET_9P_DEBUG #ifdef CONFIG_NET_9P_DEBUG
if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
...@@ -846,18 +780,15 @@ static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc) ...@@ -846,18 +780,15 @@ static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
} }
#endif #endif
spin_lock_init(&req->lock); req->tag = tag;
req->m = m; req->tc = tc;
init_waitqueue_head(&req->wqueue); req->rc = NULL;
req->tag = n; req->t_err = 0;
req->tcall = tc; req->status = REQ_STATUS_UNSENT;
req->rcall = NULL;
req->err = 0;
req->flush = None;
spin_lock(&m->lock); spin_lock(&m->client->lock);
list_add_tail(&req->req_list, &m->unsent_req_list); list_add_tail(&req->req_list, &m->unsent_req_list);
spin_unlock(&m->lock); spin_unlock(&m->client->lock);
if (test_and_clear_bit(Wpending, &m->wsched)) if (test_and_clear_bit(Wpending, &m->wsched))
n = POLLOUT; n = POLLOUT;
...@@ -871,39 +802,36 @@ static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc) ...@@ -871,39 +802,36 @@ static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
} }
static int static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req) p9_mux_flush_request(struct p9_conn *m, struct p9_req_t *req)
{ {
struct p9_fcall *fc; struct p9_fcall *fc;
struct p9_req *rreq, *rptr; struct p9_req_t *rreq, *rptr;
P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
/* if a response was received for a request, do nothing */ /* if a response was received for a request, do nothing */
spin_lock(&req->lock); if (req->rc || req->t_err) {
if (req->rcall || req->err) {
spin_unlock(&req->lock);
P9_DPRINTK(P9_DEBUG_MUX, P9_DPRINTK(P9_DEBUG_MUX,
"mux %p req %p response already received\n", m, req); "mux %p req %p response already received\n", m, req);
return 0; return 0;
} }
req->flush = Flushing; req->status = REQ_STATUS_FLSH;
spin_unlock(&req->lock);
spin_lock(&m->lock); spin_lock(&m->client->lock);
/* if the request is not sent yet, just remove it from the list */ /* if the request is not sent yet, just remove it from the list */
list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
if (rreq->tag == req->tag) { if (rreq->tag == req->tag) {
P9_DPRINTK(P9_DEBUG_MUX, P9_DPRINTK(P9_DEBUG_MUX,
"mux %p req %p request is not sent yet\n", m, req); "mux %p req %p request is not sent yet\n", m, req);
list_del(&rreq->req_list); list_del(&rreq->req_list);
req->flush = Flushed; req->status = REQ_STATUS_FLSHD;
spin_unlock(&m->lock); spin_unlock(&m->client->lock);
p9_conn_rpc_cb(req); p9_conn_rpc_cb(m->client, req);
return 0; return 0;
} }
} }
spin_unlock(&m->lock); spin_unlock(&m->client->lock);
clear_thread_flag(TIF_SIGPENDING); clear_thread_flag(TIF_SIGPENDING);
fc = p9_create_tflush(req->tag); fc = p9_create_tflush(req->tag);
...@@ -927,7 +855,7 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc) ...@@ -927,7 +855,7 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
struct p9_conn *m = p->conn; struct p9_conn *m = p->conn;
int err, sigpending; int err, sigpending;
unsigned long flags; unsigned long flags;
struct p9_req *req; struct p9_req_t *req;
if (rc) if (rc)
*rc = NULL; *rc = NULL;
...@@ -945,10 +873,10 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc) ...@@ -945,10 +873,10 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
return err; return err;
} }
err = wait_event_interruptible(req->wqueue, req->rcall != NULL || err = wait_event_interruptible(*req->wq, req->rc != NULL ||
req->err < 0); req->t_err < 0);
if (req->err < 0) if (req->t_err < 0)
err = req->err; err = req->t_err;
if (err == -ERESTARTSYS && client->status == Connected if (err == -ERESTARTSYS && client->status == Connected
&& m->err == 0) { && m->err == 0) {
...@@ -956,9 +884,9 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc) ...@@ -956,9 +884,9 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
/* wait until we get response of the flush message */ /* wait until we get response of the flush message */
do { do {
clear_thread_flag(TIF_SIGPENDING); clear_thread_flag(TIF_SIGPENDING);
err = wait_event_interruptible(req->wqueue, err = wait_event_interruptible(*req->wq,
req->rcall || req->err); req->rc || req->t_err);
} while (!req->rcall && !req->err && } while (!req->rc && !req->t_err &&
err == -ERESTARTSYS && err == -ERESTARTSYS &&
client->status == Connected && !m->err); client->status == Connected && !m->err);
...@@ -974,11 +902,11 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc) ...@@ -974,11 +902,11 @@ p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
} }
if (rc) if (rc)
*rc = req->rcall; *rc = req->rc;
else else
kfree(req->rcall); kfree(req->rc);
p9_mux_free_request(m, req); p9_free_req(client, req);
if (err > 0) if (err > 0)
err = -EIO; err = -EIO;
......
...@@ -247,10 +247,7 @@ p9_virtio_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc) ...@@ -247,10 +247,7 @@ p9_virtio_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc)
} }
#endif #endif
if (n != P9_NOTAG && p9_idpool_check(n, c->tagpool)) p9_free_req(c, req);
p9_idpool_put(n, c->tagpool);
req->status = REQ_STATUS_IDLE;
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册