/*
 * net/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/transport.h>
#include <net/9p/conn.h>

#define ERREQFLUSH	1
#define SCHED_TIMEOUT	10
#define MAXPOLLWADDR	2

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
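/*
 * These bits live in p9_conn->wsched.  The poll task sets Rpending/Wpending
 * when the transport reports POLLIN/POLLOUT, and Rworksched/Wworksched act
 * as "work already queued" guards so the read/write work functions are
 * queued on p9_mux_wq at most once at a time.
 */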

enum {
	None,
	Flushing,
	Flushed,
};
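/*
 * Per-request flush states (p9_req->flush): None for a normal request,
 * Flushing once a Tflush has been sent for it, and Flushed when the flush
 * completed (the request was removed before being sent, or the matching
 * Rflush arrived).
 */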

struct p9_mux_poll_task;

struct p9_req {
	spinlock_t lock; /* protect request structure */
	int tag;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	int err;
	p9_conn_req_callback cb;
	void *cba;
	int flush;
	struct list_head req_list;
};

struct p9_conn {
	spinlock_t lock; /* protect lock structure */
	struct list_head mux_list;
	struct p9_mux_poll_task *poll_task;
	int msize;
	unsigned char *extended;
	struct p9_trans *trans;
	struct p9_idpool *tagpool;
	int err;
	wait_queue_head_t equeue;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	wait_queue_t poll_wait[MAXPOLLWADDR];
	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};
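/*
 * rcall/rbuf/rpos stage the fcall currently being received and wbuf/wpos/
 * wsize the one currently being written; poll_wait/poll_waddr record the
 * wait queues this connection registered with via the transport's poll
 * method, so the poll task can be woken on transport activity.
 */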

struct p9_mux_poll_task {
	struct task_struct *task;
	struct list_head mux_list;
	int muxnum;
};

struct p9_mux_rpc {
	struct p9_conn *m;
	int err;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	wait_queue_head_t wqueue;
};
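/*
 * On-stack completion context for p9_conn_rpc(): p9_conn_rpc_cb() fills in
 * rcall/err and wakes wqueue when the response (or an error) arrives.
 */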

static int p9_poll_proc(void *);
static void p9_read_work(struct work_struct *work);
static void p9_write_work(struct work_struct *work);
static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
			  poll_table * p);
static u16 p9_mux_get_tag(struct p9_conn *);
static void p9_mux_put_tag(struct p9_conn *, u16);

static DEFINE_MUTEX(p9_mux_task_lock);
static struct workqueue_struct *p9_mux_wq;

static int p9_mux_num;
static int p9_mux_poll_task_num;
static struct p9_mux_poll_task p9_mux_poll_tasks[100];

int p9_mux_global_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
		p9_mux_poll_tasks[i].task = NULL;

	p9_mux_wq = create_workqueue("v9fs");
	if (!p9_mux_wq) {
		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
		return -ENOMEM;
	}

	return 0;
}

void p9_mux_global_exit(void)
{
	destroy_workqueue(p9_mux_wq);
}

/**
 * p9_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation keeps the number of polling procs roughly
 * equal to the square root of the number of mounts (for example, nine
 * mounts are served by three poll tasks).
 */
static int p9_mux_calc_poll_procs(int muxnum)
{
	int n;

	if (p9_mux_poll_task_num)
		n = muxnum / p9_mux_poll_task_num +
		    (muxnum % p9_mux_poll_task_num ? 1 : 0);
	else
		n = 1;

	if (n > ARRAY_SIZE(p9_mux_poll_tasks))
		n = ARRAY_SIZE(p9_mux_poll_tasks);

	return n;
}

static int p9_mux_poll_start(struct p9_conn *m)
{
	int i, n;
	struct p9_mux_poll_task *vpt, *vptlast;
	struct task_struct *pproc;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num,
		p9_mux_poll_task_num);
	mutex_lock(&p9_mux_task_lock);

	n = p9_mux_calc_poll_procs(p9_mux_num + 1);
	if (n > p9_mux_poll_task_num) {
		for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
			if (p9_mux_poll_tasks[i].task == NULL) {
				vpt = &p9_mux_poll_tasks[i];
				P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n",
									vpt);
				pproc = kthread_create(p9_poll_proc, vpt,
								"v9fs-poll");

				if (!IS_ERR(pproc)) {
					vpt->task = pproc;
					INIT_LIST_HEAD(&vpt->mux_list);
					vpt->muxnum = 0;
					p9_mux_poll_task_num++;
					wake_up_process(vpt->task);
				}
				break;
			}
		}

		if (i >= ARRAY_SIZE(p9_mux_poll_tasks))
			P9_DPRINTK(P9_DEBUG_ERROR,
					"warning: no free poll slots\n");
	}

	n = (p9_mux_num + 1) / p9_mux_poll_task_num +
	    ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0);

	vptlast = NULL;
	for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
		vpt = &p9_mux_poll_tasks[i];
		if (vpt->task != NULL) {
			vptlast = vpt;
			if (vpt->muxnum < n) {
				P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
				list_add(&m->mux_list, &vpt->mux_list);
				vpt->muxnum++;
				m->poll_task = vpt;
				memset(&m->poll_waddr, 0,
							sizeof(m->poll_waddr));
				init_poll_funcptr(&m->pt, p9_pollwait);
				break;
			}
		}
	}

	if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
		if (vptlast == NULL) {
			mutex_unlock(&p9_mux_task_lock);
			return -ENOMEM;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
		list_add(&m->mux_list, &vptlast->mux_list);
		vptlast->muxnum++;
		m->poll_task = vptlast;
		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
		init_poll_funcptr(&m->pt, p9_pollwait);
	}

	p9_mux_num++;
	mutex_unlock(&p9_mux_task_lock);

	return 0;
}

static void p9_mux_poll_stop(struct p9_conn *m)
{
	int i;
	struct p9_mux_poll_task *vpt;

	mutex_lock(&p9_mux_task_lock);
	vpt = m->poll_task;
	list_del(&m->mux_list);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (m->poll_waddr[i] != NULL) {
			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
			m->poll_waddr[i] = NULL;
		}
	}
	vpt->muxnum--;
	if (!vpt->muxnum) {
		P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt);
		kthread_stop(vpt->task);
		vpt->task = NULL;
		p9_mux_poll_task_num--;
	}
	p9_mux_num--;
	mutex_unlock(&p9_mux_task_lock);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct p9_conn *p9_conn_create(struct p9_trans *trans, int msize,
				    unsigned char *extended)
{
	int i, n;
	struct p9_conn *m, *mtmp;

	P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, msize);
	m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->msize = msize;
	m->extended = extended;
	m->trans = trans;
	m->tagpool = p9_idpool_create();
	if (IS_ERR(m->tagpool)) {
		mtmp = ERR_PTR(-ENOMEM);
		kfree(m);
		return mtmp;
	}

	m->err = 0;
	init_waitqueue_head(&m->equeue);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	m->rcall = NULL;
	m->rpos = 0;
	m->rbuf = NULL;
	m->wpos = m->wsize = 0;
	m->wbuf = NULL;
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	m->wsched = 0;
	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
	m->poll_task = NULL;
	n = p9_mux_poll_start(m);
	if (n) {
		kfree(m);
		return ERR_PTR(n);
	}

	n = trans->poll(trans, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (IS_ERR(m->poll_waddr[i])) {
			p9_mux_poll_stop(m);
			mtmp = (void *)m->poll_waddr;	/* the error code */
			kfree(m);
			m = mtmp;
			break;
		}
	}

	return m;
}
EXPORT_SYMBOL(p9_conn_create);
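/*
 * Typical lifetime as seen from a transport user (a sketch only, not code
 * taken from this file): create the mux once the transport is connected,
 * issue requests through p9_conn_rpc(), and destroy it before closing the
 * transport.
 *
 *	struct p9_conn *conn;
 *
 *	conn = p9_conn_create(trans, msize, &extended);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *	...
 *	p9_conn_destroy(conn);
 */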

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 */
void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);
	p9_conn_cancel(m, -ECONNRESET);

	if (!list_empty(&m->req_list)) {
		/* wait until all processes waiting on this session exit */
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p waiting for empty request queue\n", m);
		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
			list_empty(&m->req_list));
	}

	p9_mux_poll_stop(m);
	m->trans = NULL;
	p9_idpool_destroy(m->tagpool);
	kfree(m);
}
EXPORT_SYMBOL(p9_conn_destroy);

/**
 * p9_pollwait - called by the file's poll operation to add the v9fs-poll
 *	task to the file's wait queue
 */
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
	      poll_table * p)
{
	int i;
	struct p9_conn *m;

	m = container_of(p, struct p9_conn, pt);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
		if (m->poll_waddr[i] == NULL)
			break;

	if (i >= ARRAY_SIZE(m->poll_waddr)) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	m->poll_waddr[i] = wait_address;

	if (!wait_address) {
		P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
		m->poll_waddr[i] = ERR_PTR(-EIO);
		return;
	}

	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
	add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 */
static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = m->trans->poll(m->trans, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
}

/**
 * p9_poll_proc - polls all v9fs transports for new events and queues
 * 	the appropriate work to the work queue
 */
static int p9_poll_proc(void *a)
{
	struct p9_conn *m, *mtmp;
	struct p9_mux_poll_task *vpt;

	vpt = a;
	P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
			p9_poll_mux(m);
		}

		P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
		schedule_timeout(SCHED_TIMEOUT * HZ);
	}

	__set_current_state(TASK_RUNNING);
	P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
	return 0;
}

/**
 * p9_write_work - called when a transport can send some data
 */
static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct p9_req,
			       req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		spin_unlock(&m->lock);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
								m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

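/*
 * process_request - post-process a received reply: convert an Rerror into a
 * negative errno in req->err (using the numeric errno in extended mode, or
 * p9_errstr2errno() on the error string otherwise), and flag a reply whose
 * type does not match the request (rcall->id must be tcall->id + 1).
 */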
static void process_request(struct p9_conn *m, struct p9_req *req)
{
	int ecode;
	struct p9_str *ename;

	if (!req->err && req->rcall->id == P9_RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
								ename->str);

		if (*m->extended)
			req->err = -ecode;

		if (!req->err) {
			req->err = p9_errstr2errno(ename->str, ename->len);

			if (!req->err) {	/* string match failed */
				PRINT_FCALL_ERROR("unknown error", req->rcall);
			}

			if (!req->err)
				req->err = -ESERVERFAULT;
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		P9_DPRINTK(P9_DEBUG_ERROR,
				"fcall mismatch: expected %d, got %d\n",
				req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 */
static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req, *rptr, *rreq;
	struct p9_fcall *rcall;
	char *rbuf;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	rcall = NULL;
	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

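	/*
	 * Each 9P message starts with a 4-byte little-endian size field;
	 * once more than 4 bytes are buffered we can tell whether a full
	 * fcall has arrived and can be deserialized.
	 */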
	m->rpos += err;
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		err =
		    p9_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
		if (err < 0) {
			goto error;
		}

#ifdef CONFIG_NET_9P_DEBUG
		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
			char buf[150];

			p9_printfcall(buf, sizeof(buf), m->rcall,
				*m->extended);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}
#endif

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
					   GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
							rcall->id, rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				if (req->flush != Flushing)
					list_del(&req->req_list);
				break;
			}
		}
		spin_unlock(&m->lock);

		if (req) {
			req->rcall = rcall;
			process_request(m, req);

			if (req->flush != Flushing) {
				if (req->cb)
					(*req->cb) (req, req->cba);
				else
					kfree(req->rcall);

				wake_up(&m->equeue);
			}
		} else {
			if (err >= 0 && rcall->id != P9_RFLUSH)
				P9_DPRINTK(P9_DEBUG_ERROR,
				  "unexpected response mux %p id %d tag %d\n",
				  m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request was sent successfully: it can return
 * errors, which can be retrieved with the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct p9_req *p9_send_request(struct p9_conn *m,
					  struct p9_fcall *tc,
					  p9_conn_req_callback cb, void *cba)
{
	int n;
	struct p9_req *req;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == P9_TVERSION)
		n = P9_NOTAG;
	else
		n = p9_mux_get_tag(m);

	if (n < 0) {
		/* failed to get a tag; free the request before returning */
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}

	p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
		char buf[150];

		p9_printfcall(buf, sizeof(buf), tc, *m->extended);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}
#endif

	spin_lock_init(&req->lock);
	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;
	req->flush = None;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = m->trans->poll(m->trans, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return req;
}

static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
{
	p9_mux_put_tag(m, req->tag);
	kfree(req);
}

static void p9_mux_flush_cb(struct p9_req *freq, void *a)
{
	p9_conn_req_callback cb;
	int tag;
	struct p9_conn *m;
	struct p9_req *req, *rreq, *rptr;

	m = a;
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
		freq->tcall, freq->rcall, freq->err,
		freq->tcall->params.tflush.oldtag);

	spin_lock(&m->lock);
	cb = NULL;
	tag = freq->tcall->params.tflush.oldtag;
	req = NULL;
	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
		if (rreq->tag == tag) {
			req = rreq;
			list_del(&req->req_list);
			break;
		}
	}
	spin_unlock(&m->lock);

	if (req) {
		spin_lock(&req->lock);
		req->flush = Flushed;
		spin_unlock(&req->lock);

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);

		wake_up(&m->equeue);
	}

	kfree(freq->tcall);
	kfree(freq->rcall);
	p9_mux_free_request(m, freq);
}

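/*
 * p9_mux_flush_request - flush (cancel) a request on behalf of an
 * interrupted caller.  Returns 0 if the caller does not need to wait (a
 * response or error is already there, or the request was still unsent and
 * has been dropped), and 1 if a Tflush was sent and the caller must wait
 * for the flush to complete.
 */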
static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
	struct p9_fcall *fc;
	struct p9_req *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	/* if a response was received for a request, do nothing */
	spin_lock(&req->lock);
	if (req->rcall || req->err) {
		spin_unlock(&req->lock);
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p req %p response already received\n", m, req);
		return 0;
	}

	req->flush = Flushing;
	spin_unlock(&req->lock);

	spin_lock(&m->lock);
	/* if the request is not sent yet, just remove it from the list */
	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
		if (rreq->tag == req->tag) {
			P9_DPRINTK(P9_DEBUG_MUX,
			   "mux %p req %p request is not sent yet\n", m, req);
			list_del(&rreq->req_list);
			req->flush = Flushed;
			spin_unlock(&m->lock);
			if (req->cb)
				(*req->cb) (req, req->cba);
			return 0;
		}
	}
	spin_unlock(&m->lock);

	clear_thread_flag(TIF_SIGPENDING);
	fc = p9_create_tflush(req->tag);
	p9_send_request(m, fc, p9_mux_flush_cb, m);
	return 1;
}

static void
p9_conn_rpc_cb(struct p9_req *req, void *a)
{
	struct p9_mux_rpc *r;

	P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
	r = a;
	r->rcall = req->rcall;
	r->err = req->err;

	if (req->flush != None && !req->err)
		r->err = -ERESTARTSYS;

	wake_up(&r->wqueue);
}

/**
 * p9_conn_rpc - sends 9P request and waits until a response is available.
 *	The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
p9_conn_rpc(struct p9_conn *m, struct p9_fcall *tc,
	     struct p9_fcall **rc)
{
	int err, sigpending;
	unsigned long flags;
	struct p9_req *req;
	struct p9_mux_rpc r;

	r.err = 0;
	r.tcall = tc;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	if (rc)
		*rc = NULL;

	sigpending = 0;
	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	}

	req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return err;
	}

	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
	if (r.err < 0)
		err = r.err;

	if (err == -ERESTARTSYS && m->trans->status == Connected
							&& m->err == 0) {
		if (p9_mux_flush_request(m, req)) {
			/* wait until we get response of the flush message */
			do {
				clear_thread_flag(TIF_SIGPENDING);
				err = wait_event_interruptible(r.wqueue,
					r.rcall || r.err);
			} while (!r.rcall && !r.err && err == -ERESTARTSYS &&
				m->trans->status == Connected && !m->err);

			err = -ERESTARTSYS;
		}
		sigpending = 1;
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (rc)
		*rc = r.rcall;
	else
		kfree(r.rcall);

	p9_mux_free_request(m, req);
	if (err > 0)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(p9_conn_rpc);
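/*
 * Illustrative synchronous call pattern (a sketch, not code from this file;
 * p9_create_tversion() is assumed to come from the 9p conv helpers): build a
 * tcall, run it through p9_conn_rpc(), then free both fcalls.
 *
 *	struct p9_fcall *tc, *rc = NULL;
 *	int err;
 *
 *	tc = p9_create_tversion(msize, "9P2000.u");
 *	err = p9_conn_rpc(conn, tc, &rc);
 *	kfree(tc);
 *	kfree(rc);
 */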

#ifdef P9_NONBLOCK
/**
 * p9_conn_rpcnb - sends 9P request without waiting for response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @cba: value to pass to the callback function
 */
int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
		   p9_conn_req_callback cb, void *a)
{
	int err;
	struct p9_req *req;

	req = p9_send_request(m, tc, cb, a);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return PTR_ERR(req);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
	return 0;
}
EXPORT_SYMBOL(p9_conn_rpcnb);
#endif /* P9_NONBLOCK */

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		if (req->cb)
			(*req->cb) (req, req->cba);
		else
			kfree(req->rcall);
	}

	wake_up(&m->equeue);
}
EXPORT_SYMBOL(p9_conn_cancel);

static u16 p9_mux_get_tag(struct p9_conn *m)
{
	int tag;

	tag = p9_idpool_get(m->tagpool);
	if (tag < 0)
		return P9_NOTAG;
	else
		return (u16) tag;
}

static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
{
	if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
		p9_idpool_put(tag, m->tagpool);
}