/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License version 2.  See the file "COPYING" in the main directory of this
 *  archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
	BSG_F_WRITE_PERM	= 2,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

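/*
 * grab a free command slot: returns ERR_PTR(-EINVAL) once the device has
 * reached its queue depth limit, ERR_PTR(-ENOMEM) on allocation failure
 */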
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

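/* hash a minor number into the bsg_device_list buckets */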
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

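/*
 * wait for one of our queued commands to complete; returns -ENODATA when
 * nothing is left outstanding, -EAGAIN if the device is non-blocking
 */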
static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

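/*
 * copy the CDB in from user space and fill in the block-pc request fields
 * (command length, type and timeout), checking command permissions on the way
 */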
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						       &bd->flags));
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret =  blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

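/* pop the next completed command off the done list, if there is one */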
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry(bd->done_list.next, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

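/*
 * copy completion status, sense data and residual counts back into the
 * sg_io_v4 header, then unmap and release the request (and its bidi pair)
 */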
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->data_len;
		hdr->din_resid = rq->next_rq->data_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->data_len;
	else
		hdr->dout_resid = rq->data_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}

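/*
 * wait for every queued command to finish, then reap and discard the
 * completions; used when the device is being torn down on release
 */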
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

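/*
 * reap completed commands and copy one sg_io_v4 reply header per command
 * back to the user buffer
 */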
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

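/*
 * parse one sg_io_v4 header per struct-sized chunk of the user buffer, map
 * each to a request and queue it for asynchronous execution
 */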
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written);
	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->queue = rq;
	kobject_get(&rq->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));

	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor)
{
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	bd = __bsg_get_device(iminor(inode));
	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->queue, file);
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

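/*
 * char device ioctl entry point: handles the bsg queue depth ioctls, the
 * common SCSI/sg ioctls and synchronous SG_IO
 */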
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

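/*
 * remove the bsg class device, sysfs link and minor that were set up for
 * this queue
 */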
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	class_device_unregister(bcd->class_dev);
	put_device(bcd->dev);
	bcd->class_dev = NULL;
	bcd->dev = NULL;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

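/*
 * register a request queue with the bsg layer: allocate a minor, create the
 * bsg class device and link it from the queue's sysfs directory
 */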
int bsg_register_queue(struct request_queue *q, struct device *gdev,
		       const char *name)
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct class_device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = gdev->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->dev = get_device(gdev);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = class_device_create(bsg_class, NULL, dev, gdev, "%s",
					devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	class_device_unregister(class_dev);
put_dev:
	put_device(gdev);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);