/*
 * SCSI target lib functions
 *
 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_tgt.h>

#include "scsi_tgt_priv.h"

static struct workqueue_struct *scsi_tgtd;
static struct kmem_cache *scsi_tgt_cmd_cache;

/*
 * TODO: this struct will be killed when the block layer supports large bios
 * and James's work struct code is in
 */
struct scsi_tgt_cmd {
	/* TODO replace work with James b's code */
	struct work_struct work;
45 46
	/* TODO fix limits of some drivers */
	struct bio *bio;
47 48 49

	struct list_head hash_list;
	struct request *rq;
50
	u64 itn_id;
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92
	u64 tag;
};

/* 16-bucket hash of in-flight commands, keyed by the LLD tag */
#define TGT_HASH_ORDER	4
#define cmd_hashfn(tag)	hash_long((unsigned long) (tag), TGT_HASH_ORDER)

/* Per-request_queue private data for the message-passing queue. */
struct scsi_tgt_queuedata {
	struct Scsi_Host *shost;	/* owning host */
	struct list_head cmd_hash[1 << TGT_HASH_ORDER];
	spinlock_t cmd_hash_lock;	/* protects cmd_hash lists */
};

/*
 * Function:	scsi_host_get_command()
 *
 * Purpose:	Allocate and setup a scsi command block and blk request
 *
 * Arguments:	shost	- scsi host
 *		data_dir - dma data dir
 *		gfp_mask- allocator flags
 *
 * Returns:	The allocated scsi command structure, or NULL on failure.
 *
 * This should be called by target LLDs to get a command.
 */
struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
					enum dma_data_direction data_dir,
					gfp_t gfp_mask)
{
	int write = (data_dir == DMA_TO_DEVICE);
	struct request *rq;
	struct scsi_cmnd *cmd;
	struct scsi_tgt_cmd *tcmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&shost->shost_gendev))
		return NULL;

	/*
	 * NOTE(review): gfp_mask is ignored here and the allocation is
	 * always atomic — confirm this is intentional.
	 */
	tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
	if (!tcmd)
		goto put_dev;

	/*
	 * The blk helpers are used to the READ/WRITE requests
	 * transfering data from a initiator point of view. Since
	 * we are in target mode we want the opposite.
	 */
	rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
	if (!rq)
		goto free_tcmd;

	cmd = __scsi_get_command(shost, gfp_mask);
	if (!cmd)
		goto release_rq;

	cmd->sc_data_direction = data_dir;
	cmd->jiffies_at_alloc = jiffies;
	cmd->request = rq;

	/* the CDB storage lives in the request */
	cmd->cmnd = rq->cmd;

	rq->special = cmd;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
	rq->end_io_data = tcmd;

	tcmd->rq = rq;

	return cmd;

release_rq:
	blk_put_request(rq);
free_tcmd:
	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
put_dev:
	put_device(&shost->shost_gendev);
	return NULL;

}
EXPORT_SYMBOL_GPL(scsi_host_get_command);

/*
 * Function:	scsi_host_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	shost	- scsi host
 * 		cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	struct request_queue *uspace_q = shost->uspace_req_q;
	struct scsi_tgt_cmd *target_cmd = req->end_io_data;
	unsigned long irq_flags;

	/* release the per-command bookkeeping first */
	kmem_cache_free(scsi_tgt_cmd_cache, target_cmd);

	/* drop the blk request under the queue lock */
	spin_lock_irqsave(uspace_q->queue_lock, irq_flags);
	__blk_put_request(uspace_q, req);
	spin_unlock_irqrestore(uspace_q->queue_lock, irq_flags);

	/* give back the scsi command and the host device reference */
	__scsi_put_command(shost, cmd, &shost->shost_gendev);
}
EXPORT_SYMBOL_GPL(scsi_host_put_command);

/* Unlink a command's bookkeeping entry from the per-queue tag hash. */
static void cmd_hashlist_del(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *target_cmd = cmd->request->end_io_data;
	struct scsi_tgt_queuedata *queue_data = cmd->request->q->queuedata;
	unsigned long irq_flags;

	spin_lock_irqsave(&queue_data->cmd_hash_lock, irq_flags);
	list_del(&target_cmd->hash_list);
	spin_unlock_irqrestore(&queue_data->cmd_hash_lock, irq_flags);
}

/* Tear down the user-page mapping created by blk_rq_map_user(). */
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
{
	blk_rq_unmap_user(tcmd->bio);
}

178
static void scsi_tgt_cmd_destroy(struct work_struct *work)
179
{
180 181 182
	struct scsi_tgt_cmd *tcmd =
		container_of(work, struct scsi_tgt_cmd, work);
	struct scsi_cmnd *cmd = tcmd->rq->special;
183

184
	dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction,
185 186 187 188 189 190
		rq_data_dir(cmd->request));
	scsi_unmap_user_pages(tcmd);
	scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
}

/*
 * Initialize the per-command bookkeeping and insert it into the
 * per-queue tag hash so the userspace reply can find it later.
 */
static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
			      u64 itn_id, u64 tag)
{
	struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
	unsigned long flags;
	struct list_head *head;

	tcmd->itn_id = itn_id;
	tcmd->tag = tag;
	tcmd->bio = NULL;
	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
	head = &qdata->cmd_hash[cmd_hashfn(tag)];
	list_add(&tcmd->hash_list, head);
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
}

/*
 * scsi_tgt_alloc_queue - setup queue used for message passing
 * shost: scsi host
 *
 * This should be called by the LLD after host allocation.
 * And will be released when the host is released.
 */
int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
{
	struct scsi_tgt_queuedata *queuedata;
	struct request_queue *q;
	int err, i;

	/*
	 * Do we need to send a netlink event or should uspace
	 * just respond to the hotplug event?
	 */
	q = __scsi_alloc_queue(shost, NULL);
	if (!q)
		return -ENOMEM;

	queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
	if (!queuedata) {
		err = -ENOMEM;
		goto cleanup_queue;
	}
	queuedata->shost = shost;
	q->queuedata = queuedata;

	/*
	 * this is a silly hack. We should probably just queue as many
	 * command as is recvd to userspace. uspace can then make
	 * sure we do not overload the HBA
	 */
	q->nr_requests = shost->can_queue;
	/*
	 * We currently only support software LLDs so this does
	 * not matter for now. Do we need this for the cards we support?
	 * If so we should make it a host template value.
	 */
	blk_queue_dma_alignment(q, 0);
	shost->uspace_req_q = q;

	/* hash of outstanding commands, keyed by tag */
	for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
		INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
	spin_lock_init(&queuedata->cmd_hash_lock);

	return 0;

cleanup_queue:
	blk_cleanup_queue(q);
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);

/*
 * scsi_tgt_free_queue - abort and destroy every outstanding command
 * still hashed on the host's message-passing queue.
 */
void scsi_tgt_free_queue(struct Scsi_Host *shost)
{
	int i;
	unsigned long flags;
	struct request_queue *q = shost->uspace_req_q;
	struct scsi_cmnd *cmd;
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	struct scsi_tgt_cmd *tcmd, *n;
	LIST_HEAD(cmds);

	/* move all hashed commands to a private list under the lock */
	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);

	for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
		list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
					 hash_list) {
			list_del(&tcmd->hash_list);
			list_add(&tcmd->hash_list, &cmds);
		}
	}

	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);

	/* abort each command via the LLD, then free it synchronously */
	while (!list_empty(&cmds)) {
		tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
		list_del(&tcmd->hash_list);
		cmd = tcmd->rq->special;

		shost->hostt->eh_abort_handler(cmd);
		scsi_tgt_cmd_destroy(&tcmd->work);
	}
}
EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);

/* Map a target command back to the Scsi_Host that owns its queue. */
struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_queuedata *qdata = cmd->request->q->queuedata;

	return qdata->shost;
}
EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);

/*
 * scsi_tgt_queue_command - queue command for userspace processing
 * @cmd:	scsi command
 * @scsilun:	scsi lun
 * @tag:	unique value to identify this command for tmf
 */
308 309
int scsi_tgt_queue_command(struct scsi_cmnd *cmd, u64 itn_id,
			   struct scsi_lun *scsilun, u64 tag)
310 311 312 313
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
	int err;

314 315
	init_scsi_tgt_cmd(cmd->request, tcmd, itn_id, tag);
	err = scsi_tgt_uspace_send_cmd(cmd, itn_id, scsilun, tag);
316 317 318 319 320 321 322 323
	if (err)
		cmd_hashlist_del(cmd);

	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);

/*
J
Joe Perches 已提交
324
 * This is run from a interrupt handler normally and the unmap
325 326 327 328 329 330
 * needs process context so we must queue
 */
static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;

331
	dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
332

333
	scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
334

335
	scsi_release_buffers(cmd);
336

337 338 339
	queue_work(scsi_tgtd, &tcmd->work);
}

340
static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
341 342 343 344
{
	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
	int err;

345
	dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
346 347 348 349 350 351 352 353 354 355 356 357

	err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
	switch (err) {
	case SCSI_MLQUEUE_HOST_BUSY:
	case SCSI_MLQUEUE_DEVICE_BUSY:
		return -EAGAIN;
	}
	return 0;
}

/* TODO: test this crap and replace bio_map_user with new interface maybe */
static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
358
			       unsigned long uaddr, unsigned int len, int rw)
359 360 361 362 363
{
	struct request_queue *q = cmd->request->q;
	struct request *rq = cmd->request;
	int err;

364
	dprintk("%lx %u\n", uaddr, len);
365
	err = blk_rq_map_user(q, rq, (void *)uaddr, len, GFP_KERNEL);
366
	if (err) {
367
		/*
368 369 370 371 372 373 374
		 * TODO: need to fixup sg_tablesize, max_segment_size,
		 * max_sectors, etc for modern HW and software drivers
		 * where this value is bogus.
		 *
		 * TODO2: we can alloc a reserve buffer of max size
		 * we can handle and do the slow copy path for really large
		 * IO.
375
		 */
376 377
		eprintk("Could not handle request of size %u.\n", len);
		return err;
378 379
	}

380
	tcmd->bio = rq->bio;
381 382 383
	err = scsi_init_io(cmd, GFP_KERNEL);
	if (err) {
		scsi_release_buffers(cmd);
384
		goto unmap_rq;
385
	}
386 387 388 389 390
	/*
	 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
	 * length for us.
	 */
	cmd->sdb.length = rq->data_len;
391 392 393

	return 0;

394 395
unmap_rq:
	scsi_unmap_user_pages(tcmd);
396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413
	return err;
}

static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
				unsigned len)
{
	char __user *p = (char __user *) uaddr;

	if (copy_from_user(cmd->sense_buffer, p,
			   min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
		printk(KERN_ERR "Could not copy the sense buffer\n");
		return -EIO;
	}
	return 0;
}

static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
414
	struct scsi_tgt_cmd *tcmd;
415 416 417 418 419 420
	int err;

	err = shost->hostt->eh_abort_handler(cmd);
	if (err)
		eprintk("fail to abort %p\n", cmd);

421 422
	tcmd = cmd->request->end_io_data;
	scsi_tgt_cmd_destroy(&tcmd->work);
423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447
	return err;
}

/*
 * Find the in-flight request matching @tag in the per-queue hash,
 * unlink its entry, and return it (NULL when no match).
 */
static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
{
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	struct scsi_tgt_cmd *entry;
	struct request *found = NULL;
	struct list_head *bucket;
	unsigned long irq_flags;

	bucket = &qdata->cmd_hash[cmd_hashfn(tag)];
	spin_lock_irqsave(&qdata->cmd_hash_lock, irq_flags);
	list_for_each_entry(entry, bucket, hash_list) {
		if (entry->tag == tag) {
			found = entry->rq;
			list_del(&entry->hash_list);
			break;
		}
	}
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, irq_flags);

	return found;
}

448
int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
449 450
			 unsigned long uaddr, u32 len, unsigned long sense_uaddr,
			 u32 sense_len, u8 rw)
451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481
{
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *rq;
	struct scsi_tgt_cmd *tcmd;
	int err = 0;

	dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
		result, len, uaddr, rw);

	/* TODO: replace with a O(1) alg */
	shost = scsi_host_lookup(host_no);
	if (IS_ERR(shost)) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return -EINVAL;
	}

	if (!shost->uspace_req_q) {
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

	rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
	if (!rq) {
		printk(KERN_ERR "Could not find tag %llu\n",
		       (unsigned long long) tag);
		err = -EINVAL;
		goto done;
	}
	cmd = rq->special;

482 483
	dprintk("cmd %p scb %x result %d len %d bufflen %u %u %x\n",
		cmd, cmd->cmnd[0], result, len, scsi_bufflen(cmd),
484
		rq_data_dir(rq), cmd->cmnd[0]);
485 486 487 488 489 490 491 492 493 494 495 496

	if (result == TASK_ABORTED) {
		scsi_tgt_abort_cmd(shost, cmd);
		goto done;
	}
	/*
	 * store the userspace values here, the working values are
	 * in the request_* values
	 */
	tcmd = cmd->request->end_io_data;
	cmd->result = result;

497 498
	if (cmd->result == SAM_STAT_CHECK_CONDITION)
		scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len);
499

500 501 502
	if (len) {
		err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw);
		if (err) {
503 504 505 506
			/*
			 * user-space daemon bugs or OOM
			 * TODO: we can do better for OOM.
			 */
507 508 509 510
			struct scsi_tgt_queuedata *qdata;
			struct list_head *head;
			unsigned long flags;

511 512
			eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n",
				cmd, err, uaddr, len, rw);
513 514 515 516 517 518 519 520 521

			qdata = shost->uspace_req_q->queuedata;
			head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)];

			spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
			list_add(&tcmd->hash_list, head);
			spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);

			goto done;
522
		}
523
	}
524
	err = scsi_tgt_transfer_response(cmd);
525 526 527 528 529
done:
	scsi_host_put(shost);
	return err;
}

530 531 532
int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, u64 itn_id,
			      int function, u64 tag, struct scsi_lun *scsilun,
			      void *data)
533 534 535 536
{
	int err;

	/* TODO: need to retry if this fails. */
537 538
	err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, itn_id,
					    function, tag, scsilun, data);
539 540 541 542 543 544
	if (err < 0)
		eprintk("The task management request lost!\n");
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);

545
int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562
{
	struct Scsi_Host *shost;
	int err = -EINVAL;

	dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);

	shost = scsi_host_lookup(host_no);
	if (IS_ERR(shost)) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return err;
	}

	if (!shost->uspace_req_q) {
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

563
	err = shost->transportt->tsk_mgmt_response(shost, itn_id, mid, result);
564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602
done:
	scsi_host_put(shost);
	return err;
}

/*
 * scsi_tgt_it_nexus_create - notify userspace of a new I_T nexus.
 * Returns the send result (negative on failure).
 */
int scsi_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
			     char *initiator)
{
	int err;

	/* TODO: need to retry if this fails. */
	err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no, itn_id, 0,
						    initiator);
	if (err < 0)
		/* fixed typo: "i_t_neuxs" -> "i_t_nexus" */
		eprintk("The i_t_nexus request lost, %d %llx!\n",
			shost->host_no, (unsigned long long)itn_id);
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_create);

/*
 * scsi_tgt_it_nexus_destroy - notify userspace that an I_T nexus is
 * gone.  Returns the send result (negative on failure).
 */
int scsi_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
{
	int err;

	/* TODO: need to retry if this fails. */
	err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no,
						    itn_id, 1, NULL);
	if (err < 0)
		/* fixed typo: "i_t_neuxs" -> "i_t_nexus" */
		eprintk("The i_t_nexus request lost, %d %llx!\n",
			shost->host_no, (unsigned long long)itn_id);
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_destroy);

/*
 * scsi_tgt_kspace_it_nexus_rsp - complete an I_T nexus request from
 * userspace by passing the result to the transport class.
 */
int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
{
	struct Scsi_Host *shost;
	int err = -EINVAL;

	/* fixed missing separator between result and itn_id */
	dprintk("%d %d %llx\n", host_no, result, (unsigned long long)itn_id);

	shost = scsi_host_lookup(host_no);
	if (IS_ERR(shost)) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return err;
	}

	if (!shost->uspace_req_q) {
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

	err = shost->transportt->it_nexus_response(shost, itn_id, result);
done:
	scsi_host_put(shost);
	return err;
}

/*
 * Module init: allocate the command slab, the scsi_tgtd workqueue and
 * register the userspace interface; unwinds on any failure.
 */
static int __init scsi_tgt_init(void)
{
	int err;

	scsi_tgt_cmd_cache = KMEM_CACHE(scsi_tgt_cmd, 0);
	if (!scsi_tgt_cmd_cache)
		return -ENOMEM;

	scsi_tgtd = create_workqueue("scsi_tgtd");
	if (!scsi_tgtd) {
		err = -ENOMEM;
		goto free_kmemcache;
	}

	err = scsi_tgt_if_init();
	if (err)
		goto destroy_wq;

	return 0;

destroy_wq:
	destroy_workqueue(scsi_tgtd);
free_kmemcache:
	kmem_cache_destroy(scsi_tgt_cmd_cache);
	return err;
}

/* Module exit: drain/destroy the workqueue, then tear down the
 * userspace interface and the command slab. */
static void __exit scsi_tgt_exit(void)
{
	destroy_workqueue(scsi_tgtd);
	scsi_tgt_if_exit();
	kmem_cache_destroy(scsi_tgt_cmd_cache);
}

module_init(scsi_tgt_init);
module_exit(scsi_tgt_exit);

MODULE_DESCRIPTION("SCSI target core");
MODULE_LICENSE("GPL");