/*
 * iSCSI Initiator over TCP/IP Data-Path
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 - 2006 Mike Christie
 * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
 * maintained by open-iscsi@googlegroups.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * See the file COPYING included with this distribution for more details.
 *
 * Credits:
 *	Christoph Hellwig
 *	FUJITA Tomonori
 *	Arne Redlich
 *	Zhenyu Wang
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>

#include "iscsi_tcp.h"

MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
#undef DEBUG_TCP
#define DEBUG_ASSERT

#ifdef DEBUG_TCP
#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
#else
#define debug_tcp(fmt...)
#endif

#ifndef DEBUG_ASSERT
#ifdef BUG_ON
#undef BUG_ON
#endif
#define BUG_ON(expr)
#endif

static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
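/* Highest LUN number the driver will expose per host (default 512); the
 * module parameter is read-only at runtime (S_IRUGO). */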

static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
				   struct iscsi_chunk *chunk);

static inline void
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
{
	ibuf->sg.page = virt_to_page(vbuf);
	ibuf->sg.offset = offset_in_page(vbuf);
	ibuf->sg.length = size;
	ibuf->sent = 0;
	ibuf->use_sendmsg = 1;
}

static inline void
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
{
	ibuf->sg.page = sg->page;
	ibuf->sg.offset = sg->offset;
	ibuf->sg.length = sg->length;
	/*
	 * Fastpath: sg element fits into single page
	 */
	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
		ibuf->use_sendmsg = 0;
	else
		ibuf->use_sendmsg = 1;
	ibuf->sent = 0;
}

static inline int
iscsi_buf_left(struct iscsi_buf *ibuf)
{
	int rc;

	rc = ibuf->sg.length - ibuf->sent;
	BUG_ON(rc < 0);
	return rc;
}

static inline void
iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
		 u8* crc)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
	buf->sg.length += ISCSI_DIGEST_SIZE;
}

/*
 * Scatterlist handling: inside the iscsi_chunk, we
 * remember an index into the scatterlist, and set data/size
 * to the current scatterlist entry. For highmem pages, we
 * kmap as needed.
 *
 * Note that the page is unmapped when we return from
 * TCP's data_ready handler, so we may end up mapping and
 * unmapping the same page repeatedly. The whole reason
 * for this is that we shouldn't keep the page mapped
 * outside the softirq.
 */

/**
 * iscsi_tcp_chunk_init_sg - init indicated scatterlist entry
 * @chunk: the buffer object
 * @idx: index into scatterlist
 * @offset: byte offset into that sg entry
 *
 * This function sets up the chunk so that subsequent
 * data is copied to the indicated sg entry, at the given
 * offset.
 */
static inline void
iscsi_tcp_chunk_init_sg(struct iscsi_chunk *chunk,
			unsigned int idx, unsigned int offset)
{
	struct scatterlist *sg;

	BUG_ON(chunk->sg == NULL);

	sg = &chunk->sg[idx];
	chunk->sg_index = idx;
	chunk->sg_offset = offset;
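	/* Limit this chunk to the smaller of the rest of this sg entry
	 * and the segment's total expected size. */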
	chunk->size = min(sg->length - offset, chunk->total_size);
	chunk->data = NULL;
}

/**
 * iscsi_tcp_chunk_map - map the current S/G page
 * @chunk: iscsi chunk
 *
 * We only need to possibly kmap data if scatter lists are being used,
 * because the iscsi passthrough and internal IO paths will never use high
 * mem pages.
 */
static inline void
iscsi_tcp_chunk_map(struct iscsi_chunk *chunk)
{
	struct scatterlist *sg;

	if (chunk->data != NULL || !chunk->sg)
		return;

	sg = &chunk->sg[chunk->sg_index];
	BUG_ON(chunk->sg_mapped);
	BUG_ON(sg->length == 0);
	chunk->sg_mapped = kmap_atomic(sg->page, KM_SOFTIRQ0);
	chunk->data = chunk->sg_mapped + sg->offset + chunk->sg_offset;
}

static inline void
iscsi_tcp_chunk_unmap(struct iscsi_chunk *chunk)
{
	if (chunk->sg_mapped) {
		kunmap_atomic(chunk->sg_mapped, KM_SOFTIRQ0);
		chunk->sg_mapped = NULL;
		chunk->data = NULL;
	}
}

/*
 * Splice the digest buffer into the buffer
 */
static inline void
iscsi_tcp_chunk_splice_digest(struct iscsi_chunk *chunk, void *digest)
{
	chunk->data = digest;
	chunk->digest_len = ISCSI_DIGEST_SIZE;
	chunk->total_size += ISCSI_DIGEST_SIZE;
	chunk->size = ISCSI_DIGEST_SIZE;
	chunk->copied = 0;
	chunk->sg = NULL;
	chunk->sg_index = 0;
	chunk->hash = NULL;
}

/**
 * iscsi_tcp_chunk_done - check whether the chunk is complete
 * @chunk: iscsi chunk to check
 *
 * Check if we're done receiving this chunk. If the receive
 * buffer is full but we expect more data, move on to the
 * next entry in the scatterlist.
 *
 * If the amount of data we received isn't a multiple of 4,
 * we will transparently receive the pad bytes, too.
 *
 * This function must be re-entrant.
 */
static inline int
iscsi_tcp_chunk_done(struct iscsi_chunk *chunk)
{
	static unsigned char padbuf[ISCSI_PAD_LEN];
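	/* Pad bytes are simply discarded, so one shared scratch buffer
	 * is enough to receive them into. */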
	unsigned int pad;

	if (chunk->copied < chunk->size) {
		iscsi_tcp_chunk_map(chunk);
		return 0;
	}

	chunk->total_copied += chunk->copied;
	chunk->copied = 0;
	chunk->size = 0;

	/* Unmap the current scatterlist page, if there is one. */
	iscsi_tcp_chunk_unmap(chunk);

	/* Do we have more scatterlist entries? */
	if (chunk->total_copied < chunk->total_size) {
		/* Proceed to the next entry in the scatterlist. */
		iscsi_tcp_chunk_init_sg(chunk, chunk->sg_index + 1, 0);
		iscsi_tcp_chunk_map(chunk);
		BUG_ON(chunk->size == 0);
		return 0;
	}

	/* Do we need to handle padding? */
	pad = iscsi_padding(chunk->total_copied);
	if (pad != 0) {
		debug_tcp("consume %d pad bytes\n", pad);
		chunk->total_size += pad;
		chunk->size = pad;
		chunk->data = padbuf;
		return 0;
	}

	/*
	 * Set us up for receiving the data digest. hdr digest
	 * is completely handled in hdr done function.
	 */
	if (chunk->hash) {
		if (chunk->digest_len == 0) {
			crypto_hash_final(chunk->hash, chunk->digest);
			iscsi_tcp_chunk_splice_digest(chunk,
						      chunk->recv_digest);
			return 0;
		}
	}

	return 1;
}

/**
 * iscsi_tcp_chunk_recv - copy data to chunk
 * @tcp_conn: the iSCSI TCP connection
 * @chunk: the buffer to copy to
 * @ptr: data pointer
 * @len: amount of data available
 *
 * This function copies up to @len bytes to the
 * given buffer, and returns the number of bytes
 * consumed, which can actually be less than @len.
 *
 * If hash digest is enabled, the function will update the
 * hash while copying.
 * Combining these two operations doesn't buy us a lot (yet),
 * but in the future we could implement combined copy+crc,
 * just the way we do for network layer checksums.
 */
static int
iscsi_tcp_chunk_recv(struct iscsi_tcp_conn *tcp_conn,
		     struct iscsi_chunk *chunk, const void *ptr,
		     unsigned int len)
{
	struct scatterlist sg;
	unsigned int copy, copied = 0;

	while (!iscsi_tcp_chunk_done(chunk)) {
		if (copied == len)
			goto out;

		copy = min(len - copied, chunk->size - chunk->copied);
		memcpy(chunk->data + chunk->copied, ptr + copied, copy);

		if (chunk->hash) {
			sg_init_one(&sg, ptr + copied, copy);
			crypto_hash_update(chunk->hash, &sg, copy);
		}
		chunk->copied += copy;
		copied += copy;
	}

out:
	return copied;
}

static inline void
iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
		      unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist sg;

	sg_init_one(&sg, hdr, hdrlen);
	crypto_hash_digest(hash, &sg, hdrlen, digest);
}

static inline int
iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
		      struct iscsi_chunk *chunk)
{
	if (!chunk->digest_len)
		return 1;

	if (memcmp(chunk->recv_digest, chunk->digest, chunk->digest_len)) {
		debug_scsi("digest mismatch\n");
		return 0;
	}

	return 1;
}

/*
 * Helper function to set up chunk buffer
 */
static inline void
__iscsi_chunk_init(struct iscsi_chunk *chunk, size_t size,
		   iscsi_chunk_done_fn_t *done, struct hash_desc *hash)
{
	memset(chunk, 0, sizeof(*chunk));
	chunk->total_size = size;
	chunk->done = done;

	if (hash) {
		chunk->hash = hash;
		crypto_hash_init(hash);
	}
}

static inline void
iscsi_chunk_init_linear(struct iscsi_chunk *chunk, void *data, size_t size,
			iscsi_chunk_done_fn_t *done, struct hash_desc *hash)
{
	__iscsi_chunk_init(chunk, size, done, hash);
	chunk->data = data;
	chunk->size = size;
}

static inline int
iscsi_chunk_seek_sg(struct iscsi_chunk *chunk,
		    struct scatterlist *sg, unsigned int sg_count,
		    unsigned int offset, size_t size,
		    iscsi_chunk_done_fn_t *done, struct hash_desc *hash)
{
	unsigned int i;

	__iscsi_chunk_init(chunk, size, done, hash);
	for (i = 0; i < sg_count; ++i) {
		if (offset < sg[i].length) {
			chunk->sg = sg;
			chunk->sg_count = sg_count;
			iscsi_tcp_chunk_init_sg(chunk, i, offset);
			return 0;
		}
		offset -= sg[i].length;
	}

	return ISCSI_ERR_DATA_OFFSET;
}

/**
 * iscsi_tcp_hdr_recv_prep - prep chunk for hdr reception
 * @tcp_conn: iscsi connection to prep for
 *
 * This function always passes NULL for the hash argument, because when this
 * function is called we do not yet know the final size of the header and want
 * to delay the digest processing until we know that.
 */
static void
iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
	debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
		  tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
	iscsi_chunk_init_linear(&tcp_conn->in.chunk,
				tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
				iscsi_tcp_hdr_recv_done, NULL);
}

/*
 * Handle incoming reply to any other type of command
 */
static int
iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
			 struct iscsi_chunk *chunk)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	int rc = 0;

	if (!iscsi_tcp_dgst_verify(tcp_conn, chunk))
		return ISCSI_ERR_DATA_DGST;

	rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
			conn->data, tcp_conn->in.datalen);
	if (rc)
		return rc;

	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;
}

static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct hash_desc *rx_hash = NULL;
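	/* Receive the PDU payload into the connection's generic data buffer,
	 * folding it into the data digest if digests are enabled. */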

	if (conn->datadgst_en)
		rx_hash = &tcp_conn->rx_hash;

	iscsi_chunk_init_linear(&tcp_conn->in.chunk,
				conn->data, tcp_conn->in.datalen,
				iscsi_tcp_data_recv_done, rx_hash);
}

/*
 * must be called with session lock
 */
static void
iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_r2t_info *r2t;
	struct scsi_cmnd *sc;

	/* flush ctask's r2t queues */
	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
	}

	sc = ctask->sc;
	if (unlikely(!sc))
		return;

	tcp_ctask->xmstate = XMSTATE_IDLE;
	tcp_ctask->r2t = NULL;
}

/**
 * iscsi_data_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = ctask->sc;
	int datasn = be32_to_cpu(rhdr->datasn);

	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
	/*
	 * setup Data-In byte counter (gets decremented..)
	 */
	ctask->data_count = tcp_conn->in.datalen;

	if (tcp_conn->in.datalen == 0)
		return 0;

	if (tcp_ctask->exp_datasn != datasn) {
		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
		          __FUNCTION__, tcp_ctask->exp_datasn, datasn);
		return ISCSI_ERR_DATASN;
	}

	tcp_ctask->exp_datasn++;

	tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
	if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
		          __FUNCTION__, tcp_ctask->data_offset,
		          tcp_conn->in.datalen, scsi_bufflen(sc));
		return ISCSI_ERR_DATA_OFFSET;
	}

	if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
		sc->result = (DID_OK << 16) | rhdr->cmd_status;
		conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
		if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
		                   ISCSI_FLAG_DATA_OVERFLOW)) {
			int res_count = be32_to_cpu(rhdr->residual_count);

			if (res_count > 0 &&
			    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
			     res_count <= scsi_bufflen(sc)))
				scsi_set_resid(sc, res_count);
			else
				sc->result = (DID_BAD_TARGET << 16) |
					rhdr->cmd_status;
		}
	}

	conn->datain_pdus_cnt++;
	return 0;
}

/**
 * iscsi_solicit_data_init - initialize first Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T info
 *
 * Notes:
 *	Initialize first Data-Out within this R2T sequence and find
 *	proper data_offset within this SCSI command.
 *
 *	This function is called with connection lock taken.
 **/
static void
iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_r2t_info *r2t)
{
	struct iscsi_data *hdr;
	struct scsi_cmnd *sc = ctask->sc;
	int i, sg_count = 0;
	struct scatterlist *sg;

	hdr = &r2t->dtask.hdr;
	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
	r2t->solicit_datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
	hdr->itt = ctask->hdr->itt;
	hdr->exp_statsn = r2t->exp_statsn;
	hdr->offset = cpu_to_be32(r2t->data_offset);
	if (r2t->data_length > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
		hdr->flags = 0;
	} else {
		hton24(hdr->dlength, r2t->data_length);
		r2t->data_count = r2t->data_length;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;

	r2t->sent = 0;

	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
			   sizeof(struct iscsi_hdr));

	sg = scsi_sglist(sc);
	r2t->sg = NULL;
	for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) {
		/* FIXME: prefetch ? */
		if (sg_count + sg->length > r2t->data_offset) {
			int page_offset;

			/* sg page found! */

			/* offset within this page */
			page_offset = r2t->data_offset - sg_count;

			/* fill in this buffer */
			iscsi_buf_init_sg(&r2t->sendbuf, sg);
			r2t->sendbuf.sg.offset += page_offset;
			r2t->sendbuf.sg.length -= page_offset;

			/* xmit logic will continue with next one */
			r2t->sg = sg + 1;
			break;
		}
		sg_count += sg->length;
	}
	BUG_ON(r2t->sg == NULL);
}

/**
 * iscsi_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_r2t_info *r2t;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
	int r2tsn = be32_to_cpu(rhdr->r2tsn);
	int rc;

	if (tcp_conn->in.datalen) {
		printk(KERN_ERR "iscsi_tcp: invalid R2T with datalen %d\n",
		       tcp_conn->in.datalen);
		return ISCSI_ERR_DATALEN;
	}

	if (tcp_ctask->exp_datasn != r2tsn){
		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
		          __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
		return ISCSI_ERR_R2TSN;
	}

	/* fill-in new R2T associated with the task */
	spin_lock(&session->lock);
	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);

	if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
		printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
		       "recovery...\n", ctask->itt);
		spin_unlock(&session->lock);
		return 0;
	}

	rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
	BUG_ON(!rc);

	r2t->exp_statsn = rhdr->statsn;
	r2t->data_length = be32_to_cpu(rhdr->data_length);
	if (r2t->data_length == 0) {
		printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
		spin_unlock(&session->lock);
		return ISCSI_ERR_DATALEN;
	}

	if (r2t->data_length > session->max_burst)
		debug_scsi("invalid R2T with data len %u and max burst %u. "
			   "Attempting to execute request.\n",
			    r2t->data_length, session->max_burst);

	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
	if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
		spin_unlock(&session->lock);
		printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
		       "offset %u and total length %d\n", r2t->data_length,
		       r2t->data_offset, scsi_bufflen(ctask->sc));
		return ISCSI_ERR_DATALEN;
	}

	r2t->ttt = rhdr->ttt; /* no flip */
	r2t->solicit_datasn = 0;

	iscsi_solicit_data_init(conn, ctask, r2t);

	tcp_ctask->exp_datasn = r2tsn + 1;
	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
	tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT;
	conn->r2t_pdus_cnt++;

	iscsi_requeue_ctask(ctask);
	spin_unlock(&session->lock);

	return 0;
}

/*
 * Handle incoming reply to DataIn command
 */
static int
iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
			  struct iscsi_chunk *chunk)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr = tcp_conn->in.hdr;
	int rc;

	if (!iscsi_tcp_dgst_verify(tcp_conn, chunk))
		return ISCSI_ERR_DATA_DGST;

	/* check for non-exceptional status */
	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
		if (rc)
			return rc;
	}

	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;
}

/**
 * iscsi_tcp_hdr_dissect - process PDU header
 * @conn: iSCSI connection
 * @hdr: PDU header
 *
 * This function analyzes the header of the PDU received,
 * and performs several sanity checks. If the PDU is accompanied
 * by data, the receive buffer is set up to copy the incoming data
 * to the correct location.
 */
static int
iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	int rc = 0, opcode, ahslen;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_cmd_task *ctask;
	uint32_t itt;

	/* verify PDU length */
	tcp_conn->in.datalen = ntoh24(hdr->dlength);
	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
		printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
		       tcp_conn->in.datalen, conn->max_recv_dlength);
		return ISCSI_ERR_DATALEN;
	}

	/* Additional header segments. So far, we don't
	 * process additional headers.
	 */
	ahslen = hdr->hlength << 2;

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	/* verify itt (itt encoding: age+cid+itt) */
	rc = iscsi_verify_itt(conn, hdr, &itt);
	if (rc == ISCSI_ERR_NO_SCSI_CMD) {
		/* XXX: what does this do? */
		tcp_conn->in.datalen = 0; /* force drop */
		return 0;
	} else if (rc)
		return rc;

	debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
		  opcode, ahslen, tcp_conn->in.datalen);

	switch(opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
		ctask = session->cmds[itt];
		rc = iscsi_data_rsp(conn, ctask);
		if (rc)
			return rc;
		if (tcp_conn->in.datalen) {
			struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
			struct hash_desc *rx_hash = NULL;

			/*
			 * Setup copy of Data-In into the Scsi_Cmnd
			 * Scatterlist case:
			 * We set up the iscsi_chunk to point to the next
			 * scatterlist entry to copy to. As we go along,
			 * we move on to the next scatterlist entry and
			 * update the digest per-entry.
			 */
			if (conn->datadgst_en)
				rx_hash = &tcp_conn->rx_hash;

			debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
				  "datalen=%d)\n", tcp_conn,
				  tcp_ctask->data_offset,
				  tcp_conn->in.datalen);
			return iscsi_chunk_seek_sg(&tcp_conn->in.chunk,
						scsi_sglist(ctask->sc),
						scsi_sg_count(ctask->sc),
						tcp_ctask->data_offset,
						tcp_conn->in.datalen,
						iscsi_tcp_process_data_in,
						rx_hash);
		}
		/* fall through */
	case ISCSI_OP_SCSI_CMD_RSP:
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	case ISCSI_OP_R2T:
		ctask = session->cmds[itt];
		if (ahslen)
			rc = ISCSI_ERR_AHSLEN;
		else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
			rc = iscsi_r2t_rsp(conn, ctask);
		else
			rc = ISCSI_ERR_PROTO;
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT:
	case ISCSI_OP_ASYNC_EVENT:
		/*
		 * It is possible that we could get a PDU with a buffer larger
		 * than 8K, but there are no targets that currently do this.
		 * For now we fail until we find a vendor that needs it
		 */
		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
			printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
			      "but conn buffer is only %u (opcode %0x)\n",
			      tcp_conn->in.datalen,
			      ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
			rc = ISCSI_ERR_PROTO;
			break;
		}

		/* If there's data coming in with the response,
		 * receive it to the connection's buffer.
		 */
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
	/* fall through */
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

	if (rc == 0) {
		/* Anything that comes with data should have
		 * been handled above. */
		if (tcp_conn->in.datalen)
			return ISCSI_ERR_PROTO;
		iscsi_tcp_hdr_recv_prep(tcp_conn);
	}

	return rc;
}

static inline void
partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
			 int offset, int length)
{
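	/* Hash just the sub-range of this sg entry that was actually
	 * transferred by pointing a temporary one-entry scatterlist at it. */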
	struct scatterlist temp;

	sg_init_table(&temp, 1);
	sg_set_page(&temp, sg_page(sg), length, offset);
	crypto_hash_update(desc, &temp, length);
}

/**
 * iscsi_tcp_hdr_recv_done - process PDU header
 *
 * This is the callback invoked when the PDU header has
 * been received. If the header is followed by additional
 * header segments, we go back for more data.
 */
static int
iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
			struct iscsi_chunk *chunk)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr;

	/* Check if there are additional header segments
	 * *prior* to computing the digest, because we
	 * may need to go back to the caller for more.
	 */
	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
	if (chunk->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
		/* Bump the header length - the caller will
		 * just loop around and get the AHS for us, and
		 * call again. */
		unsigned int ahslen = hdr->hlength << 2;

		/* Make sure we don't overflow */
		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
			return ISCSI_ERR_AHSLEN;

		chunk->total_size += ahslen;
		chunk->size += ahslen;
		return 0;
	}

	/* We're done processing the header. See if we're doing
	 * header digests; if so, set up the recv_digest buffer
	 * and go back for more. */
	if (conn->hdrdgst_en) {
		if (chunk->digest_len == 0) {
			iscsi_tcp_chunk_splice_digest(chunk,
						      chunk->recv_digest);
			return 0;
		}
		iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
				      chunk->total_copied - ISCSI_DIGEST_SIZE,
				      chunk->digest);

		if (!iscsi_tcp_dgst_verify(tcp_conn, chunk))
			return ISCSI_ERR_HDR_DGST;
	}

	tcp_conn->in.hdr = hdr;
	return iscsi_tcp_hdr_dissect(conn, hdr);
}

/**
 * iscsi_tcp_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 **/
static int
iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
	       unsigned int offset, size_t len)
{
	struct iscsi_conn *conn = rd_desc->arg.data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_chunk *chunk = &tcp_conn->in.chunk;
	struct skb_seq_state seq;
	unsigned int consumed = 0;
	int rc = 0;

	debug_tcp("in %d bytes\n", skb->len - offset);

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		return 0;
	}

	skb_prepare_seq_read(skb, offset, skb->len, &seq);
	while (1) {
		unsigned int avail;
		const u8 *ptr;

		avail = skb_seq_read(consumed, &ptr, &seq);
		if (avail == 0)
			break;
		BUG_ON(chunk->copied >= chunk->size);

		debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
		rc = iscsi_tcp_chunk_recv(tcp_conn, chunk, ptr, avail);
		BUG_ON(rc == 0);
		consumed += rc;

		if (chunk->total_copied >= chunk->total_size) {
			rc = chunk->done(tcp_conn, chunk);
			if (rc != 0) {
				skb_abort_seq_read(&seq);
				goto error;
			}

			/* The done() function sets up the
			 * next chunk. */
		}
	}

	conn->rxdata_octets += consumed;
	return consumed;

error:
	debug_tcp("Error receiving PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	return 0;
}

static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
	struct iscsi_conn *conn = sk->sk_user_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);

	/*
	 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_tcp_recv
	 * handles pdus that cross buffers or pdus that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;
	tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);

	read_unlock(&sk->sk_callback_lock);

	/* If we had to (atomically) map a highmem page,
	 * unmap it now. */
	iscsi_tcp_chunk_unmap(&tcp_conn->in.chunk);
}

static void
iscsi_tcp_state_change(struct sock *sk)
{
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_conn *conn;
	struct iscsi_session *session;
	void (*old_state_change)(struct sock *);

	read_lock(&sk->sk_callback_lock);

	conn = (struct iscsi_conn*)sk->sk_user_data;
	session = conn->session;

	if ((sk->sk_state == TCP_CLOSE_WAIT ||
	     sk->sk_state == TCP_CLOSE) &&
	    !atomic_read(&sk->sk_rmem_alloc)) {
		debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}

	tcp_conn = conn->dd_data;
	old_state_change = tcp_conn->old_state_change;

	read_unlock(&sk->sk_callback_lock);

	old_state_change(sk);
}

/**
 * iscsi_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 **/
static void
iscsi_write_space(struct sock *sk)
{
	struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	tcp_conn->old_write_space(sk);
	debug_tcp("iscsi_write_space: cid %d\n", conn->id);
	scsi_queue_work(conn->session->host, &conn->xmitwork);
}

static void
iscsi_conn_set_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk = tcp_conn->sock->sk;

	/* assign new callbacks */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	tcp_conn->old_data_ready = sk->sk_data_ready;
	tcp_conn->old_state_change = sk->sk_state_change;
	tcp_conn->old_write_space = sk->sk_write_space;
	sk->sk_data_ready = iscsi_tcp_data_ready;
	sk->sk_state_change = iscsi_tcp_state_change;
	sk->sk_write_space = iscsi_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void
iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
{
	struct sock *sk = tcp_conn->sock->sk;

	/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data    = NULL;
	sk->sk_data_ready   = tcp_conn->old_data_ready;
	sk->sk_state_change = tcp_conn->old_state_change;
	sk->sk_write_space  = tcp_conn->old_write_space;
	sk->sk_no_check	 = 0;
	write_unlock_bh(&sk->sk_callback_lock);
}

/**
 * iscsi_send - generic send routine
 * @sk: kernel's socket
 * @buf: buffer to write from
 * @size: actual size to write
 * @flags: socket's flags
 */
static inline int
iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct socket *sk = tcp_conn->sock;
	int offset = buf->sg.offset + buf->sent, res;

	/*
	 * if we got use_sg=0 or are sending something we kmallocd
	 * then we did not have to do kmap (kmap returns page_address)
	 *
	 * if we got use_sg > 0, but had to drop down, we do not
	 * set clustering so this should only happen for that
	 * slab case.
	 */
	if (buf->use_sendmsg)
		res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
	else
		res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);

	if (res >= 0) {
		conn->txdata_octets += res;
		buf->sent += res;
		return res;
	}

	tcp_conn->sendpage_failures_cnt++;
	if (res == -EAGAIN)
		res = -ENOBUFS;
	else
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	return res;
}

/**
 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @datalen: length of data to be sent after the header
 *
 * Notes:
 *	(Tx, Fast Path)
 **/
static inline int
iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
{
	int flags = 0; /* MSG_DONTWAIT; */
	int res, size;

	size = buf->sg.length - buf->sent;
	BUG_ON(buf->sent + size > buf->sg.length);
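	/* Tell TCP more is on the way (rest of the header or the payload)
	 * so it can coalesce instead of pushing a small segment now. */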
	if (buf->sent + size != buf->sg.length || datalen)
		flags |= MSG_MORE;

	res = iscsi_send(conn, buf, size, flags);
	debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
	if (res >= 0) {
		if (size != res)
			return -EAGAIN;
		return 0;
	}

	return res;
}

/**
 * iscsi_sendpage - send one page of iSCSI Data-Out.
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @count: remaining data
 * @sent: number of bytes sent
 *
 * Notes:
 *	(Tx, Fast Path)
 **/
static inline int
iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
	       int *count, int *sent)
{
	int flags = 0; /* MSG_DONTWAIT; */
	int res, size;

	size = buf->sg.length - buf->sent;
	BUG_ON(buf->sent + size > buf->sg.length);
	if (size > *count)
		size = *count;
	if (buf->sent + size != buf->sg.length || *count != size)
		flags |= MSG_MORE;

	res = iscsi_send(conn, buf, size, flags);
	debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
		  size, buf->sent, *count, *sent, res);
	if (res >= 0) {
		*count -= res;
		*sent += res;
		if (size != res)
			return -EAGAIN;
		return 0;
	}

	return res;
}

static inline void
iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
		      struct iscsi_tcp_cmd_task *tcp_ctask)
{
	crypto_hash_init(&tcp_conn->tx_hash);
	tcp_ctask->digest_count = 4;
}

/**
 * iscsi_solicit_data_cont - initialize next Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T info
 * @left: bytes left to transfer
 *
 * Notes:
 *	Initialize next Data-Out within this R2T sequence and continue
 *	to process the next Scatter-Gather element (if any) of this SCSI command.
 *
 *	Called under connection lock.
 **/
static void
iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_r2t_info *r2t, int left)
{
	struct iscsi_data *hdr;
	int new_offset;

	hdr = &r2t->dtask.hdr;
	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
	r2t->solicit_datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
	hdr->itt = ctask->hdr->itt;
	hdr->exp_statsn = r2t->exp_statsn;
	new_offset = r2t->data_offset + r2t->sent;
	hdr->offset = cpu_to_be32(new_offset);
	if (left > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
	} else {
		hton24(hdr->dlength, left);
		r2t->data_count = left;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;

	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
			   sizeof(struct iscsi_hdr));

	if (iscsi_buf_left(&r2t->sendbuf))
		return;

	iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
	r2t->sg += 1;
}

static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
			      unsigned long len)
{
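	/* iSCSI PDU data segments are padded out to a 4-byte (ISCSI_PAD_LEN)
	 * boundary; remember how many pad bytes must follow this data. */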
	tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
	if (!tcp_ctask->pad_count)
		return;

	tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
	debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
	tcp_ctask->xmstate |= XMSTATE_W_PAD;
}

/**
 * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @sc: scsi command
 **/
static void
iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
	tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT;
}

/**
 * iscsi_tcp_mtask_xmit - xmit management(immediate) task
 * @conn: iscsi connection
 * @mtask: task management task
 *
 * Notes:
 *	The function can return -EAGAIN in which case caller must
 *	call it again later, or recover. '0' return code means successful
 *	xmit.
 *
 *	Management xmit state machine consists of these states:
 *		XMSTATE_IMM_HDR_INIT	- calculate digest of PDU Header
 *		XMSTATE_IMM_HDR 	- PDU Header xmit in progress
 *		XMSTATE_IMM_DATA 	- PDU Data xmit in progress
 *		XMSTATE_IDLE		- management PDU is done
 **/
static int
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
	int rc;

	debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
		conn->id, tcp_mtask->xmstate, mtask->itt);

	if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) {
		iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
				   sizeof(struct iscsi_hdr));

		if (mtask->data_count) {
			tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
			iscsi_buf_init_iov(&tcp_mtask->sendbuf,
					   (char*)mtask->data,
					   mtask->data_count);
		}

		if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
		    conn->stop_stage != STOP_CONN_RECOVER &&
		    conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
					(u8*)tcp_mtask->hdrext);

		tcp_mtask->sent = 0;
		tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT;
		tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
	}

	if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
		rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
				   mtask->data_count);
		if (rc)
			return rc;
		tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
	}

	if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
		BUG_ON(!mtask->data_count);
		tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
		/* FIXME: implement.
		 * Virtual buffer could be spread across multiple pages...
		 */
		do {
			int rc;

			rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
					&mtask->data_count, &tcp_mtask->sent);
			if (rc) {
				tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
				return rc;
			}
		} while (mtask->data_count);
	}

	BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
	if (mtask->hdr->itt == RESERVED_ITT) {
		struct iscsi_session *session = conn->session;

		spin_lock_bh(&session->lock);
		iscsi_free_mgmt_task(conn, mtask);
		spin_unlock_bh(&session->lock);
	}
	return 0;
}

static int
iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct scsi_cmnd *sc = ctask->sc;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc = 0;

	if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) {
		tcp_ctask->sent = 0;
		tcp_ctask->sg_count = 0;
		tcp_ctask->exp_datasn = 0;

		if (sc->sc_data_direction == DMA_TO_DEVICE) {
			struct scatterlist *sg = scsi_sglist(sc);

			iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
			tcp_ctask->sg = sg + 1;
			tcp_ctask->bad_sg = sg + scsi_sg_count(sc);

			debug_scsi("cmd [itt 0x%x total %d imm_data %d "
				   "unsol count %d, unsol offset %d]\n",
				   ctask->itt, scsi_bufflen(sc),
				   ctask->imm_count, ctask->unsol_count,
				   ctask->unsol_offset);
		}

		iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
				  ctask->hdr_len);

		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					 iscsi_next_hdr(ctask));
		tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT;
		tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT;
	}

	if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) {
		rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT;

		if (sc->sc_data_direction != DMA_TO_DEVICE)
			return 0;

		if (ctask->imm_count) {
			tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
			iscsi_set_padding(tcp_ctask, ctask->imm_count);

			if (ctask->conn->datadgst_en) {
				iscsi_data_digest_init(ctask->conn->dd_data,
						       tcp_ctask);
				tcp_ctask->immdigest = 0;
			}
		}

		if (ctask->unsol_count)
			tcp_ctask->xmstate |=
					XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
	}
	return rc;
}

static int
iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int sent = 0, rc;

	if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
		iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
				   tcp_ctask->pad_count);
		if (conn->datadgst_en)
			crypto_hash_update(&tcp_conn->tx_hash,
					   &tcp_ctask->sendbuf.sg,
					   tcp_ctask->sendbuf.sg.length);
	} else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
		return 0;

	tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
	debug_scsi("sending %d pad bytes for itt 0x%x\n",
		   tcp_ctask->pad_count, ctask->itt);
	rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
			   &sent);
	if (rc) {
		debug_scsi("padding send failed %d\n", rc);
		tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
	}
	return rc;
}

static int
iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_buf *buf, uint32_t *digest)
{
	struct iscsi_tcp_cmd_task *tcp_ctask;
	struct iscsi_tcp_conn *tcp_conn;
	int rc, sent = 0;

	if (!conn->datadgst_en)
		return 0;

	tcp_ctask = ctask->dd_data;
	tcp_conn = conn->dd_data;

	if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
		crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
		iscsi_buf_init_iov(buf, (char*)digest, 4);
	}
	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;

	rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
	if (!rc)
		debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
			  ctask->itt);
	else {
		debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
			  *digest, ctask->itt);
		tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
	}
	return rc;
}

static int
iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
		struct scatterlist **sg, int *sent, int *count,
		struct iscsi_buf *digestbuf, uint32_t *digest)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int rc, buf_sent, offset;

	while (*count) {
		buf_sent = 0;
		offset = sendbuf->sent;

		rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
		*sent = *sent + buf_sent;
		if (buf_sent && conn->datadgst_en)
			partial_sg_digest_update(&tcp_conn->tx_hash,
				&sendbuf->sg, sendbuf->sg.offset + offset,
				buf_sent);
		if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
			iscsi_buf_init_sg(sendbuf, *sg);
			*sg = *sg + 1;
		}

		if (rc)
			return rc;
	}

	rc = iscsi_send_padding(conn, ctask);
	if (rc)
		return rc;

	return iscsi_send_digest(conn, ctask, digestbuf, digest);
}

static int
iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_task *dtask;
	int rc;

	tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
	if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
		dtask = &tcp_ctask->unsol_dtask;

		iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
		iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
				   sizeof(struct iscsi_hdr));
		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					(u8*)dtask->hdrext);

		tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
		iscsi_set_padding(tcp_ctask, ctask->data_count);
	}

	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
	if (rc) {
		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
		tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
		return rc;
	}

	if (conn->datadgst_en) {
		dtask = &tcp_ctask->unsol_dtask;
		iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
		dtask->digest = 0;
	}

	debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
		   ctask->itt, ctask->unsol_count, tcp_ctask->sent);
	return 0;
}

static int
iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc;

	if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
		BUG_ON(!ctask->unsol_count);
		tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
send_hdr:
		rc = iscsi_send_unsol_hdr(conn, ctask);
		if (rc)
			return rc;
	}

	if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
		struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
		int start = tcp_ctask->sent;

		rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
				     &tcp_ctask->sent, &ctask->data_count,
				     &dtask->digestbuf, &dtask->digest);
		ctask->unsol_count -= tcp_ctask->sent - start;
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
		/*
		 * Done with the Data-Out. Next, check if we need
		 * to send another unsolicited Data-Out.
		 */
		if (ctask->unsol_count) {
			debug_scsi("sending more uns\n");
			tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
			goto send_hdr;
		}
	}
	return 0;
}

static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
			      struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_r2t_info *r2t;
	struct iscsi_data_task *dtask;
	int left, rc;

	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) {
		if (!tcp_ctask->r2t) {
			spin_lock_bh(&session->lock);
			__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
				    sizeof(void*));
			spin_unlock_bh(&session->lock);
		}
send_hdr:
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &r2t->headbuf,
					(u8*)dtask->hdrext);
		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT;
		tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
	}

	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;

		if (conn->datadgst_en) {
			iscsi_data_digest_init(conn->dd_data, tcp_ctask);
			dtask->digest = 0;
		}

		iscsi_set_padding(tcp_ctask, r2t->data_count);
		debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
			r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
			r2t->sent);
	}

	if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
				     &r2t->sent, &r2t->data_count,
				     &dtask->digestbuf, &dtask->digest);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;

		/*
		 * Done with this Data-Out. Next, check if we have
		 * to send another Data-Out for this R2T.
		 */
		BUG_ON(r2t->data_length - r2t->sent < 0);
		left = r2t->data_length - r2t->sent;
		if (left) {
			iscsi_solicit_data_cont(conn, ctask, r2t, left);
			goto send_hdr;
		}

		/*
		 * Done with this R2T. Check if there are more
		 * outstanding R2Ts ready to be processed.
		 */
		spin_lock_bh(&session->lock);
		tcp_ctask->r2t = NULL;
		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
				sizeof(void*))) {
			tcp_ctask->r2t = r2t;
			spin_unlock_bh(&session->lock);
			goto send_hdr;
		}
		spin_unlock_bh(&session->lock);
	}
	return 0;
}

/**
 * iscsi_tcp_ctask_xmit - xmit normal PDU task
 * @conn: iscsi connection
 * @ctask: iscsi command task
 *
 * Notes:
 *	The function can return -EAGAIN in which case caller must
 *	call it again later, or recover. '0' return code means successful
 *	xmit.
 *	The function is divided into logical helpers (above) for the different
 *	xmit stages.
 *
 *iscsi_send_cmd_hdr()
 *	XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers; calculate
 *	                       Header Digest
 *	XMSTATE_CMD_HDR_XMIT - Transmit header in progress
 *
 *iscsi_send_padding
 *	XMSTATE_W_PAD        - Prepare and send padding
 *	XMSTATE_W_RESEND_PAD - retry send padding
 *
 *iscsi_send_digest
 *	XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
 *	XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest
 *
 *iscsi_send_unsol_hdr
 *	XMSTATE_UNS_INIT     - prepare unsolicited data header and digest
 *	XMSTATE_UNS_HDR      - send unsolicited header
 *
 *iscsi_send_unsol_pdu
 *	XMSTATE_UNS_DATA     - send unsolicited data in progress
 *
 *iscsi_send_sol_pdu
 *	XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize
 *	XMSTATE_SOL_HDR      - send solicit header
 *	XMSTATE_SOL_DATA     - send solicit data
 *
 *iscsi_tcp_ctask_xmit
 *	XMSTATE_IMM_DATA     - xmit management data (??)
 **/
static int
iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc = 0;

	debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
		conn->id, tcp_ctask->xmstate, ctask->itt);

	rc = iscsi_send_cmd_hdr(conn, ctask);
	if (rc)
		return rc;
	if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
		return 0;

	if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
		rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
				     &tcp_ctask->sent, &ctask->imm_count,
				     &tcp_ctask->immbuf, &tcp_ctask->immdigest);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
	}

	rc = iscsi_send_unsol_pdu(conn, ctask);
	if (rc)
		return rc;

	rc = iscsi_send_sol_pdu(conn, ctask);
	if (rc)
		return rc;

	return rc;
}

static struct iscsi_cls_conn *
iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;

	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	/*
	 * due to strange issues with iser these are not set
	 * in iscsi_conn_setup
	 */
	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;

	tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
	if (!tcp_conn)
		goto tcp_conn_alloc_fail;

	conn->dd_data = tcp_conn;
	tcp_conn->iscsi_conn = conn;

	tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						  CRYPTO_ALG_ASYNC);
	tcp_conn->tx_hash.flags = 0;
	if (IS_ERR(tcp_conn->tx_hash.tfm)) {
		printk(KERN_ERR "Could not create connection due to crc32c "
		       "loading error %ld. Make sure the crc32c module is "
		       "built as a module or into the kernel\n",
			PTR_ERR(tcp_conn->tx_hash.tfm));
		goto free_tcp_conn;
	}

	tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						  CRYPTO_ALG_ASYNC);
	tcp_conn->rx_hash.flags = 0;
	if (IS_ERR(tcp_conn->rx_hash.tfm)) {
		printk(KERN_ERR "Could not create connection due to crc32c "
		       "loading error %ld. Make sure the crc32c module is "
		       "built as a module or into the kernel\n",
			PTR_ERR(tcp_conn->rx_hash.tfm));
		goto free_tx_tfm;
	}

	return cls_conn;

free_tx_tfm:
	crypto_free_hash(tcp_conn->tx_hash.tfm);
free_tcp_conn:
	kfree(tcp_conn);
tcp_conn_alloc_fail:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}

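/*
 * iscsi_tcp_release_conn - detach the connection from its socket
 *
 * Restores the original socket callbacks, clears the connection's
 * socket pointer and drops the file reference taken by
 * sockfd_lookup() at bind time.
 */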
static void
iscsi_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct socket *sock = tcp_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_conn_restore_callbacks(tcp_conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->lock);
	tcp_conn->sock = NULL;
	conn->recv_lock = NULL;
	spin_unlock_bh(&session->lock);
	sockfd_put(sock);
}

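/*
 * iscsi_tcp_conn_destroy - tear down a TCP iSCSI connection
 *
 * Releases the socket, frees the tx/rx hash transforms and the
 * TCP-specific connection data.
 */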
static void
iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	iscsi_tcp_release_conn(conn);
	iscsi_conn_teardown(cls_conn);

	if (tcp_conn->tx_hash.tfm)
		crypto_free_hash(tcp_conn->tx_hash.tfm);
	if (tcp_conn->rx_hash.tfm)
		crypto_free_hash(tcp_conn->rx_hash.tfm);

	kfree(tcp_conn);
}

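/* stop the connection and release its socket */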
static void
iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	iscsi_conn_stop(cls_conn, flag);
	iscsi_tcp_release_conn(conn);
}

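/*
 * iscsi_tcp_get_addr - format a socket address for later queries
 *
 * Uses the given getname callback (kernel_getpeername or
 * kernel_getsockname) and stores the printable address and port
 * into the supplied buffers under the session lock.
 */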
static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
			      char *buf, int *port,
			      int (*getname)(struct socket *, struct sockaddr *,
					int *addrlen))
{
	struct sockaddr_storage *addr;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	int rc = 0, len;

	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (getname(sock, (struct sockaddr *) addr, &len)) {
		rc = -ENODEV;
		goto free_addr;
	}

	switch (addr->ss_family) {
	case AF_INET:
		sin = (struct sockaddr_in *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
		*port = be16_to_cpu(sin->sin_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
		*port = be16_to_cpu(sin6->sin6_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	}
free_addr:
	kfree(addr);
	return rc;
}

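/*
 * iscsi_tcp_conn_bind - bind an open TCP socket to the connection
 *
 * Looks up the socket handed over from userspace, records the portal
 * and local addresses for later queries, tunes the socket, intercepts
 * the TCP callbacks for receive processing and resets the receive
 * state machine.
 */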
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		    int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}
	/*
	 * copy these values now because if we drop the session
	 * userspace may still want to query the values since we will
	 * be using them for the reconnect
	 */
	err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
				 &conn->portal_port, kernel_getpeername);
	if (err)
		goto free_socket;

	err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
				&conn->local_port, kernel_getsockname);
	if (err)
		goto free_socket;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	/* bind iSCSI connection and socket */
	tcp_conn->sock = sock;

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	/* FIXME: disable Nagle's algorithm */

	/*
	 * Intercept TCP callbacks for sendfile like receive
	 * processing.
	 */
	conn->recv_lock = &sk->sk_callback_lock;
	iscsi_conn_set_callbacks(conn);
	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

/* called with host lock */
static void
iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

	tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT;
}

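/*
 * iscsi_r2tpool_alloc - allocate the per-task R2T resources
 *
 * Each command task gets a pool of R2T descriptors and a kfifo xmit
 * queue, both sized at four times the session's max_r2t.
 */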
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		/*
		 * pre-allocate 4x as many r2ts to handle the race where
		 * the target acks DataOut faster than data_xmit() can
		 * replenish the r2t queue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
				    sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		tcp_ctask->r2tqueue = kfifo_alloc(
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&tcp_ctask->r2tpool);
			goto r2t_alloc_fail;
		}
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool);
	}
	return -ENOMEM;
}

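/* free the per-task R2T pools and xmit queues allocated above */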
static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool);
	}
}

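/*
 * iscsi_conn_set_param - apply a connection parameter
 *
 * Most parameters are passed straight to iscsi_set_param(); enabling
 * the data digest also switches between sendpage and sock_no_sendpage,
 * and changing MaxOutstandingR2T rebuilds the R2T pools.
 */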
static int
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
		     char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (session->max_r2t == roundup_pow_of_two(value))
			break;
		iscsi_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (session->max_r2t & (session->max_r2t - 1))
			session->max_r2t = roundup_pow_of_two(session->max_r2t);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

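/*
 * iscsi_tcp_conn_get_param - report the cached portal address and port
 * recorded at bind time; everything else falls back to
 * iscsi_conn_get_param().
 */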
static int
iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
			 enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	int len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%hu\n", conn->portal_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%s\n", conn->portal_address);
		spin_unlock_bh(&conn->session->lock);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}

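/*
 * iscsi_tcp_host_get_param - report host parameters; the host IP
 * address is taken from the leading connection's local address.
 */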
static int
iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf)
{
        struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		spin_lock_bh(&session->lock);
		if (!session->leadconn)
			len = -ENODEV;
		else
			len = sprintf(buf, "%s\n",
				     session->leadconn->local_address);
		spin_unlock_bh(&session->lock);
		break;
	default:
		return iscsi_host_get_param(shost, param, buf);
	}
	return len;
}

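/*
 * iscsi_conn_get_stats - fill in the generic PDU/octet counters plus
 * three TCP-specific custom counters for this connection.
 */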
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;
}

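/*
 * iscsi_tcp_session_create - create a TCP iSCSI session
 *
 * Sets up the generic session, points each command and management
 * task at its TCP-private header storage, and allocates the R2T pools.
 */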
static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_transport *iscsit,
			 struct scsi_transport_template *scsit,
			 uint16_t cmds_max, uint16_t qdepth,
			 uint32_t initial_cmdsn, uint32_t *hostno)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	uint32_t hn;
	int cmd_i;

	cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
					 sizeof(struct iscsi_tcp_cmd_task),
					 sizeof(struct iscsi_tcp_mgmt_task),
					 initial_cmdsn, &hn);
	if (!cls_session)
		return NULL;
	*hostno = hn;

	session = class_to_transport_session(cls_session);
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
		ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
	}

	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
		struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

		mtask->hdr = &tcp_mtask->hdr;
	}

	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
		goto r2tpool_alloc_fail;

	return cls_session;

r2tpool_alloc_fail:
	iscsi_session_teardown(cls_session);
	return NULL;
}

static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	iscsi_r2tpool_free(class_to_transport_session(cls_session));
	iscsi_session_teardown(cls_session);
}

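/*
 * iscsi_tcp_slave_configure - allow any pages (no bounce buffering)
 * and byte-aligned buffers for devices attached through this transport.
 */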
static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
{
	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(sdev->request_queue, 0);
	return 0;
}

static struct scsi_host_template iscsi_sht = {
	.module			= THIS_MODULE,
	.name			= "iSCSI Initiator over TCP/IP",
	.queuecommand           = iscsi_queuecommand,
	.change_queue_depth	= iscsi_change_queue_depth,
	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
	.sg_tablesize		= ISCSI_SG_TABLESIZE,
	.max_sectors		= 0xFFFF,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler       = iscsi_eh_abort,
	.eh_device_reset_handler= iscsi_eh_device_reset,
	.eh_host_reset_handler	= iscsi_eh_host_reset,
	.use_clustering         = DISABLE_CLUSTERING,
	.slave_configure        = iscsi_tcp_slave_configure,
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
};

static struct iscsi_transport iscsi_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
				  ISCSI_CONN_ADDRESS |
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
				  ISCSI_TARGET_NAME | ISCSI_TPGT |
				  ISCSI_USERNAME | ISCSI_PASSWORD |
				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				  ISCSI_LU_RESET_TMO |
				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				  ISCSI_HOST_INITIATOR_NAME |
				  ISCSI_HOST_NETDEV_NAME,
	.host_template		= &iscsi_sht,
	.conndata_size		= sizeof(struct iscsi_conn),
	.max_conn		= 1,
	.max_cmd_len		= ISCSI_TCP_MAX_CMD_LEN,
	/* session management */
	.create_session		= iscsi_tcp_session_create,
	.destroy_session	= iscsi_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_tcp_conn_create,
	.bind_conn		= iscsi_tcp_conn_bind,
	.destroy_conn		= iscsi_tcp_conn_destroy,
	.set_param		= iscsi_conn_set_param,
	.get_conn_param		= iscsi_tcp_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_tcp_conn_stop,
	/* iscsi host params */
	.get_host_param		= iscsi_tcp_host_get_param,
	.set_host_param		= iscsi_host_set_param,
	/* IO */
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_conn_get_stats,
	.init_cmd_task		= iscsi_tcp_cmd_init,
	.init_mgmt_task		= iscsi_tcp_mgmt_init,
	.xmit_cmd_task		= iscsi_tcp_ctask_xmit,
	.xmit_mgmt_task		= iscsi_tcp_mtask_xmit,
	.cleanup_cmd_task	= iscsi_tcp_cleanup_ctask,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static int __init
iscsi_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}
	iscsi_tcp_transport.max_lun = iscsi_max_lun;

	if (!iscsi_register_transport(&iscsi_tcp_transport))
		return -ENODEV;

	return 0;
}

static void __exit
iscsi_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_tcp_transport);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);