iscsi_tcp.c 60.2 KB
Newer Older
1 2 3 4 5
/*
 * iSCSI Initiator over TCP/IP Data-Path
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
6 7
 * Copyright (C) 2005 - 2006 Mike Christie
 * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * maintained by open-iscsi@googlegroups.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * See the file COPYING included with this distribution for more details.
 *
 * Credits:
 *	Christoph Hellwig
 *	FUJITA Tomonori
 *	Arne Redlich
 *	Zhenyu Wang
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>

#include "iscsi_tcp.h"

MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
#undef DEBUG_TCP
#define DEBUG_ASSERT

#ifdef DEBUG_TCP
#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
#else
#define debug_tcp(fmt...)
#endif

#ifndef DEBUG_ASSERT
#ifdef BUG_ON
#undef BUG_ON
#endif
#define BUG_ON(expr)
#endif

static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
				   struct iscsi_chunk *chunk);

static inline void
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
{
O
Olaf Kirch 已提交
76 77 78
	ibuf->sg.page = virt_to_page(vbuf);
	ibuf->sg.offset = offset_in_page(vbuf);
	ibuf->sg.length = size;
79
	ibuf->sent = 0;
80
	ibuf->use_sendmsg = 1;
81 82 83 84 85
}

static inline void
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
{
O
Olaf Kirch 已提交
86 87 88
	ibuf->sg.page = sg->page;
	ibuf->sg.offset = sg->offset;
	ibuf->sg.length = sg->length;
89 90 91
	/*
	 * Fastpath: sg element fits into single page
	 */
O
Olaf Kirch 已提交
92
	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
93 94 95
		ibuf->use_sendmsg = 0;
	else
		ibuf->use_sendmsg = 1;
96 97 98 99 100 101 102 103 104 105 106 107 108 109
	ibuf->sent = 0;
}

static inline int
iscsi_buf_left(struct iscsi_buf *ibuf)
{
	int rc;

	rc = ibuf->sg.length - ibuf->sent;
	BUG_ON(rc < 0);
	return rc;
}

static inline void
110 111
iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
		 u8* crc)
112
{
113
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
114

115
	crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
116
	buf->sg.length += ISCSI_DIGEST_SIZE;
117 118
}

O
Olaf Kirch 已提交
119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
/*
 * Scatterlist handling: inside the iscsi_chunk, we
 * remember an index into the scatterlist, and set data/size
 * to the current scatterlist entry. For highmem pages, we
 * kmap as needed.
 *
 * Note that the page is unmapped when we return from
 * TCP's data_ready handler, so we may end up mapping and
 * unmapping the same page repeatedly. The whole reason
 * for this is that we shouldn't keep the page mapped
 * outside the softirq.
 */

/**
 * iscsi_tcp_chunk_init_sg - init indicated scatterlist entry
 * @chunk: the buffer object
 * @idx: index into scatterlist
 * @offset: byte offset into that sg entry
 *
 * This function sets up the chunk so that subsequent
 * data is copied to the indicated sg entry, at the given
 * offset.
 */
static inline void
iscsi_tcp_chunk_init_sg(struct iscsi_chunk *chunk,
			unsigned int idx, unsigned int offset)
{
	struct scatterlist *sg;

	BUG_ON(chunk->sg == NULL);

	sg = &chunk->sg[idx];
	chunk->sg_index = idx;
	chunk->sg_offset = offset;
	chunk->size = min(sg->length - offset, chunk->total_size);
	chunk->data = NULL;
}

/**
 * iscsi_tcp_chunk_map - map the current S/G page
 * @chunk: iscsi chunk
 *
 * We only need to possibly kmap data if scatter lists are being used,
 * because the iscsi passthrough and internal IO paths will never use high
 * mem pages.
 */
static inline void
iscsi_tcp_chunk_map(struct iscsi_chunk *chunk)
{
	struct scatterlist *sg;

	if (chunk->data != NULL || !chunk->sg)
		return;

	sg = &chunk->sg[chunk->sg_index];
	BUG_ON(chunk->sg_mapped);
	BUG_ON(sg->length == 0);
	chunk->sg_mapped = kmap_atomic(sg->page, KM_SOFTIRQ0);
	chunk->data = chunk->sg_mapped + sg->offset + chunk->sg_offset;
}

static inline void
iscsi_tcp_chunk_unmap(struct iscsi_chunk *chunk)
{
	if (chunk->sg_mapped) {
		kunmap_atomic(chunk->sg_mapped, KM_SOFTIRQ0);
		chunk->sg_mapped = NULL;
		chunk->data = NULL;
	}
}

/*
 * Splice the digest buffer into the buffer
 */
static inline void
iscsi_tcp_chunk_splice_digest(struct iscsi_chunk *chunk, void *digest)
{
	chunk->data = digest;
	chunk->digest_len = ISCSI_DIGEST_SIZE;
	chunk->total_size += ISCSI_DIGEST_SIZE;
	chunk->size = ISCSI_DIGEST_SIZE;
	chunk->copied = 0;
	chunk->sg = NULL;
	chunk->sg_index = 0;
	chunk->hash = NULL;
}

/**
 * iscsi_tcp_chunk_done - check whether the chunk is complete
 * @chunk: iscsi chunk to check
 *
 * Check if we're done receiving this chunk. If the receive
 * buffer is full but we expect more data, move on to the
 * next entry in the scatterlist.
 *
 * If the amount of data we received isn't a multiple of 4,
 * we will transparently receive the pad bytes, too.
 *
 * This function must be re-entrant.
 */
219
static inline int
O
Olaf Kirch 已提交
220
iscsi_tcp_chunk_done(struct iscsi_chunk *chunk)
221
{
O
Olaf Kirch 已提交
222
	static unsigned char padbuf[ISCSI_PAD_LEN];
223
	unsigned int pad;
O
Olaf Kirch 已提交
224 225 226 227 228

	if (chunk->copied < chunk->size) {
		iscsi_tcp_chunk_map(chunk);
		return 0;
	}
229

O
Olaf Kirch 已提交
230 231 232
	chunk->total_copied += chunk->copied;
	chunk->copied = 0;
	chunk->size = 0;
233

O
Olaf Kirch 已提交
234 235 236 237 238 239 240 241 242 243 244 245 246
	/* Unmap the current scatterlist page, if there is one. */
	iscsi_tcp_chunk_unmap(chunk);

	/* Do we have more scatterlist entries? */
	if (chunk->total_copied < chunk->total_size) {
		/* Proceed to the next entry in the scatterlist. */
		iscsi_tcp_chunk_init_sg(chunk, chunk->sg_index + 1, 0);
		iscsi_tcp_chunk_map(chunk);
		BUG_ON(chunk->size == 0);
		return 0;
	}

	/* Do we need to handle padding? */
247 248
	pad = iscsi_padding(chunk->total_copied);
	if (pad != 0) {
O
Olaf Kirch 已提交
249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265
		debug_tcp("consume %d pad bytes\n", pad);
		chunk->total_size += pad;
		chunk->size = pad;
		chunk->data = padbuf;
		return 0;
	}

	/*
	 * Set us up for receiving the data digest. hdr digest
	 * is completely handled in hdr done function.
	 */
	if (chunk->hash) {
		if (chunk->digest_len == 0) {
			crypto_hash_final(chunk->hash, chunk->digest);
			iscsi_tcp_chunk_splice_digest(chunk,
						      chunk->recv_digest);
			return 0;
266
		}
O
Olaf Kirch 已提交
267
	}
268

O
Olaf Kirch 已提交
269 270
	return 1;
}
271

O
Olaf Kirch 已提交
272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295
/**
 * iscsi_tcp_chunk_recv - copy data to chunk
 * @tcp_conn: the iSCSI TCP connection
 * @chunk: the buffer to copy to
 * @ptr: data pointer
 * @len: amount of data available
 *
 * This function copies up to @len bytes to the
 * given buffer, and returns the number of bytes
 * consumed, which can actually be less than @len.
 *
 * If hash digest is enabled, the function will update the
 * hash while copying.
 * Combining these two operations doesn't buy us a lot (yet),
 * but in the future we could implement combined copy+crc,
 * just way we do for network layer checksums.
 */
static int
iscsi_tcp_chunk_recv(struct iscsi_tcp_conn *tcp_conn,
		     struct iscsi_chunk *chunk, const void *ptr,
		     unsigned int len)
{
	struct scatterlist sg;
	unsigned int copy, copied = 0;
296

O
Olaf Kirch 已提交
297 298 299
	while (!iscsi_tcp_chunk_done(chunk)) {
		if (copied == len)
			goto out;
300

O
Olaf Kirch 已提交
301 302
		copy = min(len - copied, chunk->size - chunk->copied);
		memcpy(chunk->data + chunk->copied, ptr + copied, copy);
303

O
Olaf Kirch 已提交
304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
		if (chunk->hash) {
			sg_init_one(&sg, ptr + copied, copy);
			crypto_hash_update(chunk->hash, &sg, copy);
		}
		chunk->copied += copy;
		copied += copy;
	}

out:
	return copied;
}

static inline void
iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
		      unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist sg;
321

O
Olaf Kirch 已提交
322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381
	sg_init_one(&sg, hdr, hdrlen);
	crypto_hash_digest(hash, &sg, hdrlen, digest);
}

static inline int
iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
		      struct iscsi_chunk *chunk)
{
	if (!chunk->digest_len)
		return 1;

	if (memcmp(chunk->recv_digest, chunk->digest, chunk->digest_len)) {
		debug_scsi("digest mismatch\n");
		return 0;
	}

	return 1;
}

/*
 * Helper function to set up chunk buffer
 */
static inline void
__iscsi_chunk_init(struct iscsi_chunk *chunk, size_t size,
		   iscsi_chunk_done_fn_t *done, struct hash_desc *hash)
{
	memset(chunk, 0, sizeof(*chunk));
	chunk->total_size = size;
	chunk->done = done;

	if (hash) {
		chunk->hash = hash;
		crypto_hash_init(hash);
	}
}

static inline void
iscsi_chunk_init_linear(struct iscsi_chunk *chunk, void *data, size_t size,
			iscsi_chunk_done_fn_t *done, struct hash_desc *hash)
{
	__iscsi_chunk_init(chunk, size, done, hash);
	chunk->data = data;
	chunk->size = size;
}

static inline int
iscsi_chunk_seek_sg(struct iscsi_chunk *chunk,
		    struct scatterlist *sg, unsigned int sg_count,
		    unsigned int offset, size_t size,
		    iscsi_chunk_done_fn_t *done, struct hash_desc *hash)
{
	unsigned int i;

	__iscsi_chunk_init(chunk, size, done, hash);
	for (i = 0; i < sg_count; ++i) {
		if (offset < sg[i].length) {
			chunk->sg = sg;
			chunk->sg_count = sg_count;
			iscsi_tcp_chunk_init_sg(chunk, i, offset);
			return 0;
382
		}
O
Olaf Kirch 已提交
383
		offset -= sg[i].length;
384 385
	}

O
Olaf Kirch 已提交
386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425
	return ISCSI_ERR_DATA_OFFSET;
}

/**
 * iscsi_tcp_hdr_recv_prep - prep chunk for hdr reception
 * @tcp_conn: iscsi connection to prep for
 *
 * This function always passes NULL for the hash argument, because when this
 * function is called we do not yet know the final size of the header and want
 * to delay the digest processing until we know that.
 */
static void
iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
	debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
		  tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
	iscsi_chunk_init_linear(&tcp_conn->in.chunk,
				tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
				iscsi_tcp_hdr_recv_done, NULL);
}

/*
 * Handle incoming reply to any other type of command
 */
static int
iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
			 struct iscsi_chunk *chunk)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	int rc = 0;

	if (!iscsi_tcp_dgst_verify(tcp_conn, chunk))
		return ISCSI_ERR_DATA_DGST;

	rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
			conn->data, tcp_conn->in.datalen);
	if (rc)
		return rc;

	iscsi_tcp_hdr_recv_prep(tcp_conn);
426 427 428
	return 0;
}

O
Olaf Kirch 已提交
429 430 431 432 433 434 435 436 437 438 439 440 441 442
static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct hash_desc *rx_hash = NULL;

	if (conn->datadgst_en)
		rx_hash = &tcp_conn->rx_hash;

	iscsi_chunk_init_linear(&tcp_conn->in.chunk,
				conn->data, tcp_conn->in.datalen,
				iscsi_tcp_data_recv_done, rx_hash);
}

M
Mike Christie 已提交
443 444 445 446
/*
 * must be called with session lock
 */
static void
447
iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
448
{
449
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
450
	struct iscsi_r2t_info *r2t;
M
Mike Christie 已提交
451
	struct scsi_cmnd *sc;
452

453 454 455 456 457 458 459
	/* flush ctask's r2t queues */
	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
	}

M
Mike Christie 已提交
460 461
	sc = ctask->sc;
	if (unlikely(!sc))
462
		return;
M
Mike Christie 已提交
463

464
	tcp_ctask->xmstate = XMSTATE_IDLE;
465
	tcp_ctask->r2t = NULL;
466 467 468 469 470 471 472 473 474 475
}

/**
 * iscsi_data_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
476 477 478
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
479
	struct iscsi_session *session = conn->session;
480
	struct scsi_cmnd *sc = ctask->sc;
481 482
	int datasn = be32_to_cpu(rhdr->datasn);

483
	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
484 485 486
	/*
	 * setup Data-In byte counter (gets decremented..)
	 */
487
	ctask->data_count = tcp_conn->in.datalen;
488

489
	if (tcp_conn->in.datalen == 0)
490 491
		return 0;

492 493 494
	if (tcp_ctask->exp_datasn != datasn) {
		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
		          __FUNCTION__, tcp_ctask->exp_datasn, datasn);
495
		return ISCSI_ERR_DATASN;
496
	}
497

498
	tcp_ctask->exp_datasn++;
499

500
	tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
501
	if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
502 503
		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
		          __FUNCTION__, tcp_ctask->data_offset,
504
		          tcp_conn->in.datalen, scsi_bufflen(sc));
505
		return ISCSI_ERR_DATA_OFFSET;
506
	}
507 508

	if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
509
		sc->result = (DID_OK << 16) | rhdr->cmd_status;
510
		conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
511 512
		if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
		                   ISCSI_FLAG_DATA_OVERFLOW)) {
513 514 515
			int res_count = be32_to_cpu(rhdr->residual_count);

			if (res_count > 0 &&
516 517
			    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
			     res_count <= scsi_bufflen(sc)))
518
				scsi_set_resid(sc, res_count);
519
			else
520 521
				sc->result = (DID_BAD_TARGET << 16) |
					rhdr->cmd_status;
522
		}
523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546
	}

	conn->datain_pdus_cnt++;
	return 0;
}

/**
 * iscsi_solicit_data_init - initialize first Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T info
 *
 * Notes:
 *	Initialize first Data-Out within this R2T sequence and finds
 *	proper data_offset within this SCSI command.
 *
 *	This function is called with connection lock taken.
 **/
static void
iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_r2t_info *r2t)
{
	struct iscsi_data *hdr;
	struct scsi_cmnd *sc = ctask->sc;
547 548
	int i, sg_count = 0;
	struct scatterlist *sg;
549

550
	hdr = &r2t->dtask.hdr;
551 552 553 554 555
	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
	r2t->solicit_datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
556 557
	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
	hdr->itt = ctask->hdr->itt;
558 559 560 561 562 563 564 565 566 567 568 569 570 571 572
	hdr->exp_statsn = r2t->exp_statsn;
	hdr->offset = cpu_to_be32(r2t->data_offset);
	if (r2t->data_length > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
		hdr->flags = 0;
	} else {
		hton24(hdr->dlength, r2t->data_length);
		r2t->data_count = r2t->data_length;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;

	r2t->sent = 0;

573
	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
574
			   sizeof(struct iscsi_hdr));
575

576 577 578 579 580 581
	sg = scsi_sglist(sc);
	r2t->sg = NULL;
	for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) {
		/* FIXME: prefetch ? */
		if (sg_count + sg->length > r2t->data_offset) {
			int page_offset;
582

583
			/* sg page found! */
584

585 586
			/* offset within this page */
			page_offset = r2t->data_offset - sg_count;
587

588 589 590 591
			/* fill in this buffer */
			iscsi_buf_init_sg(&r2t->sendbuf, sg);
			r2t->sendbuf.sg.offset += page_offset;
			r2t->sendbuf.sg.length -= page_offset;
592

593 594 595
			/* xmit logic will continue with next one */
			r2t->sg = sg + 1;
			break;
596
		}
597
		sg_count += sg->length;
598
	}
599
	BUG_ON(r2t->sg == NULL);
600 601 602 603 604 605 606 607 608 609 610 611
}

/**
 * iscsi_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_r2t_info *r2t;
	struct iscsi_session *session = conn->session;
612 613 614
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
615 616 617
	int r2tsn = be32_to_cpu(rhdr->r2tsn);
	int rc;

618 619 620
	if (tcp_conn->in.datalen) {
		printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
		       tcp_conn->in.datalen);
621
		return ISCSI_ERR_DATALEN;
622
	}
623

624 625 626
	if (tcp_ctask->exp_datasn != r2tsn){
		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
		          __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
627
		return ISCSI_ERR_R2TSN;
628
	}
629 630 631

	/* fill-in new R2T associated with the task */
	spin_lock(&session->lock);
632 633
	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);

634
	if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
635 636 637 638 639
		printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
		       "recovery...\n", ctask->itt);
		spin_unlock(&session->lock);
		return 0;
	}
640

641
	rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
642 643 644 645
	BUG_ON(!rc);

	r2t->exp_statsn = rhdr->statsn;
	r2t->data_length = be32_to_cpu(rhdr->data_length);
646 647
	if (r2t->data_length == 0) {
		printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
648 649 650 651
		spin_unlock(&session->lock);
		return ISCSI_ERR_DATALEN;
	}

652 653 654 655 656
	if (r2t->data_length > session->max_burst)
		debug_scsi("invalid R2T with data len %u and max burst %u."
			   "Attempting to execute request.\n",
			    r2t->data_length, session->max_burst);

657
	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
658
	if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
659
		spin_unlock(&session->lock);
660 661
		printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
		       "offset %u and total length %d\n", r2t->data_length,
662
		       r2t->data_offset, scsi_bufflen(ctask->sc));
663 664 665 666 667 668 669 670
		return ISCSI_ERR_DATALEN;
	}

	r2t->ttt = rhdr->ttt; /* no flip */
	r2t->solicit_datasn = 0;

	iscsi_solicit_data_init(conn, ctask, r2t);

671
	tcp_ctask->exp_datasn = r2tsn + 1;
672
	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
673
	tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT;
674
	conn->r2t_pdus_cnt++;
675 676

	iscsi_requeue_ctask(ctask);
677 678 679 680 681
	spin_unlock(&session->lock);

	return 0;
}

O
Olaf Kirch 已提交
682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716
/*
 * Handle incoming reply to DataIn command
 */
static int
iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
			  struct iscsi_chunk *chunk)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr = tcp_conn->in.hdr;
	int rc;

	if (!iscsi_tcp_dgst_verify(tcp_conn, chunk))
		return ISCSI_ERR_DATA_DGST;

	/* check for non-exceptional status */
	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
		if (rc)
			return rc;
	}

	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;
}

/**
 * iscsi_tcp_hdr_dissect - process PDU header
 * @conn: iSCSI connection
 * @hdr: PDU header
 *
 * This function analyzes the header of the PDU received,
 * and performs several sanity checks. If the PDU is accompanied
 * by data, the receive buffer is set up to copy the incoming data
 * to the correct location.
 */
717
static int
O
Olaf Kirch 已提交
718
iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
719
{
720
	int rc = 0, opcode, ahslen;
721
	struct iscsi_session *session = conn->session;
722
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
O
Olaf Kirch 已提交
723 724
	struct iscsi_cmd_task *ctask;
	uint32_t itt;
725 726

	/* verify PDU length */
727 728
	tcp_conn->in.datalen = ntoh24(hdr->dlength);
	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
729
		printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
730
		       tcp_conn->in.datalen, conn->max_recv_dlength);
731 732 733
		return ISCSI_ERR_DATALEN;
	}

O
Olaf Kirch 已提交
734 735 736
	/* Additional header segments. So far, we don't
	 * process additional headers.
	 */
737
	ahslen = hdr->hlength << 2;
738

739
	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
740
	/* verify itt (itt encoding: age+cid+itt) */
741 742
	rc = iscsi_verify_itt(conn, hdr, &itt);
	if (rc == ISCSI_ERR_NO_SCSI_CMD) {
O
Olaf Kirch 已提交
743
		/* XXX: what does this do? */
744 745 746 747
		tcp_conn->in.datalen = 0; /* force drop */
		return 0;
	} else if (rc)
		return rc;
748

O
Olaf Kirch 已提交
749 750
	debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
		  opcode, ahslen, tcp_conn->in.datalen);
751

752 753
	switch(opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
O
Olaf Kirch 已提交
754 755
		ctask = session->cmds[itt];
		rc = iscsi_data_rsp(conn, ctask);
756 757
		if (rc)
			return rc;
O
Olaf Kirch 已提交
758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784
		if (tcp_conn->in.datalen) {
			struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
			struct hash_desc *rx_hash = NULL;

			/*
			 * Setup copy of Data-In into the Scsi_Cmnd
			 * Scatterlist case:
			 * We set up the iscsi_chunk to point to the next
			 * scatterlist entry to copy to. As we go along,
			 * we move on to the next scatterlist entry and
			 * update the digest per-entry.
			 */
			if (conn->datadgst_en)
				rx_hash = &tcp_conn->rx_hash;

			debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
				  "datalen=%d)\n", tcp_conn,
				  tcp_ctask->data_offset,
				  tcp_conn->in.datalen);
			return iscsi_chunk_seek_sg(&tcp_conn->in.chunk,
						scsi_sglist(ctask->sc),
						scsi_sg_count(ctask->sc),
						tcp_ctask->data_offset,
						tcp_conn->in.datalen,
						iscsi_tcp_process_data_in,
						rx_hash);
		}
785 786
		/* fall through */
	case ISCSI_OP_SCSI_CMD_RSP:
O
Olaf Kirch 已提交
787 788 789 790 791
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
792 793
		break;
	case ISCSI_OP_R2T:
O
Olaf Kirch 已提交
794
		ctask = session->cmds[itt];
795 796
		if (ahslen)
			rc = ISCSI_ERR_AHSLEN;
O
Olaf Kirch 已提交
797 798
		else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
			rc = iscsi_r2t_rsp(conn, ctask);
799 800 801 802 803 804 805
		else
			rc = ISCSI_ERR_PROTO;
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT:
	case ISCSI_OP_ASYNC_EVENT:
806 807 808 809 810
		/*
		 * It is possible that we could get a PDU with a buffer larger
		 * than 8K, but there are no targets that currently do this.
		 * For now we fail until we find a vendor that needs it
		 */
O
Olaf Kirch 已提交
811
		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
812 813 814
			printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
			      "but conn buffer is only %u (opcode %0x)\n",
			      tcp_conn->in.datalen,
815
			      ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
816 817 818 819
			rc = ISCSI_ERR_PROTO;
			break;
		}

O
Olaf Kirch 已提交
820 821 822 823 824 825 826
		/* If there's data coming in with the response,
		 * receive it to the connection's buffer.
		 */
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
827
	/* fall through */
828 829
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_NOOP_IN:
830 831 832 833 834 835 836
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}
837

O
Olaf Kirch 已提交
838 839 840 841 842 843
	if (rc == 0) {
		/* Anything that comes with data should have
		 * been handled above. */
		if (tcp_conn->in.datalen)
			return ISCSI_ERR_PROTO;
		iscsi_tcp_hdr_recv_prep(tcp_conn);
844 845
	}

O
Olaf Kirch 已提交
846
	return rc;
847 848 849
}

static inline void
850
partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
851
			 int offset, int length)
852 853 854
{
	struct scatterlist temp;

855 856
	sg_init_table(&temp, 1);
	sg_set_page(&temp, sg_page(sg), length, offset);
857
	crypto_hash_update(desc, &temp, length);
858 859
}

O
Olaf Kirch 已提交
860 861 862 863 864 865 866 867 868 869
/**
 * iscsi_tcp_hdr_recv_done - process PDU header
 *
 * This is the callback invoked when the PDU header has
 * been received. If the header is followed by additional
 * header segments, we go back for more data.
 */
static int
iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
			struct iscsi_chunk *chunk)
870
{
O
Olaf Kirch 已提交
871 872
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr;
873

O
Olaf Kirch 已提交
874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891
	/* Check if there are additional header segments
	 * *prior* to computing the digest, because we
	 * may need to go back to the caller for more.
	 */
	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
	if (chunk->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
		/* Bump the header length - the caller will
		 * just loop around and get the AHS for us, and
		 * call again. */
		unsigned int ahslen = hdr->hlength << 2;

		/* Make sure we don't overflow */
		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
			return ISCSI_ERR_AHSLEN;

		chunk->total_size += ahslen;
		chunk->size += ahslen;
		return 0;
892 893
	}

O
Olaf Kirch 已提交
894 895 896 897 898 899 900 901
	/* We're done processing the header. See if we're doing
	 * header digests; if so, set up the recv_digest buffer
	 * and go back for more. */
	if (conn->hdrdgst_en) {
		if (chunk->digest_len == 0) {
			iscsi_tcp_chunk_splice_digest(chunk,
						      chunk->recv_digest);
			return 0;
902
		}
O
Olaf Kirch 已提交
903 904 905
		iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
				      chunk->total_copied - ISCSI_DIGEST_SIZE,
				      chunk->digest);
906

O
Olaf Kirch 已提交
907 908
		if (!iscsi_tcp_dgst_verify(tcp_conn, chunk))
			return ISCSI_ERR_HDR_DGST;
909
	}
O
Olaf Kirch 已提交
910 911 912

	tcp_conn->in.hdr = hdr;
	return iscsi_tcp_hdr_dissect(conn, hdr);
913 914 915
}

/**
O
Olaf Kirch 已提交
916
 * iscsi_tcp_recv - TCP receive in sendfile fashion
917 918 919 920 921 922
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 **/
static int
O
Olaf Kirch 已提交
923 924
iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
	       unsigned int offset, size_t len)
925 926
{
	struct iscsi_conn *conn = rd_desc->arg.data;
927
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
O
Olaf Kirch 已提交
928 929 930 931
	struct iscsi_chunk *chunk = &tcp_conn->in.chunk;
	struct skb_seq_state seq;
	unsigned int consumed = 0;
	int rc = 0;
932

O
Olaf Kirch 已提交
933
	debug_tcp("in %d bytes\n", skb->len - offset);
934 935 936 937 938 939

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		return 0;
	}

O
Olaf Kirch 已提交
940 941 942 943
	skb_prepare_seq_read(skb, offset, skb->len, &seq);
	while (1) {
		unsigned int avail;
		const u8 *ptr;
944

O
Olaf Kirch 已提交
945 946 947 948 949 950 951 952 953 954 955 956 957 958 959
		avail = skb_seq_read(consumed, &ptr, &seq);
		if (avail == 0)
			break;
		BUG_ON(chunk->copied >= chunk->size);

		debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
		rc = iscsi_tcp_chunk_recv(tcp_conn, chunk, ptr, avail);
		BUG_ON(rc == 0);
		consumed += rc;

		if (chunk->total_copied >= chunk->total_size) {
			rc = chunk->done(tcp_conn, chunk);
			if (rc != 0) {
				skb_abort_seq_read(&seq);
				goto error;
960
			}
961

O
Olaf Kirch 已提交
962 963
			/* The done() functions sets up the
			 * next chunk. */
964 965 966
		}
	}

O
Olaf Kirch 已提交
967 968
	conn->rxdata_octets += consumed;
	return consumed;
969

O
Olaf Kirch 已提交
970 971 972 973
error:
	debug_tcp("Error receiving PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	return 0;
974 975 976 977 978 979
}

static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
	struct iscsi_conn *conn = sk->sk_user_data;
O
Olaf Kirch 已提交
980
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
981 982 983 984
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);

985
	/*
O
Olaf Kirch 已提交
986
	 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
987
	 * We set count to 1 because we want the network layer to
O
Olaf Kirch 已提交
988
	 * hand us all the skbs that are available. iscsi_tcp_recv
989 990
	 * handled pdus that cross buffers or pdus that still need data.
	 */
991
	rd_desc.arg.data = conn;
992
	rd_desc.count = 1;
O
Olaf Kirch 已提交
993
	tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
994 995

	read_unlock(&sk->sk_callback_lock);
O
Olaf Kirch 已提交
996 997 998 999

	/* If we had to (atomically) map a highmem page,
	 * unmap it now. */
	iscsi_tcp_chunk_unmap(&tcp_conn->in.chunk);
1000 1001 1002 1003 1004
}

static void
iscsi_tcp_state_change(struct sock *sk)
{
1005
	struct iscsi_tcp_conn *tcp_conn;
1006 1007 1008 1009 1010 1011 1012 1013 1014
	struct iscsi_conn *conn;
	struct iscsi_session *session;
	void (*old_state_change)(struct sock *);

	read_lock(&sk->sk_callback_lock);

	conn = (struct iscsi_conn*)sk->sk_user_data;
	session = conn->session;

M
Mike Christie 已提交
1015 1016 1017
	if ((sk->sk_state == TCP_CLOSE_WAIT ||
	     sk->sk_state == TCP_CLOSE) &&
	    !atomic_read(&sk->sk_rmem_alloc)) {
1018 1019 1020 1021
		debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}

1022 1023
	tcp_conn = conn->dd_data;
	old_state_change = tcp_conn->old_state_change;
1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037

	read_unlock(&sk->sk_callback_lock);

	old_state_change(sk);
}

/**
 * iscsi_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 **/
static void
iscsi_write_space(struct sock *sk)
{
	struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1038 1039 1040
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	tcp_conn->old_write_space(sk);
1041
	debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1042
	scsi_queue_work(conn->session->host, &conn->xmitwork);
1043 1044 1045 1046 1047
}

/*
 * iscsi_conn_set_callbacks - intercept the socket's callbacks.
 *
 * Saves the current data_ready/state_change/write_space handlers so they
 * can be chained to and later restored, then installs the iSCSI ones.
 * All of it is done atomically under sk_callback_lock so the callbacks
 * never observe a half-swapped state.
 */
static void
iscsi_conn_set_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk = tcp_conn->sock->sk;

	/* assign new callbacks */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	tcp_conn->old_data_ready = sk->sk_data_ready;
	tcp_conn->old_state_change = sk->sk_state_change;
	tcp_conn->old_write_space = sk->sk_write_space;
	sk->sk_data_ready = iscsi_tcp_data_ready;
	sk->sk_state_change = iscsi_tcp_state_change;
	sk->sk_write_space = iscsi_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}

/*
 * iscsi_conn_restore_callbacks - undo iscsi_conn_set_callbacks().
 *
 * Puts the saved socket callbacks back and detaches sk_user_data so no
 * further events reach the iSCSI layer.
 */
static void
iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
{
	struct sock *sk = tcp_conn->sock->sk;

	/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data    = NULL;
	sk->sk_data_ready   = tcp_conn->old_data_ready;
	sk->sk_state_change = tcp_conn->old_state_change;
	sk->sk_write_space  = tcp_conn->old_write_space;
	/* re-enable checksumming that we may have turned off */
	sk->sk_no_check	 = 0;
	write_unlock_bh(&sk->sk_callback_lock);
}

/**
 * iscsi_send - generic send routine
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @size: actual size to write
 * @flags: socket's flags
 *
 * Pushes @size bytes of @buf out the connection's socket, accounting the
 * transfer and mapping -EAGAIN to -ENOBUFS so callers retry later.  Any
 * other error fails the connection.
 */
static inline int
iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct socket *sk = tcp_conn->sock;
	int offset = buf->sg.offset + buf->sent, res;

	/*
	 * if we got use_sg=0 or are sending something we kmallocd
	 * then we did not have to do kmap (kmap returns page_address)
	 *
	 * if we got use_sg > 0, but had to drop down, we do not
	 * set clustering so this should only happen for that
	 * slab case.
	 */
	if (buf->use_sendmsg)
		res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
	else
		res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);

	if (res >= 0) {
		conn->txdata_octets += res;
		buf->sent += res;
		return res;
	}

	tcp_conn->sendpage_failures_cnt++;
	/* -EAGAIN just means the send buffer is full: retryable */
	if (res == -EAGAIN)
		res = -ENOBUFS;
	else
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	return res;
}

/**
 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @datalen: length of data to be sent after the header
 *
 * Notes:
 *	(Tx, Fast Path)
 **/
static inline int
iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
{
	int flags = 0; /* MSG_DONTWAIT; */
	int rc, remaining;

	remaining = buf->sg.length - buf->sent;
	BUG_ON(buf->sent + remaining > buf->sg.length);
	/* more header bytes, or payload, will follow this send */
	if (buf->sent + remaining != buf->sg.length || datalen)
		flags |= MSG_MORE;

	rc = iscsi_send(conn, buf, remaining, flags);
	debug_tcp("sendhdr %d bytes, sent %d res %d\n", remaining, buf->sent,
		  rc);
	if (rc < 0)
		return rc;
	/* partial send: caller must come back for the rest */
	if (remaining != rc)
		return -EAGAIN;
	return 0;
}

/**
 * iscsi_sendpage - send one page of iSCSI Data-Out.
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @count: remaining data
 * @sent: number of bytes sent
 *
 * Notes:
 *	(Tx, Fast Path)
 **/
static inline int
iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
	       int *count, int *sent)
{
	int flags = 0; /* MSG_DONTWAIT; */
	int res, size;

	size = buf->sg.length - buf->sent;
	BUG_ON(buf->sent + size > buf->sg.length);
	if (size > *count)
		size = *count;
M
Mike Christie 已提交
1171
	if (buf->sent + size != buf->sg.length || *count != size)
1172 1173
		flags |= MSG_MORE;

1174
	res = iscsi_send(conn, buf, size, flags);
1175 1176 1177 1178 1179 1180 1181 1182
	debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
		  size, buf->sent, *count, *sent, res);
	if (res >= 0) {
		*count -= res;
		*sent += res;
		if (size != res)
			return -EAGAIN;
		return 0;
1183
	}
1184 1185 1186 1187 1188

	return res;
}

/*
 * iscsi_data_digest_init - start a fresh data-digest computation.
 *
 * Re-initializes the connection's transmit hash and resets the number of
 * digest bytes still to be sent (a CRC32C digest is 4 bytes on the wire).
 */
static inline void
iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
		      struct iscsi_tcp_cmd_task *tcp_ctask)
{
	crypto_hash_init(&tcp_conn->tx_hash);
	tcp_ctask->digest_count = 4;
}

/**
 * iscsi_solicit_data_cont - initialize next Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T info
 * @left: bytes left to transfer
 *
 * Notes:
 *	Initialize next Data-Out within this R2T sequence and continue
 *	to process next Scatter-Gather element(if any) of this SCSI command.
 *
 *	Called under connection lock.
 **/
static void
iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_r2t_info *r2t, int left)
{
	struct iscsi_data *hdr;
	int new_offset;

	/* build the next Data-Out PDU header in place */
	hdr = &r2t->dtask.hdr;
	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
	r2t->solicit_datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
	hdr->itt = ctask->hdr->itt;
	hdr->exp_statsn = r2t->exp_statsn;
	new_offset = r2t->data_offset + r2t->sent;
	hdr->offset = cpu_to_be32(new_offset);
	/* cap each Data-Out at the negotiated max xmit length; mark the
	 * last one of the sequence FINAL */
	if (left > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
	} else {
		hton24(hdr->dlength, left);
		r2t->data_count = left;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;

	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
			   sizeof(struct iscsi_hdr));

	/* current SG element not exhausted yet: keep sending from it */
	if (iscsi_buf_left(&r2t->sendbuf))
		return;

	iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
	r2t->sg += 1;
}

1247 1248
static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
			      unsigned long len)
1249
{
1250 1251 1252
	tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
	if (!tcp_ctask->pad_count)
		return;
1253

1254 1255
	tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
	debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
1256
	tcp_ctask->xmstate |= XMSTATE_W_PAD;
1257 1258 1259
}

/**
 * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
 * @ctask: scsi command task
 *
 * Resets the per-task xmit state machine to its initial header state.
 * A command must never be (re)initialized while R2Ts are still queued.
 **/
static void
iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
	tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT;
}

/**
1275
 * iscsi_tcp_mtask_xmit - xmit management(immediate) task
1276 1277 1278 1279 1280 1281 1282 1283
 * @conn: iscsi connection
 * @mtask: task management task
 *
 * Notes:
 *	The function can return -EAGAIN in which case caller must
 *	call it again later, or recover. '0' return code means successful
 *	xmit.
 *
1284
 *	Management xmit state machine consists of these states:
1285 1286 1287 1288
 *		XMSTATE_IMM_HDR_INIT	- calculate digest of PDU Header
 *		XMSTATE_IMM_HDR 	- PDU Header xmit in progress
 *		XMSTATE_IMM_DATA 	- PDU Data xmit in progress
 *		XMSTATE_IDLE		- management PDU is done
1289 1290
 **/
static int
1291
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1292
{
1293
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
1294
	int rc;
1295 1296

	debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1297
		conn->id, tcp_mtask->xmstate, mtask->itt);
1298

1299
	if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) {
1300 1301 1302 1303
		iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
				   sizeof(struct iscsi_hdr));

		if (mtask->data_count) {
1304
			tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
1305 1306 1307 1308 1309
			iscsi_buf_init_iov(&tcp_mtask->sendbuf,
					   (char*)mtask->data,
					   mtask->data_count);
		}

1310
		if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
M
Mike Christie 已提交
1311
		    conn->stop_stage != STOP_CONN_RECOVER &&
1312
		    conn->hdrdgst_en)
1313 1314
			iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
					(u8*)tcp_mtask->hdrext);
1315 1316

		tcp_mtask->sent = 0;
1317 1318
		tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT;
		tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
1319 1320
	}

1321
	if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
1322 1323
		rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
				   mtask->data_count);
1324
		if (rc)
1325
			return rc;
1326
		tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
1327 1328
	}

1329
	if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
1330
		BUG_ON(!mtask->data_count);
1331
		tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
1332 1333 1334 1335
		/* FIXME: implement.
		 * Virtual buffer could be spreaded across multiple pages...
		 */
		do {
1336 1337 1338 1339 1340
			int rc;

			rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
					&mtask->data_count, &tcp_mtask->sent);
			if (rc) {
1341
				tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
1342
				return rc;
1343 1344 1345 1346
			}
		} while (mtask->data_count);
	}

1347
	BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
A
Al Viro 已提交
1348
	if (mtask->hdr->itt == RESERVED_ITT) {
1349 1350 1351 1352 1353 1354 1355 1356
		struct iscsi_session *session = conn->session;

		spin_lock_bh(&session->lock);
		list_del(&conn->mtask->running);
		__kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
			    sizeof(void*));
		spin_unlock_bh(&session->lock);
	}
1357 1358 1359
	return 0;
}

1360 1361
/*
 * iscsi_send_cmd_hdr - prepare and transmit the SCSI command PDU header.
 *
 * XMSTATE_CMD_HDR_INIT: set up the header/SG buffers (and header digest);
 * XMSTATE_CMD_HDR_XMIT: push the header out, then arm the immediate and
 * unsolicited data states for writes. Returns 0 or -EAGAIN/-errno from
 * the send path.
 */
static int
iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct scsi_cmnd *sc = ctask->sc;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc = 0;

	if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) {
		tcp_ctask->sent = 0;
		tcp_ctask->sg_count = 0;
		tcp_ctask->exp_datasn = 0;

		if (sc->sc_data_direction == DMA_TO_DEVICE) {
			struct scatterlist *sg = scsi_sglist(sc);

			iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
			tcp_ctask->sg = sg + 1;
			tcp_ctask->bad_sg = sg + scsi_sg_count(sc);

			debug_scsi("cmd [itt 0x%x total %d imm_data %d "
				   "unsol count %d, unsol offset %d]\n",
				   ctask->itt, scsi_bufflen(sc),
				   ctask->imm_count, ctask->unsol_count,
				   ctask->unsol_offset);
		}

		iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
				  ctask->hdr_len);

		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					 iscsi_next_hdr(ctask));
		tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT;
		tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT;
	}

	if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) {
		rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT;

		/* reads carry no outgoing data: header done means done */
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			return 0;

		if (ctask->imm_count) {
			tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
			iscsi_set_padding(tcp_ctask, ctask->imm_count);

			if (ctask->conn->datadgst_en) {
				iscsi_data_digest_init(ctask->conn->dd_data,
						       tcp_ctask);
				tcp_ctask->immdigest = 0;
			}
		}

		if (ctask->unsol_count)
			tcp_ctask->xmstate |=
					XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
	}
	return rc;
}

1423 1424
/*
 * iscsi_send_padding - send the pad bytes for the current data segment.
 *
 * Fresh padding (XMSTATE_W_PAD) is folded into the running data digest
 * before transmission; a retried send (XMSTATE_W_RESEND_PAD) skips the
 * digest update since it was already accounted. No-op when neither state
 * is set.
 */
static int
iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int sent = 0, rc;

	if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
		iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
				   tcp_ctask->pad_count);
		if (conn->datadgst_en)
			crypto_hash_update(&tcp_conn->tx_hash,
					   &tcp_ctask->sendbuf.sg,
					   tcp_ctask->sendbuf.sg.length);
	} else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
		return 0;

	tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
	debug_scsi("sending %d pad bytes for itt 0x%x\n",
		   tcp_ctask->pad_count, ctask->itt);
	rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
			   &sent);
	if (rc) {
		debug_scsi("padding send failed %d\n", rc);
		/* resume in resend state; digest is already up to date */
		tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
	}
	return rc;
}

1453 1454 1455
/*
 * iscsi_send_digest - finalize and transmit the data digest.
 *
 * On the first attempt the running tx hash is finalized into @digest and
 * wrapped in @buf; on a resend (XMSTATE_W_RESEND_DATA_DIGEST) the
 * previously finalized value is reused. No-op when data digests are
 * disabled.
 */
static int
iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_buf *buf, uint32_t *digest)
{
	struct iscsi_tcp_cmd_task *tcp_ctask;
	struct iscsi_tcp_conn *tcp_conn;
	int rc, sent = 0;

	if (!conn->datadgst_en)
		return 0;

	tcp_ctask = ctask->dd_data;
	tcp_conn = conn->dd_data;

	/* only finalize once; a resend reuses the computed digest */
	if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
		crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
		iscsi_buf_init_iov(buf, (char*)digest, 4);
	}
	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;

	rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
	if (!rc)
		debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
			  ctask->itt);
	else {
		debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
			  *digest, ctask->itt);
		tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
	}
	return rc;
}
1484

1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
/*
 * iscsi_send_data - push a data segment out, SG element by SG element.
 *
 * Loops until *count bytes have gone out, feeding each partial send into
 * the running data digest and advancing to the next scatterlist entry as
 * buffers drain. The SG advance happens even when iscsi_sendpage()
 * reported an error, so a retry resumes at the right element. Finishes
 * the segment with padding and the data digest.
 */
static int
iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
		struct scatterlist **sg, int *sent, int *count,
		struct iscsi_buf *digestbuf, uint32_t *digest)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int rc, buf_sent, offset;

	while (*count) {
		buf_sent = 0;
		offset = sendbuf->sent;

		rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
		*sent = *sent + buf_sent;
		/* digest covers exactly the bytes that actually went out */
		if (buf_sent && conn->datadgst_en)
			partial_sg_digest_update(&tcp_conn->tx_hash,
				&sendbuf->sg, sendbuf->sg.offset + offset,
				buf_sent);
		if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
			iscsi_buf_init_sg(sendbuf, *sg);
			*sg = *sg + 1;
		}

		if (rc)
			return rc;
	}

	rc = iscsi_send_padding(conn, ctask);
	if (rc)
		return rc;

	return iscsi_send_digest(conn, ctask, digestbuf, digest);
}

1521 1522
/*
 * iscsi_send_unsol_hdr - build and send an unsolicited Data-Out header.
 *
 * XMSTATE_UNS_INIT prepares the PDU header (plus header digest and
 * padding bookkeeping); the header is then transmitted. On failure the
 * state is rewound to XMSTATE_UNS_HDR so the caller retries the send.
 */
static int
iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_task *dtask;
	int rc;

	tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
	if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
		dtask = &tcp_ctask->unsol_dtask;

		iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
		iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
				   sizeof(struct iscsi_hdr));
		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					(u8*)dtask->hdrext);

		tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
		iscsi_set_padding(tcp_ctask, ctask->data_count);
	}

	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
	if (rc) {
		/* header not fully out: retry from the header state */
		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
		tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
		return rc;
	}

	if (conn->datadgst_en) {
		dtask = &tcp_ctask->unsol_dtask;
		iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
		dtask->digest = 0;
	}

	debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
		   ctask->itt, ctask->unsol_count, tcp_ctask->sent);
	return 0;
}

1561 1562
/*
 * iscsi_send_unsol_pdu - drive the unsolicited Data-Out sequence.
 *
 * Sends header then payload for each unsolicited Data-Out, looping (via
 * the send_hdr label) until unsol_count is exhausted. Returns -EAGAIN or
 * an error from the send path; the xmstate bits record where to resume.
 */
static int
iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc;

	if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
		BUG_ON(!ctask->unsol_count);
		tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
send_hdr:
		rc = iscsi_send_unsol_hdr(conn, ctask);
		if (rc)
			return rc;
	}

	if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
		struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
		int start = tcp_ctask->sent;

		rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
				     &tcp_ctask->sent, &ctask->data_count,
				     &dtask->digestbuf, &dtask->digest);
		/* account even a partial transfer before bailing out */
		ctask->unsol_count -= tcp_ctask->sent - start;
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
		/*
		 * Done with the Data-Out. Next, check if we need
		 * to send another unsolicited Data-Out.
		 */
		if (ctask->unsol_count) {
			debug_scsi("sending more uns\n");
			tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
			goto send_hdr;
		}
	}
	return 0;
}

1600 1601
/*
 * iscsi_send_sol_pdu - drive solicited (R2T) Data-Out transmission.
 *
 * Dequeues the current R2T (if none is in flight), then walks the
 * header-init -> header-xmit -> data states, looping back to send_hdr
 * for each additional Data-Out of the R2T and for each queued R2T.
 * Returns -EAGAIN or an error from the send path; xmstate records where
 * to resume.
 */
static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
			      struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_r2t_info *r2t;
	struct iscsi_data_task *dtask;
	int left, rc;

	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) {
		if (!tcp_ctask->r2t) {
			spin_lock_bh(&session->lock);
			__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
				    sizeof(void*));
			spin_unlock_bh(&session->lock);
		}
send_hdr:
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &r2t->headbuf,
					(u8*)dtask->hdrext);
		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT;
		tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
	}

	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;

		if (conn->datadgst_en) {
			iscsi_data_digest_init(conn->dd_data, tcp_ctask);
			dtask->digest = 0;
		}

		iscsi_set_padding(tcp_ctask, r2t->data_count);
		debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
			r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
			r2t->sent);
	}

	if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
				     &r2t->sent, &r2t->data_count,
				     &dtask->digestbuf, &dtask->digest);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;

		/*
		 * Done with this Data-Out. Next, check if we have
		 * to send another Data-Out for this R2T.
		 */
		BUG_ON(r2t->data_length - r2t->sent < 0);
		left = r2t->data_length - r2t->sent;
		if (left) {
			iscsi_solicit_data_cont(conn, ctask, r2t, left);
			goto send_hdr;
		}

		/*
		 * Done with this R2T. Check if there are more
		 * outstanding R2Ts ready to be processed.
		 */
		spin_lock_bh(&session->lock);
		tcp_ctask->r2t = NULL;
		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
				sizeof(void*))) {
			tcp_ctask->r2t = r2t;
			spin_unlock_bh(&session->lock);
			goto send_hdr;
		}
		spin_unlock_bh(&session->lock);
	}
	return 0;
}

1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701
/**
 * iscsi_tcp_ctask_xmit - xmit normal PDU task
 * @conn: iscsi connection
 * @ctask: iscsi command task
 *
 * Notes:
 *	The function can return -EAGAIN in which case caller must
 *	call it again later, or recover. '0' return code means successful
 *	xmit.
 *	The function is divided into logical helpers (above) for the
 *	different xmit stages.
 *
 *iscsi_send_cmd_hdr()
 *	XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers Calculate
 *	                       Header Digest
 *	XMSTATE_CMD_HDR_XMIT - Transmit header in progress
 *
 *iscsi_send_padding
 *	XMSTATE_W_PAD        - Prepare and send padding
 *	XMSTATE_W_RESEND_PAD - retry send padding
 *
 *iscsi_send_digest
 *	XMSTATE_W_RESEND_DATA_DIGEST - finalize and send, or retry sending,
 *	                               the Data Digest
 *
 *iscsi_send_unsol_hdr
 *	XMSTATE_UNS_INIT     - prepare un-solicit data header and digest
 *	XMSTATE_UNS_HDR      - send un-solicit header
 *
 *iscsi_send_unsol_pdu
 *	XMSTATE_UNS_DATA     - send un-solicit data in progress
 *
 *iscsi_send_sol_pdu
 *	XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize
 *	XMSTATE_SOL_HDR      - send solicit header
 *	XMSTATE_SOL_DATA     - send solicit data
 *
 *iscsi_tcp_ctask_xmit
 *	XMSTATE_IMM_DATA     - xmit management data (??)
 **/
static int
iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc = 0;

	debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
		conn->id, tcp_ctask->xmstate, ctask->itt);

	rc = iscsi_send_cmd_hdr(conn, ctask);
	if (rc)
		return rc;
	/* reads have no outgoing payload */
	if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
		return 0;

	if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
		rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
				     &tcp_ctask->sent, &ctask->imm_count,
				     &tcp_ctask->immbuf, &tcp_ctask->immdigest);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
	}

	rc = iscsi_send_unsol_pdu(conn, ctask);
	if (rc)
		return rc;

	rc = iscsi_send_sol_pdu(conn, ctask);
	if (rc)
		return rc;

	return rc;
}

1764 1765
/*
 * iscsi_tcp_conn_create - allocate the TCP-specific connection state.
 *
 * Sets up the generic class connection, the iscsi_tcp_conn private data
 * and the tx/rx CRC32C transforms used for header/data digests. On any
 * failure the goto chain unwinds exactly what was allocated and NULL is
 * returned.
 */
static struct iscsi_cls_conn *
iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;

	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	/*
	 * due to strange issues with iser these are not set
	 * in iscsi_conn_setup
	 */
	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;

	tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
	if (!tcp_conn)
		goto tcp_conn_alloc_fail;

	conn->dd_data = tcp_conn;
	tcp_conn->iscsi_conn = conn;

	tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						  CRYPTO_ALG_ASYNC);
	tcp_conn->tx_hash.flags = 0;
	if (IS_ERR(tcp_conn->tx_hash.tfm)) {
		printk(KERN_ERR "Could not create connection due to crc32c "
		       "loading error %ld. Make sure the crc32c module is "
		       "built as a module or into the kernel\n",
			PTR_ERR(tcp_conn->tx_hash.tfm));
		goto free_tcp_conn;
	}

	tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						  CRYPTO_ALG_ASYNC);
	tcp_conn->rx_hash.flags = 0;
	if (IS_ERR(tcp_conn->rx_hash.tfm)) {
		printk(KERN_ERR "Could not create connection due to crc32c "
		       "loading error %ld. Make sure the crc32c module is "
		       "built as a module or into the kernel\n",
			PTR_ERR(tcp_conn->rx_hash.tfm));
		goto free_tx_tfm;
	}

	return cls_conn;

free_tx_tfm:
	crypto_free_hash(tcp_conn->tx_hash.tfm);
free_tcp_conn:
	kfree(tcp_conn);
tcp_conn_alloc_fail:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}

1821 1822 1823
/*
 * iscsi_tcp_release_conn - detach and drop the connection's socket.
 *
 * Restores the original socket callbacks (holding an extra sock ref so
 * the sock cannot vanish meanwhile), clears the socket pointers under
 * the session lock, and releases the file-descriptor reference taken by
 * sockfd_lookup(). Safe to call when no socket is bound.
 */
static void
iscsi_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct socket *sock = tcp_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_conn_restore_callbacks(tcp_conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->lock);
	tcp_conn->sock = NULL;
	conn->recv_lock = NULL;
	spin_unlock_bh(&session->lock);
	sockfd_put(sock);
}

1842
/*
 * iscsi_tcp_conn_destroy - tear down everything iscsi_tcp_conn_create()
 * built: the socket binding, the generic connection, both digest
 * transforms (which may be unallocated on partial setup), and the
 * private data itself.
 */
static void
iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	iscsi_tcp_release_conn(conn);
	iscsi_conn_teardown(cls_conn);

	if (tcp_conn->tx_hash.tfm)
		crypto_free_hash(tcp_conn->tx_hash.tfm);
	if (tcp_conn->rx_hash.tfm)
		crypto_free_hash(tcp_conn->rx_hash.tfm);

	kfree(tcp_conn);
}
1858

1859 1860 1861 1862 1863 1864 1865 1866 1867
/*
 * iscsi_tcp_conn_stop - stop the connection and release its socket.
 */
static void
iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *iscsi_conn = cls_conn->dd_data;

	/* generic stop first, then drop our transport-level socket refs */
	iscsi_conn_stop(cls_conn, flag);
	iscsi_tcp_release_conn(iscsi_conn);
}

1868 1869 1870 1871 1872 1873 1874 1875 1876 1877
/*
 * iscsi_tcp_get_addr - copy a socket endpoint address into @buf/@port.
 *
 * @getname is kernel_getpeername or kernel_getsockname, selecting which
 * endpoint of @sock is formatted. The session lock serializes updates
 * against readers in iscsi_tcp_conn_get_param().
 *
 * NOTE(review): an address family other than AF_INET/AF_INET6 falls
 * through the switch and returns 0 with @buf/@port untouched — confirm
 * callers only ever bind INET sockets.
 */
static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
			      char *buf, int *port,
			      int (*getname)(struct socket *, struct sockaddr *,
					int *addrlen))
{
	struct sockaddr_storage *addr;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	int rc = 0, len;

	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (getname(sock, (struct sockaddr *) addr, &len)) {
		rc = -ENODEV;
		goto free_addr;
	}

	switch (addr->ss_family) {
	case AF_INET:
		sin = (struct sockaddr_in *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
		*port = be16_to_cpu(sin->sin_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
		*port = be16_to_cpu(sin6->sin6_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	}
free_addr:
	kfree(addr);
	return rc;
}

1908 1909
/*
 * iscsi_tcp_conn_bind - attach a userspace-created TCP socket to the
 * connection.
 *
 * @transport_eph is the file descriptor of the already-connected socket.
 * The peer/local addresses are cached first (userspace may query them
 * even if the bind later fails), the generic bind is performed, socket
 * parameters are tuned, callbacks are intercepted, and the receive
 * state machine is primed.
 */
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		    int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}
	/*
	 * copy these values now because if we drop the session
	 * userspace may still want to query the values since we will
	 * be using them for the reconnect
	 */
	err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
				 &conn->portal_port, kernel_getpeername);
	if (err)
		goto free_socket;

	err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
				&conn->local_port, kernel_getsockname);
	if (err)
		goto free_socket;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	/* bind iSCSI connection and socket */
	tcp_conn->sock = sock;

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	/* FIXME: disable Nagle's algorithm */

	/*
	 * Intercept TCP callbacks for sendfile like receive
	 * processing.
	 */
	conn->recv_lock = &sk->sk_callback_lock;
	iscsi_conn_set_callbacks(conn);
	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

1973
/* called with host lock */
M
Mike Christie 已提交
1974
static void
1975
iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1976
{
1977
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
1978
	tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT;
1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991
}

static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
	        struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
1992
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1993 1994 1995 1996 1997 1998 1999 2000

		/*
		 * pre-allocated x4 as much r2ts to handle race when
		 * target acks DataOut faster than we data_xmit() queues
		 * could replenish r2tqueue.
		 */

		/* R2T pool */
2001 2002 2003
		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
				    (void***)&tcp_ctask->r2ts,
				    sizeof(struct iscsi_r2t_info))) {
2004 2005 2006 2007
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
2008
		tcp_ctask->r2tqueue = kfifo_alloc(
2009
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
2010 2011 2012
		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&tcp_ctask->r2tpool,
					(void**)tcp_ctask->r2ts);
2013 2014 2015 2016 2017 2018 2019 2020
			goto r2t_alloc_fail;
		}
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
2021 2022 2023 2024 2025 2026
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool,
				(void**)tcp_ctask->r2ts);
2027 2028 2029 2030 2031 2032 2033 2034 2035 2036
	}
	return -ENOMEM;
}

/*
 * iscsi_r2tpool_free - release every per-command R2T queue and pool
 * allocated by iscsi_r2tpool_alloc().
 */
static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_tcp_cmd_task *tcp_ctask =
						session->cmds[i]->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool, (void**)tcp_ctask->r2ts);
	}
}

static int
2047
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2048
		     char *buf, int buflen)
2049
{
2050
	struct iscsi_conn *conn = cls_conn->dd_data;
2051
	struct iscsi_session *session = conn->session;
2052
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2053
	int value;
2054 2055 2056

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
2057
		iscsi_set_param(cls_conn, param, buf, buflen);
2058 2059
		break;
	case ISCSI_PARAM_DATADGST_EN:
2060
		iscsi_set_param(cls_conn, param, buf, buflen);
2061 2062
		tcp_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
2063 2064
		break;
	case ISCSI_PARAM_MAX_R2T:
2065
		sscanf(buf, "%d", &value);
2066 2067 2068
		if (session->max_r2t == roundup_pow_of_two(value))
			break;
		iscsi_r2tpool_free(session);
2069
		iscsi_set_param(cls_conn, param, buf, buflen);
2070 2071 2072 2073 2074 2075
		if (session->max_r2t & (session->max_r2t - 1))
			session->max_r2t = roundup_pow_of_two(session->max_r2t);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
2076
		return iscsi_set_param(cls_conn, param, buf, buflen);
2077 2078 2079 2080 2081 2082
	}

	return 0;
}

static int
2083 2084
iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
			 enum iscsi_param param, char *buf)
2085
{
2086
	struct iscsi_conn *conn = cls_conn->dd_data;
2087
	int len;
2088 2089

	switch(param) {
2090
	case ISCSI_PARAM_CONN_PORT:
2091 2092 2093
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%hu\n", conn->portal_port);
		spin_unlock_bh(&conn->session->lock);
2094
		break;
2095
	case ISCSI_PARAM_CONN_ADDRESS:
2096 2097 2098
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%s\n", conn->portal_address);
		spin_unlock_bh(&conn->session->lock);
2099 2100
		break;
	default:
2101
		return iscsi_conn_get_param(cls_conn, param, buf);
2102 2103 2104 2105 2106
	}

	return len;
}

2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129
static int
iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf)
{
        struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		spin_lock_bh(&session->lock);
		if (!session->leadconn)
			len = -ENODEV;
		else
			len = sprintf(buf, "%s\n",
				     session->leadconn->local_address);
		spin_unlock_bh(&session->lock);
		break;
	default:
		return iscsi_host_get_param(shost, param, buf);
	}
	return len;
}

2130
static void
2131
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
2132
{
2133
	struct iscsi_conn *conn = cls_conn->dd_data;
2134
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
2147
	stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
2148
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
2149
	stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
2150 2151 2152 2153
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;
}

2154 2155 2156
static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_transport *iscsit,
			 struct scsi_transport_template *scsit,
2157
			 uint16_t cmds_max, uint16_t qdepth,
2158
			 uint32_t initial_cmdsn, uint32_t *hostno)
2159
{
2160 2161 2162 2163
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	uint32_t hn;
	int cmd_i;
2164

2165
	cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
2166 2167 2168 2169 2170 2171
					 sizeof(struct iscsi_tcp_cmd_task),
					 sizeof(struct iscsi_tcp_mgmt_task),
					 initial_cmdsn, &hn);
	if (!cls_session)
		return NULL;
	*hostno = hn;
2172

2173 2174 2175 2176 2177
	session = class_to_transport_session(cls_session);
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

2178 2179
		ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
		ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202
	}

	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
		struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

		mtask->hdr = &tcp_mtask->hdr;
	}

	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
		goto r2tpool_alloc_fail;

	return cls_session;

r2tpool_alloc_fail:
	iscsi_session_teardown(cls_session);
	return NULL;
}

/* Tear down the R2T pools, then the generic session state. */
static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = class_to_transport_session(cls_session);

	iscsi_r2tpool_free(session);
	iscsi_session_teardown(cls_session);
}

2205 2206
static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
{
2207
	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
2208 2209 2210 2211
	blk_queue_dma_alignment(sdev->request_queue, 0);
	return 0;
}

2212
static struct scsi_host_template iscsi_sht = {
2213
	.module			= THIS_MODULE,
2214
	.name			= "iSCSI Initiator over TCP/IP",
2215 2216
	.queuecommand           = iscsi_queuecommand,
	.change_queue_depth	= iscsi_change_queue_depth,
2217
	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
2218
	.sg_tablesize		= ISCSI_SG_TABLESIZE,
2219
	.max_sectors		= 0xFFFF,
2220 2221
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler       = iscsi_eh_abort,
2222
	.eh_device_reset_handler= iscsi_eh_device_reset,
2223 2224
	.eh_host_reset_handler	= iscsi_eh_host_reset,
	.use_clustering         = DISABLE_CLUSTERING,
2225
	.slave_configure        = iscsi_tcp_slave_configure,
2226 2227 2228 2229
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
};

2230 2231 2232 2233 2234
static struct iscsi_transport iscsi_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
2248
				  ISCSI_CONN_ADDRESS |
2249 2250 2251
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
2252 2253
				  ISCSI_TARGET_NAME | ISCSI_TPGT |
				  ISCSI_USERNAME | ISCSI_PASSWORD |
2254 2255
				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				  ISCSI_FAST_ABORT,
2256
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
2257 2258
				  ISCSI_HOST_INITIATOR_NAME |
				  ISCSI_HOST_NETDEV_NAME,
2259
	.host_template		= &iscsi_sht,
2260
	.conndata_size		= sizeof(struct iscsi_conn),
2261 2262
	.max_conn		= 1,
	.max_cmd_len		= ISCSI_TCP_MAX_CMD_LEN,
2263 2264 2265 2266 2267 2268 2269
	/* session management */
	.create_session		= iscsi_tcp_session_create,
	.destroy_session	= iscsi_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_tcp_conn_create,
	.bind_conn		= iscsi_tcp_conn_bind,
	.destroy_conn		= iscsi_tcp_conn_destroy,
2270
	.set_param		= iscsi_conn_set_param,
2271
	.get_conn_param		= iscsi_tcp_conn_get_param,
2272
	.get_session_param	= iscsi_session_get_param,
2273
	.start_conn		= iscsi_conn_start,
2274
	.stop_conn		= iscsi_tcp_conn_stop,
2275
	/* iscsi host params */
2276
	.get_host_param		= iscsi_tcp_host_get_param,
2277
	.set_host_param		= iscsi_host_set_param,
2278
	/* IO */
2279 2280
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_conn_get_stats,
2281 2282 2283 2284 2285 2286
	.init_cmd_task		= iscsi_tcp_cmd_init,
	.init_mgmt_task		= iscsi_tcp_mgmt_init,
	.xmit_cmd_task		= iscsi_tcp_ctask_xmit,
	.xmit_mgmt_task		= iscsi_tcp_mtask_xmit,
	.cleanup_cmd_task	= iscsi_tcp_cleanup_ctask,
	/* recovery */
M
Mike Christie 已提交
2287
	.session_recovery_timedout = iscsi_session_recovery_timedout,
2288 2289 2290 2291 2292 2293
};

/*
 * Module init: validate the max_lun module parameter, then register
 * the transport with the iSCSI class.  Returns 0, -EINVAL on a bad
 * parameter, or -ENODEV if registration fails.
 */
static int __init
iscsi_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}
	iscsi_tcp_transport.max_lun = iscsi_max_lun;

	/* iscsi_register_transport() returns NULL on failure */
	return iscsi_register_transport(&iscsi_tcp_transport) ? 0 : -ENODEV;
}

/* Module exit: unregister the transport from the iSCSI class. */
static void __exit
iscsi_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_tcp_transport);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);