/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * RPORT GENERAL INFO
 *
 * This file contains all processing regarding fc_rports. It contains the
 * rport state machine and does all rport interaction with the transport class.
 * There should be no other places in libfc that interact directly with the
 * transport class in regards to adding and deleting rports.
 *
 * fc_rports represent N_Ports within the fabric.
 */

/*
 * RPORT LOCKING
 *
 * The rport should never hold the rport mutex and then attempt to acquire
 * either the lport or disc mutexes. The rport's mutex is considered lesser
 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
 * more comments on the hierarchy.
 *
 * The locking strategy is similar to the lport's strategy. The lock protects
 * the rport's states and is held and released by the entry points to the rport
 * block. All _enter_* functions correspond to rport states and expect the rport
 * mutex to be locked before calling them. This means that rports only handle
 * one request or response at a time; since they're not critical for the I/O
 * path, this potential over-use of the mutex is acceptable.
 */
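
/*
 * Illustrative sketch of the entry-point pattern described above (not part
 * of the original driver; example_rport_entry_point() is a hypothetical
 * name).  Real entry points such as fc_rport_login() and fc_rport_logoff()
 * below follow the same shape: take only the rport mutex, never while
 * holding the lport or disc mutex, call an _enter_* routine with the mutex
 * held, then release it.
 *
 *	static int example_rport_entry_point(struct fc_rport_priv *rdata)
 *	{
 *		mutex_lock(&rdata->rp_mutex);
 *		fc_rport_enter_plogi(rdata);
 *		mutex_unlock(&rdata->rp_mutex);
 *		return 0;
 *	}
 */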

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

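/* Single-threaded workqueue on which fc_rport_work() runs for all rports */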
struct workqueue_struct *rport_event_queue;

static void fc_rport_enter_plogi(struct fc_rport_priv *);
static void fc_rport_enter_prli(struct fc_rport_priv *);
static void fc_rport_enter_rtv(struct fc_rport_priv *);
static void fc_rport_enter_ready(struct fc_rport_priv *);
static void fc_rport_enter_logo(struct fc_rport_priv *);

static void fc_rport_recv_plogi_req(struct fc_lport *,
				    struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prli_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_work(struct work_struct *);

static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_DELETE] = "Delete",
};

/**
 * fc_rport_lookup() - lookup a remote port by port_id
 * @lport: Fibre Channel host port instance
 * @port_id: remote port port_id to match
 */
static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	list_for_each_entry(rdata, &lport->disc.rports, peers)
		if (rdata->ids.port_id == port_id &&
		    rdata->rp_state != RPORT_ST_DELETE)
			return rdata;
	return NULL;
}

/**
 * fc_rport_create() - Create a new remote port
 * @lport:   The local port that the new remote port is for
 * @port_id: The port ID for the new remote port
 *
 * Locking note:  must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (!rdata)
		return NULL;

	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	if (port_id != FC_FID_DIR_SERV)
		list_add(&rdata->peers, &lport->disc.rports);
	return rdata;
}

/**
 * fc_rport_destroy() - free a remote port after last reference is released.
 * @kref: pointer to kref inside struct fc_rport_priv
 */
static void fc_rport_destroy(struct kref *kref)
{
	struct fc_rport_priv *rdata;

	rdata = container_of(kref, struct fc_rport_priv, kref);
	kfree(rdata);
}

/**
 * fc_rport_state() - return a string for the state the rport is in
 * @rdata: remote port private data
 */
static const char *fc_rport_state(struct fc_rport_priv *rdata)
{
	const char *cp;

	cp = fc_rport_state_names[rdata->rp_state];
	if (!cp)
		cp = "Unknown";
	return cp;
}

/**
 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
 * @rport: Pointer to Fibre Channel remote port structure
 * @timeout: timeout in seconds
 */
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout + 5;
	else
		rport->dev_loss_tmo = 30;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);

/**
 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
 * @flp: FLOGI payload structure
 * @maxval: upper limit, may be less than what is in the service parameters
 */
static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
					  unsigned int maxval)
{
	unsigned int mfs;

	/*
	 * Get max payload from the common service parameters and the
	 * class 3 receive data field size.
	 */
	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	return maxval;
}

/**
 * fc_rport_state_enter() - Change the rport's state
 * @rdata: The rport whose state should change
 * @new: The new state of the rport
 *
 * Locking Note: Called with the rport lock held
 */
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
				 enum fc_rport_state new)
{
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}

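/**
 * fc_rport_work() - Handler for remote port events in the rport_event_queue
 * @work: The work struct of the fc_rport_priv
 *
 * Delivers the queued event (ready, failed, logo or stop) to the rport
 * operations event_callback and adds or deletes the FC transport remote
 * port as required.
 */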
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rp;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		rp = rport->dd_data;
		rp->local_port = lport;
		rp->rp_state = rdata->rp_state;
		rp->flags = rdata->flags;
		rp->e_d_tov = rdata->e_d_tov;
		rp->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		if (port_id != FC_FID_DIR_SERV) {
			mutex_lock(&lport->disc.disc_mutex);
			list_del(&rdata->peers);
			mutex_unlock(&lport->disc.disc_mutex);
		}

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rp = rport->dd_data;
			rp->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}

/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: private remote port
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Login to port\n");

	fc_rport_enter_plogi(rdata);

	mutex_unlock(&rdata->rp_mutex);

	return 0;
}

/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}

/**
 * fc_rport_logoff() - Logoff and remove an rport
 * @rdata: private remote port
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		mutex_unlock(&rdata->rp_mutex);
		goto out;
	}

	fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
	mutex_unlock(&rdata->rp_mutex);

out:
	return 0;
}

/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}

/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}

/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_PRLI:
	case RPORT_ST_LOGO:
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		break;
	}
}

/**
 * fc_rport_error_retry() - Error handler when retries are desired
 * @rdata: private remote port data
 * @fp: The frame pointer
 *
 * If the error was an exchange timeout retry immediately,
 * otherwise wait for E_D_TOV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
				 struct fc_frame *fp)
{
	unsigned long delay = FC_DEF_E_D_TOV;

	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return fc_rport_error(rdata, fp);

	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
			     PTR_ERR(fp), fc_rport_state(rdata));
		rdata->retries++;
		/* no additional delay on exchange timeouts */
		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
			delay = 0;
		schedule_delayed_work(&rdata->retry_work, delay);
		return;
	}

	return fc_rport_error(rdata, fp);
}

/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		fc_rport_enter_rtv(rdata);
	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;

	/*
	 * If the rport is one of the well known addresses
	 * we skip PRLI and RTV and go straight to READY.
	 */
	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
		fc_rport_enter_ready(rdata);
		return;
	}

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
				  fc_rport_prli_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_rtv_resp() - Request Timeout Value response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
				     fc_rport_rtv_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_LOGO);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
				  fc_rport_logo_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}


/**
 * fc_rport_recv_req() - Receive a request from a rport
 * @sp: current sequence in the request exchange
 * @fp: received request frame
 * @lport: Fibre Channel local port
 *
 * Locking Note: Called with the lport lock held.
 */
void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
		       struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;
	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;
	u32 s_id;
	u8 op;

	els_data.fp = NULL;
	els_data.explan = ELS_EXPL_NONE;
	els_data.reason = ELS_RJT_NONE;

	op = fc_frame_payload_op(fp);
	switch (op) {
	case ELS_PLOGI:
		fc_rport_recv_plogi_req(lport, sp, fp);
		return;
	default:
		break;
	}

	fh = fc_frame_header_get(fp);
	s_id = ntoh24(fh->fh_s_id);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, s_id);
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		els_data.reason = ELS_RJT_UNAB;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
		fc_frame_free(fp);
		return;
	}
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	switch (op) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, sp, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, sp, fp);
		break;
	case ELS_LOGO:
		fc_rport_recv_logo_req(rdata, sp, fp);
		break;
	case ELS_RRQ:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
		break;
	case ELS_REC:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
		break;
	default:
		els_data.reason = ELS_RJT_UNSUP;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}

/**
 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
 * @lport: local port
 * @sp: current sequence in the PLOGI exchange
 * @fp: PLOGI request frame
 *
 * Locking Note: The rport lock is not held by the caller; it is acquired
 * here once the rport has been looked up or created.
 */
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
				    struct fc_seq *sp, struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	struct fc_els_flogi *pl;
	struct fc_seq_els_data rjt_data;
	u32 sid, f_ctl;

	rjt_data.fp = NULL;
	fh = fc_frame_header_get(fp);
	sid = ntoh24(fh->fh_s_id);

	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");

	pl = fc_frame_payload_get(fp, sizeof(*pl));
	if (!pl) {
		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);
	rdata = lport->tt.rport_create(lport, sid);
	if (!rdata) {
		mutex_unlock(&disc->disc_mutex);
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);

	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);

	/*
	 * If the rport was just created, possibly due to the incoming PLOGI,
	 * set the state appropriately and accept the PLOGI.
	 *
	 * If we had also sent a PLOGI, and if the received PLOGI is from a
 * higher WWPN, we accept it; otherwise an LS_RJT is sent with reason
	 * "command already in progress".
	 *
	 * XXX TBD: If the session was ready before, the PLOGI should result in
	 * all outstanding exchanges being reset.
	 */
	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
		break;
	case RPORT_ST_PLOGI:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
		if (rdata->ids.port_name < lport->wwpn) {
			mutex_unlock(&rdata->rp_mutex);
			rjt_data.reason = ELS_RJT_INPROG;
			rjt_data.explan = ELS_EXPL_NONE;
			goto reject;
		}
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_READY:
		break;
	case RPORT_ST_DELETE:
	default:
		FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
			     rdata->rp_state);
		fc_frame_free(rx_fp);
		goto out;
	}

	/*
	 * Get session payload size from incoming PLOGI.
	 */
	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
	fc_frame_free(rx_fp);

	/*
	 * Send LS_ACC.	 If this fails, the originator should retry.
	 */
	sp = lport->tt.seq_start_next(sp);
	if (!sp)
		goto out;
	fp = fc_frame_alloc(lport, sizeof(*pl));
	if (!fp)
		goto out;

	fc_plogi_fill(lport, fp, ELS_LS_ACC);
	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	ep = fc_seq_exch(sp);
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
		       FC_TYPE_ELS, f_ctl, 0);
	lport->tt.seq_send(lport, sp, fp);
	fc_rport_enter_prli(rdata);
out:
	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
 * @rdata: private remote port data
 * @sp: current sequence in the PRLI exchange
 * @fp: PRLI request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp, struct fc_frame *rx_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_exch *ep;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp *rspp;	/* request service param page */
	struct fc_els_spp *spp;	/* response spp */
	unsigned int len;
	unsigned int plen;
	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
	enum fc_els_spp_resp resp;
	struct fc_seq_els_data rjt_data;
	u32 f_ctl;
	u32 fcp_parm;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	rjt_data.fp = NULL;

	fh = fc_frame_header_get(rx_fp);

	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
		     fc_rport_state(rdata));

	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
		reason = ELS_RJT_NONE;
		break;
	default:
		fc_frame_free(rx_fp);
		return;
		break;
	}
	len = fr_len(rx_fp) - sizeof(*fh);
	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
	if (pp == NULL) {
		reason = ELS_RJT_PROT;
		explan = ELS_EXPL_INV_LEN;
	} else {
		plen = ntohs(pp->prli.prli_len);
		if ((plen % 4) != 0 || plen > len) {
			reason = ELS_RJT_PROT;
			explan = ELS_EXPL_INV_LEN;
		} else if (plen < len) {
			len = plen;
		}
		plen = pp->prli.prli_spp_len;
		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
		    plen > len || len < sizeof(*pp)) {
			reason = ELS_RJT_PROT;
			explan = ELS_EXPL_INV_LEN;
		}
		rspp = &pp->spp;
	}
	if (reason != ELS_RJT_NONE ||
	    (fp = fc_frame_alloc(lport, len)) == NULL) {
		rjt_data.reason = reason;
		rjt_data.explan = explan;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	} else {
		sp = lport->tt.seq_start_next(sp);
		WARN_ON(!sp);
		pp = fc_frame_payload_get(fp, len);
		WARN_ON(!pp);
		memset(pp, 0, len);
		pp->prli.prli_cmd = ELS_LS_ACC;
		pp->prli.prli_spp_len = plen;
		pp->prli.prli_len = htons(len);
		len -= sizeof(struct fc_els_prli);

		/* reinitialize remote port roles */
		rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

		/*
		 * Go through all the service parameter pages and build
		 * response.  If plen indicates longer SPP than standard,
		 * use that.  The entire response has been pre-cleared above.
		 */
		spp = &pp->spp;
		while (len >= plen) {
			spp->spp_type = rspp->spp_type;
			spp->spp_type_ext = rspp->spp_type_ext;
			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
			resp = FC_SPP_RESP_ACK;
			if (rspp->spp_flags & FC_SPP_RPA_VAL)
				resp = FC_SPP_RESP_NO_PA;
			switch (rspp->spp_type) {
			case 0:	/* common to all FC-4 types */
				break;
			case FC_TYPE_FCP:
				fcp_parm = ntohl(rspp->spp_params);
				if (fcp_parm & FCP_SPPF_RETRY)
					rdata->flags |= FC_RP_FLAGS_RETRY;
				rdata->supported_classes = FC_COS_CLASS3;
				if (fcp_parm & FCP_SPPF_INIT_FCN)
					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
				if (fcp_parm & FCP_SPPF_TARG_FCN)
					roles |= FC_RPORT_ROLE_FCP_TARGET;
				rdata->ids.roles = roles;

				spp->spp_params =
					htonl(lport->service_params);
				break;
			default:
				resp = FC_SPP_RESP_INVL;
				break;
			}
			spp->spp_flags |= resp;
			len -= plen;
			rspp = (struct fc_els_spp *)((char *)rspp + plen);
			spp = (struct fc_els_spp *)((char *)spp + plen);
		}

		/*
		 * Send LS_ACC.	 If this fails, the originator should retry.
		 */
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
		f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
		ep = fc_seq_exch(sp);
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);

		/*
		 * Get lock and re-check state.
		 */
		switch (rdata->rp_state) {
		case RPORT_ST_PRLI:
			fc_rport_enter_ready(rdata);
			break;
		case RPORT_ST_READY:
			break;
		default:
			break;
		}
	}
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
 * @rdata: private remote port data
 * @sp: current sequence in the PRLO exchange
 * @fp: PRLO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp,
				   struct fc_frame *fp)
{
	struct fc_lport *lport = rdata->local_port;

	struct fc_frame_header *fh;
	struct fc_seq_els_data rjt_data;

	fh = fc_frame_header_get(fp);

	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
		     fc_rport_state(rdata));

	if (rdata->rp_state == RPORT_ST_DELETE) {
		fc_frame_free(fp);
		return;
	}

	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_UNAB;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
 * @rdata: private remote port data
 * @sp: current sequence in the LOGO exchange
 * @fp: LOGO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp,
				   struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_lport *lport = rdata->local_port;

	fh = fc_frame_header_get(fp);

	FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
		     fc_rport_state(rdata));

	if (rdata->rp_state == RPORT_ST_DELETE) {
		fc_frame_free(fp);
		return;
	}

	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}

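/**
 * fc_rport_flush_queue() - Flush the rport_event_queue of pending work
 */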
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}

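/**
 * fc_rport_init() - Set up the remote port handlers for a local port
 * @lport: local port whose libfc function template is being initialized
 *
 * Only fills in handlers that the low-level driver has not already set.
 */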
int fc_rport_init(struct fc_lport *lport)
{
	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_rport_lookup;

	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);

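/**
 * fc_setup_rport() - Allocate the global remote port event workqueue
 */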
int fc_setup_rport(void)
{
	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
	if (!rport_event_queue)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(fc_setup_rport);

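/**
 * fc_destroy_rport() - Destroy the global remote port event workqueue
 */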
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}
EXPORT_SYMBOL(fc_destroy_rport);

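/**
 * fc_rport_terminate_io() - Stop all outstanding I/O with a remote port
 * @rport: FC transport remote port
 *
 * Resets the exchanges in both directions for the remote port's port_id.
 */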
void fc_rport_terminate_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_lport *lport = rp->local_port;

	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);