/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * RPORT GENERAL INFO
 *
 * This file contains all processing regarding fc_rports. It contains the
 * rport state machine and does all rport interaction with the transport class.
 * There should be no other places in libfc that interact directly with the
 * transport class in regards to adding and deleting rports.
 *
 * fc_rports represent N_Ports within the fabric.
 */

/*
 * RPORT LOCKING
 *
 * The rport should never hold the rport mutex and then attempt to acquire
 * either the lport or disc mutexes. The rport's mutex is considered lesser
 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
 * more comments on the hierarchy.
 *
 * The locking strategy is similar to the lport's strategy. The lock protects
 * the rport's states and is held and released by the entry points to the rport
 * block. All _enter_* functions correspond to rport states and expect the rport
 * mutex to be locked before calling them. This means that rports only handle
 * one request or response at a time; since they're not critical to the I/O
 * path, this potential over-use of the mutex is acceptable.
 */
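/*
 * Illustrative sketch (not part of the original driver): a typical entry
 * point takes the rport mutex, calls an _enter_* routine, and then releases
 * the mutex, for example:
 *
 *	mutex_lock(&rdata->rp_mutex);
 *	fc_rport_enter_plogi(rdata);
 *	mutex_unlock(&rdata->rp_mutex);
 *
 * Neither the lport mutex nor the disc mutex may be acquired while
 * rp_mutex is held.
 */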

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <asm/unaligned.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include "fc_libfc.h"

static struct workqueue_struct *rport_event_queue;

static void fc_rport_enter_flogi(struct fc_rport_priv *);
static void fc_rport_enter_plogi(struct fc_rport_priv *);
static void fc_rport_enter_prli(struct fc_rport_priv *);
static void fc_rport_enter_rtv(struct fc_rport_priv *);
static void fc_rport_enter_ready(struct fc_rport_priv *);
static void fc_rport_enter_logo(struct fc_rport_priv *);
static void fc_rport_enter_adisc(struct fc_rport_priv *);

static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_work(struct work_struct *);

static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_FLOGI] = "FLOGI",
	[RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_ADISC] = "ADISC",
	[RPORT_ST_DELETE] = "Delete",
};

/**
 * fc_rport_lookup() - Lookup a remote port by port_id
 * @lport:   The local port to lookup the remote port on
 * @port_id: The remote port ID to look up
 *
 * The caller must hold either disc_mutex or rcu_read_lock().
 */
static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
		if (rdata->ids.port_id == port_id)
			return rdata;
	return NULL;
}

/**
 * fc_rport_create() - Create a new remote port
 * @lport: The local port this remote port will be associated with
 * @port_id: The remote port ID for the new remote port
 *
 * The remote port will start in the INIT state.
 *
 * Locking note:  must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
	if (!rdata)
		return NULL;

	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	if (port_id != FC_FID_DIR_SERV) {
		rdata->lld_event_callback = lport->tt.rport_event_callback;
		list_add_rcu(&rdata->peers, &lport->disc.rports);
	}
	return rdata;
}

/**
 * fc_rport_destroy() - Free a remote port after last reference is released
 * @kref: The remote port's kref
 */
static void fc_rport_destroy(struct kref *kref)
{
	struct fc_rport_priv *rdata;

	rdata = container_of(kref, struct fc_rport_priv, kref);
	kfree_rcu(rdata, rcu);
}

/**
 * fc_rport_state() - Return a string identifying the remote port's state
 * @rdata: The remote port
 */
static const char *fc_rport_state(struct fc_rport_priv *rdata)
{
	const char *cp;

	cp = fc_rport_state_names[rdata->rp_state];
	if (!cp)
		cp = "Unknown";
	return cp;
}

/**
 * fc_set_rport_loss_tmo() - Set the remote port loss timeout
 * @rport:   The remote port that gets a new timeout value
 * @timeout: The new timeout value (in seconds)
 */
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);
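/*
 * Usage sketch (illustrative; not taken from this file): an LLD normally
 * wires this helper into its fc_function_template so that user-space
 * changes to dev_loss_tmo go through the clamping above, e.g.
 *
 *	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
 */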

/**
 * fc_plogi_get_maxframe() - Get the maximum payload from the common service
 *			     parameters in a FLOGI frame
 * @flp:    The FLOGI or PLOGI payload
 * @maxval: The maximum frame size upper limit; this may be less than what
 *	    is in the service parameters
 */
static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
					  unsigned int maxval)
{
	unsigned int mfs;

	/*
	 * Get max payload from the common service parameters and the
	 * class 3 receive data field size.
	 */
	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	return maxval;
}
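/*
 * Worked example (illustrative values): if the peer advertises a 2112-byte
 * buffer-to-buffer data field and a 2048-byte class 3 receive data field
 * size while @maxval is 1024, the function returns 1024; advertised values
 * below FC_SP_MIN_MAX_PAYLOAD are ignored and never lower the result.
 */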

/**
 * fc_rport_state_enter() - Change the state of a remote port
 * @rdata: The remote port whose state should change
 * @new:   The new state
 *
 * Locking Note: Called with the rport lock held
 */
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
				 enum fc_rport_state new)
{
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}

/**
 * fc_rport_work() - Handler for remote port events in the rport_event_queue
 * @work: Handle to the remote port being dequeued
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rpriv;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;
	struct fc4_prov *prov;
	u8 type;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		rdata->major_retries = 0;
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		rpriv = rport->dd_data;
		rpriv->local_port = lport;
		rpriv->rp_state = rdata->rp_state;
		rpriv->flags = rdata->flags;
		rpriv->e_d_tov = rdata->e_d_tov;
		rpriv->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		if (rdata->lld_event_callback) {
			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
			rdata->lld_event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		if (rdata->prli_count) {
			mutex_lock(&fc_prov_mutex);
			for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
				prov = fc_passive_prov[type];
				if (prov && prov->prlo)
					prov->prlo(rdata);
			}
			mutex_unlock(&fc_prov_mutex);
		}
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		if (rdata->lld_event_callback) {
			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
			rdata->lld_event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rpriv = rport->dd_data;
			rpriv->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}

		mutex_lock(&lport->disc.disc_mutex);
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rp_state == RPORT_ST_DELETE) {
			if (port_id == FC_FID_DIR_SERV) {
				rdata->event = RPORT_EV_NONE;
				mutex_unlock(&rdata->rp_mutex);
				kref_put(&rdata->kref, lport->tt.rport_destroy);
			} else if ((rdata->flags & FC_RP_STARTED) &&
				   rdata->major_retries <
				   lport->max_rport_retry_count) {
				rdata->major_retries++;
				rdata->event = RPORT_EV_NONE;
				FC_RPORT_DBG(rdata, "work restart\n");
				fc_rport_enter_flogi(rdata);
				mutex_unlock(&rdata->rp_mutex);
			} else {
				FC_RPORT_DBG(rdata, "work delete\n");
				list_del_rcu(&rdata->peers);
				mutex_unlock(&rdata->rp_mutex);
				kref_put(&rdata->kref, lport->tt.rport_destroy);
			}
		} else {
			/*
			 * Re-open for events.  Reissue READY event if ready.
			 */
			rdata->event = RPORT_EV_NONE;
			if (rdata->rp_state == RPORT_ST_READY)
				fc_rport_enter_ready(rdata);
			mutex_unlock(&rdata->rp_mutex);
		}
		mutex_unlock(&lport->disc.disc_mutex);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}

/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: The remote port to be logged in to
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * This indicates the intent to be logged into the remote port.
 * If it appears we are already logged in, ADISC is used to verify
 * the setup.
 */
static int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	rdata->flags |= FC_RP_STARTED;
	switch (rdata->rp_state) {
	case RPORT_ST_READY:
		FC_RPORT_DBG(rdata, "ADISC port\n");
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_DELETE:
		FC_RPORT_DBG(rdata, "Restart deleted port\n");
		break;
	default:
		FC_RPORT_DBG(rdata, "Login to port\n");
		fc_rport_enter_flogi(rdata);
		break;
	}
	mutex_unlock(&rdata->rp_mutex);

	return 0;
}

/**
 * fc_rport_enter_delete() - Schedule a remote port to be deleted
 * @rdata: The remote port to be deleted
 * @event: The event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}

/**
 * fc_rport_logoff() - Logoff and remove a remote port
 * @rdata: The remote port to be logged off of
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	rdata->flags &= ~FC_RP_STARTED;
	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		goto out;
	}
	fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
out:
	mutex_unlock(&rdata->rp_mutex);
	return 0;
}

/**
 * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
 * @rdata: The remote port that is ready
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}

/**
 * fc_rport_timeout() - Handler for the retry_work timer
 * @work: Handle to the remote port that has timed out
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_FLOGI:
		fc_rport_enter_flogi(rdata);
		break;
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_ADISC:
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_PLOGI_WAIT:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}

/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: The remote port the error happened on
 * @fp:	   The error code encapsulated in a frame pointer
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_FLOGI:
	case RPORT_ST_PLOGI:
		rdata->flags &= ~FC_RP_STARTED;
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_ADISC:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_PLOGI_WAIT:
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		break;
	}
}

/**
 * fc_rport_error_retry() - Handler for remote port state retries
 * @rdata: The remote port whose state is to be retried
 * @fp:	   The error code encapsulated in a frame pointer
 *
 * If the error was an exchange timeout retry immediately,
 * otherwise wait for E_D_TOV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
				 struct fc_frame *fp)
{
	unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);

	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		goto out;

	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
			     PTR_ERR(fp), fc_rport_state(rdata));
		rdata->retries++;
		/* no additional delay on exchange timeouts */
		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
			delay = 0;
		schedule_delayed_work(&rdata->retry_work, delay);
		return;
	}

out:
	fc_rport_error(rdata, fp);
}

/**
 * fc_rport_login_complete() - Handle parameters and completion of p-mp login.
 * @rdata:  The remote port which we logged into or which logged into us.
 * @fp:     The FLOGI or PLOGI request or response frame
 *
 * Returns non-zero error if a problem is detected with the frame.
 * Does not free the frame.
 *
 * This is only used in point-to-multipoint mode for FIP currently.
 */
static int fc_rport_login_complete(struct fc_rport_priv *rdata,
				   struct fc_frame *fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *flogi;
	unsigned int e_d_tov;
	u16 csp_flags;

	flogi = fc_frame_payload_get(fp, sizeof(*flogi));
	if (!flogi)
		return -EINVAL;

	csp_flags = ntohs(flogi->fl_csp.sp_features);

	if (fc_frame_payload_op(fp) == ELS_FLOGI) {
		if (csp_flags & FC_SP_FT_FPORT) {
			FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n");
			return -EINVAL;
		}
	} else {

		/*
		 * E_D_TOV is not valid on an incoming FLOGI request.
		 */
		e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov);
		if (csp_flags & FC_SP_FT_EDTR)
			e_d_tov /= 1000000;
		if (e_d_tov > rdata->e_d_tov)
			rdata->e_d_tov = e_d_tov;
	}
	rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs);
	return 0;
}

/**
 * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode
 * @sp:	    The sequence that the FLOGI was on
 * @fp:	    The FLOGI response frame
 * @rp_arg: The remote port that received the FLOGI response
 */
static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rp_arg)
{
	struct fc_rport_priv *rdata = rp_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *flogi;
	unsigned int r_a_tov;

	FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		goto put;

	mutex_lock(&rdata->rp_mutex);

	if (rdata->rp_state != RPORT_ST_FLOGI) {
		FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	if (fc_frame_payload_op(fp) != ELS_LS_ACC)
		goto bad;
	if (fc_rport_login_complete(rdata, fp))
		goto bad;

	flogi = fc_frame_payload_get(fp, sizeof(*flogi));
	if (!flogi)
		goto bad;
	r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
	if (r_a_tov > rdata->r_a_tov)
		rdata->r_a_tov = r_a_tov;

	if (rdata->ids.port_name < lport->wwpn)
		fc_rport_enter_plogi(rdata);
	else
		fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
put:
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
	return;
bad:
	FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
	fc_rport_error_retry(rdata, fp);
	goto out;
}

/**
 * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
 * @rdata: The remote port to send a FLOGI to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	if (!lport->point_to_multipoint)
		return fc_rport_enter_plogi(rdata);

	FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_FLOGI);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return fc_rport_error_retry(rdata, fp);

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
				  fc_rport_flogi_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
 * @lport: The local port that received the FLOGI request
 * @rx_fp: The FLOGI request frame
 */
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_els_flogi *flp;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_seq_els_data rjt_data;
	u32 sid;

	sid = fc_frame_sid(fp);

	FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");

	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);

	if (!lport->point_to_multipoint) {
		rjt_data.reason = ELS_RJT_UNSUP;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	flp = fc_frame_payload_get(fp, sizeof(*flp));
	if (!flp) {
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	rdata = lport->tt.rport_lookup(lport, sid);
	if (!rdata) {
		rjt_data.reason = ELS_RJT_FIP;
		rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n",
		     fc_rport_state(rdata));

	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		/*
		 * If the FLOGI request was received while this rport is in the
		 * INIT state (meaning it has not yet transitioned to FLOGI,
		 * either because the fc_rport timeout has not triggered or
		 * because this end has not yet received a beacon from the
		 * other end), allow the rport state machine to continue.
		 * Otherwise fall through, which causes a reject response to
		 * be sent.
		 * NOTE: FIP state such as VNMP_UP or VNMP_CLAIM is not
		 * checked here, because if the FIP state were not one of
		 * those, the rport would not have been created and
		 * 'rport_lookup' would have failed anyway.
		 */
		if (lport->point_to_multipoint)
			break;
	case RPORT_ST_DELETE:
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_FIP;
		rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
		goto reject;
	case RPORT_ST_FLOGI:
	case RPORT_ST_PLOGI_WAIT:
	case RPORT_ST_PLOGI:
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		/*
		 * Set the remote port to be deleted and to then restart.
		 * This queues work to be sure exchanges are reset.
		 */
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_BUSY;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}
	if (fc_rport_login_complete(rdata, fp)) {
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (!fp)
		goto out;

	fc_flogi_fill(lport, fp);
	flp = fc_frame_payload_get(fp, sizeof(*flp));
	flp->fl_cmd = ELS_LS_ACC;

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);

	if (rdata->ids.port_name < lport->wwpn)
		fc_rport_enter_plogi(rdata);
	else
		fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
out:
	mutex_unlock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);
	fc_frame_free(rx_fp);
	return;

reject:
	mutex_unlock(&disc->disc_mutex);
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_plogi_resp() - Handler for ELS PLOGI responses
 * @sp:	       The sequence the PLOGI is on
 * @fp:	       The PLOGI response frame
 * @rdata_arg: The remote port that sent the PLOGI response
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* save plogi response sp_features for further reference */
		rdata->sp_features = ntohs(plp->fl_csp.sp_features);

		if (lport->point_to_multipoint)
			fc_rport_login_complete(rdata, fp);
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request
 * @rdata: The remote port to send a PLOGI to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp:	       The sequence the PRLI response was on
 * @fp:	       The PRLI response frame
 * @rdata_arg: The remote port that sent the PRLI response
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp temp_spp;
	struct fc4_prov *prov;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;
	u8 resp_code = 0;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (!pp)
			goto out;

		resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
		FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
			     pp->spp.spp_flags);
		rdata->spp_type = pp->spp.spp_type;
		if (resp_code != FC_SPP_RESP_ACK) {
			if (resp_code == FC_SPP_RESP_CONF)
				fc_rport_error(rdata, fp);
			else
				fc_rport_error_retry(rdata, fp);
			goto out;
		}
		if (pp->prli.prli_spp_len < sizeof(pp->spp))
			goto out;

		fcp_parm = ntohl(pp->spp.spp_params);
		if (fcp_parm & FCP_SPPF_RETRY)
			rdata->flags |= FC_RP_FLAGS_RETRY;
		if (fcp_parm & FCP_SPPF_CONF_COMPL)
			rdata->flags |= FC_RP_FLAGS_CONF_REQ;

		prov = fc_passive_prov[FC_TYPE_FCP];
		if (prov) {
			memset(&temp_spp, 0, sizeof(temp_spp));
			prov->prli(rdata, pp->prli.prli_spp_len,
				   &pp->spp, &temp_spp);
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_error_retry(rdata, fp);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request
 * @rdata: The remote port to send the PRLI request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;
	struct fc4_prov *prov;

	/*
	 * If the rport is one of the well known addresses
	 * we skip PRLI and RTV and go straight to READY.
	 */
	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
		fc_rport_enter_ready(rdata);
		return;
	}

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	fc_prli_fill(lport, fp);

	prov = fc_passive_prov[FC_TYPE_FCP];
	if (prov) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
	}

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
		       fc_host_port_id(lport->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
				    NULL, rdata, 2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
 * @sp:	       The sequence the RTV was on
 * @fp:	       The RTV response frame
 * @rdata_arg: The remote port that sent the RTV response
 *
 * Many targets don't seem to support this.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
 * @rdata: The remote port to send the RTV request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
				  fc_rport_rtv_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_logo_resp() - Handler for logout (LOGO) responses
 * @sp:	       The sequence the LOGO was on
 * @fp:	       The LOGO response frame
 * @lport_arg: The local port
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *lport_arg)
{
	struct fc_lport *lport = lport_arg;

	FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
			"Received a LOGO %s\n", fc_els_resp_type(fp));
	if (IS_ERR(fp))
		return;
	fc_frame_free(fp);
}

/**
 * fc_rport_enter_logo() - Send a logout (LOGO) request
 * @rdata: The remote port to send the LOGO request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
		     fc_rport_state(rdata));

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp)
		return;
	(void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
				   fc_rport_logo_resp, lport, 0);
}

/**
 * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
 * @sp:	       The sequence the ADISC response was on
 * @fp:	       The ADISC response frame
 * @rdata_arg: The remote port that sent the ADISC response
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_els_adisc *adisc;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a ADISC response\n");

	if (rdata->rp_state != RPORT_ST_ADISC) {
		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
			     fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	/*
	 * If address verification failed, consider us logged out of the rport.
	 * Since the rport is still in discovery, we want to be
	 * logged in, so go to PLOGI state.  Otherwise, go back to READY.
	 */
	op = fc_frame_payload_op(fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	if (op != ELS_LS_ACC || !adisc ||
	    ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
	    get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
	    get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
		FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
		fc_rport_enter_flogi(rdata);
	} else {
		FC_RPORT_DBG(rdata, "ADISC OK\n");
		fc_rport_enter_ready(rdata);
	}
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}

/**
 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request
 * @rdata: The remote port to send the ADISC request to
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_ADISC);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
				  fc_rport_adisc_resp, rdata,
				  2 * lport->r_a_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}

/**
 * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
 * @rdata: The remote port that sent the ADISC request
 * @in_fp: The ADISC request frame
 *
 * Locking Note:  Called with the lport and rport locks held.
 */
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
				    struct fc_frame *in_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct fc_els_adisc *adisc;
	struct fc_seq_els_data rjt_data;

	FC_RPORT_DBG(rdata, "Received ADISC request\n");

	adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
	if (!adisc) {
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
		goto drop;
	}

	fp = fc_frame_alloc(lport, sizeof(*adisc));
	if (!fp)
		goto drop;
	fc_adisc_fill(lport, fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	adisc->adisc_cmd = ELS_LS_ACC;
	fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
drop:
	fc_frame_free(in_fp);
}

/**
 * fc_rport_recv_rls_req() - Handle received Read Link Status request
 * @rdata: The remote port that sent the RLS request
 * @rx_fp: The RLS request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
				  struct fc_frame *rx_fp)

{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct fc_els_rls *rls;
	struct fc_els_rls_resp *rsp;
	struct fc_els_lesb *lesb;
	struct fc_seq_els_data rjt_data;
	struct fc_host_statistics *hst;

	FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
		     fc_rport_state(rdata));

	rls = fc_frame_payload_get(rx_fp, sizeof(*rls));
	if (!rls) {
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto out_rjt;
	}

	fp = fc_frame_alloc(lport, sizeof(*rsp));
	if (!fp) {
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto out_rjt;
	}

	rsp = fc_frame_payload_get(fp, sizeof(*rsp));
	memset(rsp, 0, sizeof(*rsp));
	rsp->rls_cmd = ELS_LS_ACC;
	lesb = &rsp->rls_lesb;
	if (lport->tt.get_lesb) {
		/* get LESB from LLD if it supports it */
		lport->tt.get_lesb(lport, lesb);
	} else {
		fc_get_host_stats(lport->host);
		hst = &lport->host_stats;
		lesb->lesb_link_fail = htonl(hst->link_failure_count);
		lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count);
		lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count);
		lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count);
		lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count);
		lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
	goto out;

out_rjt:
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
out:
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_els_req() - Handler for validated ELS requests
 * @lport: The local port that received the ELS request
 * @fp:	   The ELS request frame
 *
 * Handle incoming ELS requests that require port login.
 * The ELS opcode has already been validated by the caller.
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	struct fc_seq_els_data els_data;

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		break;
	default:
		mutex_unlock(&rdata->rp_mutex);
		goto reject;
	}

	switch (fc_frame_payload_op(fp)) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, fp);
		break;
	case ELS_ADISC:
		fc_rport_recv_adisc_req(rdata, fp);
		break;
	case ELS_RRQ:
		lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
		fc_frame_free(fp);
		break;
	case ELS_REC:
		lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
		fc_frame_free(fp);
		break;
	case ELS_RLS:
		fc_rport_recv_rls_req(rdata, fp);
		break;
	default:
		fc_frame_free(fp);	/* can't happen */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	els_data.reason = ELS_RJT_UNAB;
	els_data.explan = ELS_EXPL_PLOGI_REQD;
	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
	fc_frame_free(fp);
}

/**
 * fc_rport_recv_req() - Handler for requests
 * @lport: The local port that received the request
 * @fp:	   The request frame
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_seq_els_data els_data;

	/*
	 * Handle FLOGI, PLOGI and LOGO requests separately, since they
	 * don't require prior login.
	 * Check for unsupported opcodes first and reject them.
	 * For some ops, it would be incorrect to reject with "PLOGI required".
	 */
	switch (fc_frame_payload_op(fp)) {
	case ELS_FLOGI:
		fc_rport_recv_flogi_req(lport, fp);
		break;
	case ELS_PLOGI:
		fc_rport_recv_plogi_req(lport, fp);
		break;
	case ELS_LOGO:
		fc_rport_recv_logo_req(lport, fp);
		break;
	case ELS_PRLI:
	case ELS_PRLO:
	case ELS_ADISC:
	case ELS_RRQ:
	case ELS_REC:
	case ELS_RLS:
		fc_rport_recv_els_req(lport, fp);
		break;
	default:
		els_data.reason = ELS_RJT_UNSUP;
		els_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
		fc_frame_free(fp);
		break;
	}
}

/**
 * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
 * @lport: The local port that received the PLOGI request
 * @rx_fp: The PLOGI request frame
 *
 * Locking Note: The rport lock is held before calling this function.
 */
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_els_flogi *pl;
	struct fc_seq_els_data rjt_data;
	u32 sid;

	sid = fc_frame_sid(fp);

	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");

	pl = fc_frame_payload_get(fp, sizeof(*pl));
	if (!pl) {
		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);
	rdata = lport->tt.rport_create(lport, sid);
	if (!rdata) {
		mutex_unlock(&disc->disc_mutex);
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);

	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);

	/*
	 * If the rport was just created, possibly due to the incoming PLOGI,
	 * set the state appropriately and accept the PLOGI.
	 *
	 * If we had also sent a PLOGI, and if the received PLOGI is from a
	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
	 * "command already in progress".
	 *
	 * XXX TBD: If the session was ready before, the PLOGI should result in
	 * all outstanding exchanges being reset.
	 */
	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
		break;
	case RPORT_ST_PLOGI_WAIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n");
		break;
	case RPORT_ST_PLOGI:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
		if (rdata->ids.port_name < lport->wwpn) {
			mutex_unlock(&rdata->rp_mutex);
			rjt_data.reason = ELS_RJT_INPROG;
			rjt_data.explan = ELS_EXPL_NONE;
			goto reject;
		}
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
			     "- ignored for now\n", rdata->rp_state);
		/* XXX TBD - should reset */
		break;
	case RPORT_ST_FLOGI:
	case RPORT_ST_DELETE:
		FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
			     fc_rport_state(rdata));
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_BUSY;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	/*
	 * Get session payload size from incoming PLOGI.
	 */
	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);

	/*
	 * Send LS_ACC.	 If this fails, the originator should retry.
	 */
	fp = fc_frame_alloc(lport, sizeof(*pl));
	if (!fp)
		goto out;

	fc_plogi_fill(lport, fp, ELS_LS_ACC);
	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
	fc_rport_enter_prli(rdata);
out:
	mutex_unlock(&rdata->rp_mutex);
	fc_frame_free(rx_fp);
	return;

reject:
	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
 * @rdata: The remote port that sent the PRLI request
 * @rx_fp: The PRLI request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
				   struct fc_frame *rx_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp *rspp;	/* request service param page */
	struct fc_els_spp *spp;	/* response spp */
	unsigned int len;
	unsigned int plen;
	enum fc_els_spp_resp resp;
	enum fc_els_spp_resp passive;
	struct fc_seq_els_data rjt_data;
	struct fc4_prov *prov;

	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
		     fc_rport_state(rdata));

	len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
	if (!pp)
		goto reject_len;
	plen = ntohs(pp->prli.prli_len);
	if ((plen % 4) != 0 || plen > len || plen < 16)
		goto reject_len;
	if (plen < len)
		len = plen;
	plen = pp->prli.prli_spp_len;
	if ((plen % 4) != 0 || plen < sizeof(*spp) ||
	    plen > len || len < sizeof(*pp) || plen < 12)
		goto reject_len;
	rspp = &pp->spp;

	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}
	pp = fc_frame_payload_get(fp, len);
	WARN_ON(!pp);
	memset(pp, 0, len);
	pp->prli.prli_cmd = ELS_LS_ACC;
	pp->prli.prli_spp_len = plen;
	pp->prli.prli_len = htons(len);
	len -= sizeof(struct fc_els_prli);

	/*
	 * Go through all the service parameter pages and build
	 * response.  If plen indicates longer SPP than standard,
	 * use that.  The entire response has been pre-cleared above.
	 */
	spp = &pp->spp;
	mutex_lock(&fc_prov_mutex);
	while (len >= plen) {
		rdata->spp_type = rspp->spp_type;
		spp->spp_type = rspp->spp_type;
		spp->spp_type_ext = rspp->spp_type_ext;
		resp = 0;

		if (rspp->spp_type < FC_FC4_PROV_SIZE) {
			prov = fc_active_prov[rspp->spp_type];
			if (prov)
				resp = prov->prli(rdata, plen, rspp, spp);
			prov = fc_passive_prov[rspp->spp_type];
			if (prov) {
				passive = prov->prli(rdata, plen, rspp, spp);
				if (!resp || passive == FC_SPP_RESP_ACK)
					resp = passive;
			}
		}
		if (!resp) {
			if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
				resp |= FC_SPP_RESP_CONF;
			else
				resp |= FC_SPP_RESP_INVL;
		}
		spp->spp_flags |= resp;
		len -= plen;
		rspp = (struct fc_els_spp *)((char *)rspp + plen);
		spp = (struct fc_els_spp *)((char *)spp + plen);
	}
	mutex_unlock(&fc_prov_mutex);

	/*
	 * Send LS_ACC.	 If this fails, the originator should retry.
	 */
	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);

	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
		fc_rport_enter_ready(rdata);
		break;
	default:
		break;
	}
	goto drop;

reject_len:
	rjt_data.reason = ELS_RJT_PROT;
	rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
 * @rdata: The remote port that sent the PRLO request
 * @rx_fp: The PRLO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
				   struct fc_frame *rx_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct {
		struct fc_els_prlo prlo;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp *rspp;	/* request service param page */
	struct fc_els_spp *spp;		/* response spp */
	unsigned int len;
	unsigned int plen;
	struct fc_seq_els_data rjt_data;

	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
		     fc_rport_state(rdata));

	len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
	if (!pp)
		goto reject_len;
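	/*
	 * Only a single-page PRLO is handled here: a 4-byte PRLO header
	 * plus one 16-byte service parameter page, 20 bytes in total.
	 */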
	plen = ntohs(pp->prlo.prlo_len);
	if (plen != 20)
		goto reject_len;
	if (plen < len)
		len = plen;

	rspp = &pp->spp;

	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	pp = fc_frame_payload_get(fp, len);
	WARN_ON(!pp);
	memset(pp, 0, len);
	pp->prlo.prlo_cmd = ELS_LS_ACC;
	pp->prlo.prlo_obs = 0x10;
	pp->prlo.prlo_len = htons(len);
	spp = &pp->spp;
	spp->spp_type = rspp->spp_type;
	spp->spp_type_ext = rspp->spp_type_ext;
	spp->spp_flags = FC_SPP_RESP_ACK;

	/* Schedule rport removal; fc_rport_work() will report the LOGO event. */
	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
	goto drop;

reject_len:
	rjt_data.reason = ELS_RJT_PROT;
	rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
 * @lport: The local port that received the LOGO request
 * @fp:	   The LOGO request frame
 *
 * Locking Note: This function takes the disc mutex and the rport mutex
 * itself, so the caller must not hold either lock.
 */
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	u32 sid;

	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
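	/*
	 * The LOGO is accepted unconditionally above; now look the sender
	 * up by S_ID and schedule deletion of its rport, if one exists.
	 */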

	sid = fc_frame_sid(fp);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, sid);
	if (rdata) {
		mutex_lock(&rdata->rp_mutex);
		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
			     fc_rport_state(rdata));

		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
		mutex_unlock(&rdata->rp_mutex);
	} else
		FC_RPORT_ID_DBG(lport, sid,
				"Received LOGO from non-logged-in port\n");
	mutex_unlock(&lport->disc.disc_mutex);
	fc_frame_free(fp);
}

/**
 * fc_rport_flush_queue() - Flush the rport_event_queue
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}

/**
 * fc_rport_init() - Initialize the remote port layer for a local port
 * @lport: The local port to initialize the remote port layer for
 */
int fc_rport_init(struct fc_lport *lport)
{
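	/* Fill in only the handlers that the LLD has not already set. */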
	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_rport_lookup;

	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);

/**
 * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
 * @rdata: remote port private
 * @spp_len: service parameter page length
 * @rspp: received service parameter page
 * @spp: response service parameter page
 *
 * Returns the value for the response code to be placed in spp_flags;
 * Returns 0 if not an initiator.
 */
static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
			     const struct fc_els_spp *rspp,
			     struct fc_els_spp *spp)
{
	struct fc_lport *lport = rdata->local_port;
	u32 fcp_parm;

	fcp_parm = ntohl(rspp->spp_params);
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (fcp_parm & FCP_SPPF_INIT_FCN)
		rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
	if (fcp_parm & FCP_SPPF_TARG_FCN)
		rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (fcp_parm & FCP_SPPF_RETRY)
		rdata->flags |= FC_RP_FLAGS_RETRY;
	rdata->supported_classes = FC_COS_CLASS3;

	/* Not an FCP initiator locally: leave the response to other providers. */
	if (!(lport->service_params & FCP_SPPF_INIT_FCN))
		return 0;

	spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;

	/*
	 * OR in our service parameters with other providers (target), if any.
	 */
	fcp_parm = ntohl(spp->spp_params);
	spp->spp_params = htonl(fcp_parm | lport->service_params);
	return FC_SPP_RESP_ACK;
}

/*
 * FC-4 provider ops for FCP initiator.
 */
struct fc4_prov fc_rport_fcp_init = {
	.prli = fc_rport_fcp_prli,
};

/**
 * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
 * @rdata: remote port private
 * @spp_len: service parameter page length
 * @rspp: received service parameter page
 * @spp: response service parameter page
 */
static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
			    const struct fc_els_spp *rspp,
			    struct fc_els_spp *spp)
{
	if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
		return FC_SPP_RESP_INVL;
	return FC_SPP_RESP_ACK;
}

/*
 * FC-4 provider ops for type 0 service parameters.
 *
 * This handles the special case of type 0 which is always successful
 * but doesn't do anything otherwise.
 */
struct fc4_prov fc_rport_t0_prov = {
	.prli = fc_rport_t0_prli,
};

/**
 * fc_setup_rport() - Initialize the rport_event_queue
 */
int fc_setup_rport(void)
{
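	/*
	 * A single-threaded workqueue is used so that rport work items are
	 * processed one at a time, in the order they were queued.
	 */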
	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
	if (!rport_event_queue)
		return -ENOMEM;
	return 0;
}

/**
 * fc_destroy_rport() - Destroy the rport_event_queue
 */
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}

/**
 * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port
 * @rport: The remote port whose I/O should be terminated
 */
void fc_rport_terminate_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rpriv = rport->dd_data;
	struct fc_lport *lport = rpriv->local_port;

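	/*
	 * Reset all outstanding exchanges with this remote port in both
	 * directions: those it is the destination of and those it
	 * originated.
	 */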
	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);