fc_rport.c 35.2 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59
/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * RPORT GENERAL INFO
 *
 * This file contains all processing regarding fc_rports. It contains the
 * rport state machine and does all rport interaction with the transport class.
 * There should be no other places in libfc that interact directly with the
 * transport class in regards to adding and deleting rports.
 *
 * fc_rport's represent N_Port's within the fabric.
 */

/*
 * RPORT LOCKING
 *
 * The rport should never hold the rport mutex and then attempt to acquire
 * either the lport or disc mutexes. The rport's mutex is considered lesser
 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
 * more comments on the hierarchy.
 *
 * The locking strategy is similar to the lport's strategy. The lock protects
 * the rport's states and is held and released by the entry points to the rport
 * block. All _enter_* functions correspond to rport states and expect the rport
 * mutex to be locked before calling them. This means that rports only handle
 * one request or response at a time, since they're not critical for the I/O
 * path this potential over-use of the mutex is acceptable.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

struct workqueue_struct *rport_event_queue;

60 61 62 63 64
static void fc_rport_enter_plogi(struct fc_rport_priv *);
static void fc_rport_enter_prli(struct fc_rport_priv *);
static void fc_rport_enter_rtv(struct fc_rport_priv *);
static void fc_rport_enter_ready(struct fc_rport_priv *);
static void fc_rport_enter_logo(struct fc_rport_priv *);
65

66
static void fc_rport_recv_plogi_req(struct fc_rport_priv *,
67
				    struct fc_seq *, struct fc_frame *);
68
static void fc_rport_recv_prli_req(struct fc_rport_priv *,
69
				   struct fc_seq *, struct fc_frame *);
70
static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
71
				   struct fc_seq *, struct fc_frame *);
72
static void fc_rport_recv_logo_req(struct fc_rport_priv *,
73 74
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
75 76
static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
77 78 79 80 81 82 83 84 85
static void fc_rport_work(struct work_struct *);

/* Printable names for each rport state, indexed by enum fc_rport_state. */
static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_DELETE] = "Delete",
};

/**
 * fc_rport_rogue_destroy() - release callback for a rogue rport's device
 * @dev: embedded device of the rogue fc_rport being released
 *
 * Frees the combined rport + private-data allocation made by
 * fc_rport_rogue_create() once the last reference is dropped.
 */
static void fc_rport_rogue_destroy(struct device *dev)
{
	struct fc_rport *rogue = dev_to_rport(dev);

	FC_RPORT_DBG(RPORT_TO_PRIV(rogue), "Destroying rogue rport\n");
	kfree(rogue);
}

98 99
/**
 * fc_rport_rogue_create() - Allocate a temporary ("rogue") remote port
 * @lport: local port through which the remote port is reached
 * @ids: identifiers (port ID, WWPN, WWNN, roles) of the remote port
 *
 * Allocates an fc_rport and its fc_rport_priv in a single chunk, outside
 * the FC transport class.  fc_rport_work() later replaces this rogue
 * rport with a real transport-class rport once login completes.
 *
 * Returns the private data pointer, or NULL on allocation failure.
 */
struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport,
					    struct fc_rport_identifiers *ids)
{
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	/* single allocation holds both the rport and its private data */
	rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);

	if (!rport)
		return NULL;

	rdata = RPORT_TO_PRIV(rport);

	rport->dd_data = rdata;
	rport->port_id = ids->port_id;
	rport->port_name = ids->port_name;
	rport->node_name = ids->node_name;
	rport->roles = ids->roles;
	/* minimum payload until PLOGI negotiates the real max frame size */
	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
	/*
	 * Note: all this libfc rogue rport code will be removed for
	 * upstream so it fine that this is really ugly and hacky right now.
	 */
	device_initialize(&rport->dev);
	rport->dev.release = fc_rport_rogue_destroy;

	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->trans_state = FC_PORTSTATE_ROGUE;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->ops = NULL;
	/* start from the local port's timeouts; RTV may refine them later */
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	/*
	 * For good measure, but not necessary as we should only
	 * add REAL rport to the lport list.
	 */
	INIT_LIST_HEAD(&rdata->peers);

	return rdata;
}

/**
144
 * fc_rport_state() - return a string for the state the rport is in
145
 * @rdata: remote port private data
146
 */
147
static const char *fc_rport_state(struct fc_rport_priv *rdata)
148 149 150 151 152 153 154 155 156 157
{
	const char *cp;

	cp = fc_rport_state_names[rdata->rp_state];
	if (!cp)
		cp = "Unknown";
	return cp;
}

/**
 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
 * @rport: Pointer to Fibre Channel remote port structure
 * @timeout: timeout in seconds
 *
 * A non-zero timeout is padded by 5 seconds; zero selects a 30 second
 * default.
 */
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	rport->dev_loss_tmo = timeout ? timeout + 5 : 30;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);

/**
172
 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
173 174 175
 * @flp: FLOGI payload structure
 * @maxval: upper limit, may be less than what is in the service parameters
 */
176 177
static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
					  unsigned int maxval)
178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194
{
	unsigned int mfs;

	/*
	 * Get max payload from the common service parameters and the
	 * class 3 receive data field size.
	 */
	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	return maxval;
}

/**
195
 * fc_rport_state_enter() - Change the rport's state
196
 * @rdata: The rport whose state should change
197 198 199 200
 * @new: The new state of the rport
 *
 * Locking Note: Called with the rport lock held
 */
201
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
202 203 204 205 206 207 208 209 210
				 enum fc_rport_state new)
{
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}

static void fc_rport_work(struct work_struct *work)
{
211
	u32 port_id;
212 213
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
214 215 216 217 218 219 220 221 222 223 224 225
	enum fc_rport_event event;
	enum fc_rport_trans_state trans_state;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;

	if (event == RPORT_EV_CREATED) {
		struct fc_rport *new_rport;
226
		struct fc_rport_priv *new_rdata;
227 228 229 230 231 232 233
		struct fc_rport_identifiers ids;

		ids.port_id = rport->port_id;
		ids.roles = rport->roles;
		ids.port_name = rport->port_name;
		ids.node_name = rport->node_name;

234
		rdata->event = RPORT_EV_NONE;
235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257
		mutex_unlock(&rdata->rp_mutex);

		new_rport = fc_remote_port_add(lport->host, 0, &ids);
		if (new_rport) {
			/*
			 * Switch from the rogue rport to the rport
			 * returned by the FC class.
			 */
			new_rport->maxframe_size = rport->maxframe_size;

			new_rdata = new_rport->dd_data;
			new_rdata->e_d_tov = rdata->e_d_tov;
			new_rdata->r_a_tov = rdata->r_a_tov;
			new_rdata->ops = rdata->ops;
			new_rdata->local_port = rdata->local_port;
			new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
			new_rdata->trans_state = FC_PORTSTATE_REAL;
			mutex_init(&new_rdata->rp_mutex);
			INIT_DELAYED_WORK(&new_rdata->retry_work,
					  fc_rport_timeout);
			INIT_LIST_HEAD(&new_rdata->peers);
			INIT_WORK(&new_rdata->event_work, fc_rport_work);

258
			fc_rport_state_enter(new_rdata, RPORT_ST_READY);
259
		} else {
260 261
			printk(KERN_WARNING "libfc: Failed to allocate "
			       " memory for rport (%6x)\n", ids.port_id);
262 263
			event = RPORT_EV_FAILED;
		}
264 265
		if (rport->port_id != FC_FID_DIR_SERV)
			if (rport_ops->event_callback)
266
				rport_ops->event_callback(lport, rdata,
267
							  RPORT_EV_FAILED);
268 269 270 271
		put_device(&rport->dev);
		rport = new_rport;
		rdata = new_rport->dd_data;
		if (rport_ops->event_callback)
272
			rport_ops->event_callback(lport, rdata, event);
273 274 275 276 277 278
	} else if ((event == RPORT_EV_FAILED) ||
		   (event == RPORT_EV_LOGO) ||
		   (event == RPORT_EV_STOP)) {
		trans_state = rdata->trans_state;
		mutex_unlock(&rdata->rp_mutex);
		if (rport_ops->event_callback)
279
			rport_ops->event_callback(lport, rdata, event);
280
		cancel_delayed_work_sync(&rdata->retry_work);
281 282
		if (trans_state == FC_PORTSTATE_ROGUE)
			put_device(&rport->dev);
283 284
		else {
			port_id = rport->port_id;
285
			fc_remote_port_delete(rport);
286 287 288
			lport->tt.exch_mgr_reset(lport, 0, port_id);
			lport->tt.exch_mgr_reset(lport, port_id, 0);
		}
289 290 291 292 293
	} else
		mutex_unlock(&rdata->rp_mutex);
}

/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: private remote port
 *
 * Kicks off login by sending PLOGI; the remaining transitions are
 * driven by the ELS response handlers and the retry timer.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Login to port\n");

	fc_rport_enter_plogi(rdata);

	mutex_unlock(&rdata->rp_mutex);

	/* always succeeds; login failures surface later as RPORT_EV_FAILED */
	return 0;
}

314 315
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	/* DELETE is terminal; only enter it once */
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	/* overwrite any pending event; the worker reads it under the mutex */
	rdata->event = event;
}

343
/**
 * fc_rport_logoff() - Logoff and remove an rport
 * @rdata: private remote port
 *
 * Sends LOGO to the peer (unless deletion is already in progress) and
 * schedules the rport for deletion with RPORT_EV_STOP.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * Returns 0 in all cases.
 */
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		mutex_unlock(&rdata->rp_mutex);
		goto out;
	}

	fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
	mutex_unlock(&rdata->rp_mutex);

out:
	return 0;
}

/**
377
 * fc_rport_enter_ready() - The rport is ready
378
 * @rdata: private remote port
379 380 381 382
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
383
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
384
{
385
	fc_rport_state_enter(rdata, RPORT_ST_READY);
386

387
	FC_RPORT_DBG(rdata, "Port is Ready\n");
388

389 390
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
391 392 393 394
	rdata->event = RPORT_EV_CREATED;
}

/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Re-issues the outstanding ELS request for whichever login/logout
 * state the rport is still in; quiescent states ignore the timeout.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		/* nothing to retry in these states */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}

/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer (may be an ERR_PTR encoding the error code)
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     PTR_ERR(fp), fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_PRLI:
	case RPORT_ST_LOGO:
		/* login/logout failed for good - tear the rport down */
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		/* RTV is optional; proceed to READY without it */
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		break;
	}
}

460
/**
 * fc_rport_error_retry() - Error handler when retries are desired
 * @rdata: private remote port data
 * @fp: The frame pointer (may be an ERR_PTR encoding the error code)
 *
 * If the error was an exchange timeout retry immediately,
 * otherwise wait for E_D_TOV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
				 struct fc_frame *fp)
{
	unsigned long delay = FC_DEF_E_D_TOV;

	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return fc_rport_error(rdata, fp);

	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
			     PTR_ERR(fp), fc_rport_state(rdata));
		rdata->retries++;
		/* no additional delay on exchange timeouts */
		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
			delay = 0;
		schedule_delayed_work(&rdata->retry_work, delay);
		return;
	}

	/* retries exhausted - fall through to the terminal error handler */
	return fc_rport_error(rdata, fp);
}

494
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame (or ERR_PTR on exchange error)
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, records the peer's names, timeout values, concurrent
 * sequence limit and max frame size, then advances to PRLI (or straight
 * to READY for well-known addresses).  Anything else is retried.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI response\n");

	/* a late or duplicate response is ignored once we left PLOGI state */
	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
		rport->node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* EDTR flag selects a finer E_D_TOV resolution; rescale.
		 * Keep the larger of ours and the peer's value. */
		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		/* max concurrent sequences: lesser of total and class 3 */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rport->maxframe_size =
			fc_plogi_get_maxframe(plp, lport->mfs);

		/*
		 * If the rport is one of the well known addresses
		 * we skip PRLI and RTV and go straight to READY.
		 */
		if (rport->port_id >= FC_FID_DOM_MGR)
			fc_rport_enter_ready(rdata);
		else
			fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the ELS request was sent */
	put_device(&rport->dev);
}

/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	/* reset to the minimum until the response reports the real value */
	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		/* fp is NULL here; treated as a retryable error */
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* hold the rport until the response callback drops it */
		get_device(&rport->dev);
}

/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame (or ERR_PTR on exchange error)
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, extracts the FCP service parameters (retry support,
 * initiator/target roles) and advances to RTV.  A rejection deletes
 * the rport with RPORT_EV_FAILED.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI response\n");

	/* ignore a response that arrives after we left PRLI state */
	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rport->supported_classes = FC_COS_CLASS3;
		/* roles stay UNKNOWN when the payload is short (fcp_parm 0) */
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rport->roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the ELS request was sent */
	put_device(&rport->dev);
}

/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame (or ERR_PTR on exchange error)
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO response\n");

	/*
	 * fc_rport_logoff() moves to DELETE right after sending LOGO,
	 * so responses normally land here with rp_state != LOGO and
	 * are discarded.
	 */
	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		/*
		 * NOTE(review): entering RTV after an *accepted* logout
		 * looks inverted - confirm this branch is intentional;
		 * in practice it is only reachable via a LOGO retry from
		 * fc_rport_timeout().
		 */
		fc_rport_enter_rtv(rdata);
	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the ELS request was sent */
	put_device(&rport->dev);
}

/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_lport *lport = rdata->local_port;
	/* sized only to reserve room for one service parameter page */
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		/* fp is NULL here; treated as a retryable error */
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PRLI,
				  fc_rport_prli_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* hold the rport until the response callback drops it */
		get_device(&rport->dev);
}

/**
 * fc_rport_rtv_resp() - Request Timeout Value response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame (or ERR_PTR on exchange error)
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.
 *
 * On LS_ACC, updates R_A_TOV and E_D_TOV from the payload; in every
 * non-error case the rport then becomes READY (RTV failure is not
 * fatal - see fc_rport_error()).
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV response\n");

	/* ignore a response that arrives after we left RTV state */
	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			/* never allow a zero timeout */
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* EDRES flag selects a finer resolution; rescale */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	/* READY regardless of whether the peer honored RTV */
	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the ELS request was sent */
	put_device(&rport->dev);
}

/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		/* fp is NULL here; treated as a retryable error */
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_RTV,
				     fc_rport_rtv_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* hold the rport until the response callback drops it */
		get_device(&rport->dev);
}

/**
 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_LOGO);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp) {
		/* fp is NULL here; treated as a retryable error */
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_LOGO,
				  fc_rport_logo_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* hold the rport until the response callback drops it */
		get_device(&rport->dev);
}


/**
 * fc_rport_recv_req() - Receive a request from a rport
 * @sp: current sequence in the request exchange
 * @fp: request frame
 * @rdata: private remote port data
 *
 * Dispatches incoming ELS requests to the per-ELS handlers; RRQ and
 * REC are acknowledged in place and unrecognized commands rejected.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
		       struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;

	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	els_data.fp = NULL;
	els_data.explan = ELS_EXPL_NONE;
	els_data.reason = ELS_RJT_NONE;

	fh = fc_frame_header_get(fp);

	/* only unsolicited ELS requests are handled here */
	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
		op = fc_frame_payload_op(fp);
		switch (op) {
		case ELS_PLOGI:
			fc_rport_recv_plogi_req(rdata, sp, fp);
			break;
		case ELS_PRLI:
			fc_rport_recv_prli_req(rdata, sp, fp);
			break;
		case ELS_PRLO:
			fc_rport_recv_prlo_req(rdata, sp, fp);
			break;
		case ELS_LOGO:
			fc_rport_recv_logo_req(rdata, sp, fp);
			break;
		case ELS_RRQ:
			els_data.fp = fp;
			lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
			break;
		case ELS_REC:
			els_data.fp = fp;
			lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
			break;
		default:
			els_data.reason = ELS_RJT_UNSUP;
			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
			break;
		}
	}
	/* NOTE(review): a frame failing the r_ctl/type check is neither
	 * answered nor freed here - confirm the caller retains ownership
	 * of fp in that case. */

	mutex_unlock(&rdata->rp_mutex);
}

/**
 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
 * @rdata: private remote port data
 * @sp: current sequence in the PLOGI exchange
 * @rx_fp: PLOGI request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata,
				    struct fc_seq *sp, struct fc_frame *rx_fp)
{
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp = rx_fp;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	struct fc_els_flogi *pl;
	struct fc_seq_els_data rjt_data;
	u32 sid;
	u64 wwpn;
	u64 wwnn;
	enum fc_els_rjt_reason reject = 0;	/* 0 == accept */
	u32 f_ctl;
	rjt_data.fp = NULL;

	fh = fc_frame_header_get(fp);

	FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n",
		     fc_rport_state(rdata));

	/* NOTE(review): sid is extracted but never used below - confirm */
	sid = ntoh24(fh->fh_s_id);
	pl = fc_frame_payload_get(fp, sizeof(*pl));
	if (!pl) {
		FC_RPORT_DBG(rdata, "Received PLOGI too short\n");
		WARN_ON(1);
		/* XXX TBD: send reject? */
		fc_frame_free(fp);
		return;
	}
	wwpn = get_unaligned_be64(&pl->fl_wwpn);
	wwnn = get_unaligned_be64(&pl->fl_wwnn);

	/*
	 * If the session was just created, possibly due to the incoming PLOGI,
	 * set the state appropriately and accept the PLOGI.
	 *
	 * If we had also sent a PLOGI, and if the received PLOGI is from a
	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
	 * "command already in progress".
	 *
	 * XXX TBD: If the session was ready before, the PLOGI should result in
	 * all outstanding exchanges being reset.
	 */
	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT "
			     "- reject\n", (unsigned long long)wwpn);
		reject = ELS_RJT_UNSUP;
		break;
	case RPORT_ST_PLOGI:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n",
			     rdata->rp_state);
		/* WWPN comparison breaks the simultaneous-PLOGI tie */
		if (wwpn < lport->wwpn)
			reject = ELS_RJT_INPROG;
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_READY:
		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
			     "- ignored for now\n", rdata->rp_state);
		/* XXX TBD - should reset */
		break;
	case RPORT_ST_DELETE:
	default:
		FC_RPORT_DBG(rdata, "Received PLOGI in unexpected "
			     "state %d\n", rdata->rp_state);
		fc_frame_free(fp);
		return;
		break;
	}

	if (reject) {
		rjt_data.reason = reject;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
		fc_frame_free(fp);
	} else {
		/* allocate the LS_ACC reply frame */
		fp = fc_frame_alloc(lport, sizeof(*pl));
		if (fp == NULL) {
			/* out of memory: reject so the originator retries */
			fp = rx_fp;
			rjt_data.reason = ELS_RJT_UNAB;
			rjt_data.explan = ELS_EXPL_NONE;
			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
			fc_frame_free(fp);
		} else {
			sp = lport->tt.seq_start_next(sp);
			WARN_ON(!sp);
			fc_rport_set_name(rport, wwpn, wwnn);

			/*
			 * Get session payload size from incoming PLOGI.
			 */
			rport->maxframe_size =
				fc_plogi_get_maxframe(pl, lport->mfs);
			fc_frame_free(rx_fp);
			fc_plogi_fill(lport, fp, ELS_LS_ACC);

			/*
			 * Send LS_ACC.	 If this fails,
			 * the originator should retry.
			 */
			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
			ep = fc_seq_exch(sp);
			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
				       FC_TYPE_ELS, f_ctl, 0);
			lport->tt.seq_send(lport, sp, fp);
			/* a PLOGI we also initiated now proceeds to PRLI */
			if (rdata->rp_state == RPORT_ST_PLOGI)
				fc_rport_enter_prli(rdata);
		}
	}
}

/**
1071
 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1072
 * @rdata: private remote port data
1073 1074 1075 1076 1077 1078
 * @sp: current sequence in the PRLI exchange
 * @fp: PRLI request frame
 *
 * Locking Note: The rport lock is exected to be held before calling
 * this function.
 */
1079
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1080 1081
				   struct fc_seq *sp, struct fc_frame *rx_fp)
{
1082
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105
	struct fc_lport *lport = rdata->local_port;
	struct fc_exch *ep;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp *rspp;	/* request service param page */
	struct fc_els_spp *spp;	/* response spp */
	unsigned int len;
	unsigned int plen;
	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
	enum fc_els_spp_resp resp;
	struct fc_seq_els_data rjt_data;
	u32 f_ctl;
	u32 fcp_parm;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	rjt_data.fp = NULL;

	fh = fc_frame_header_get(rx_fp);

1106 1107
	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
		     fc_rport_state(rdata));
1108 1109 1110 1111 1112 1113 1114

	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_READY:
		reason = ELS_RJT_NONE;
		break;
	default:
1115 1116
		fc_frame_free(rx_fp);
		return;
1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210
		break;
	}
	len = fr_len(rx_fp) - sizeof(*fh);
	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
	if (pp == NULL) {
		reason = ELS_RJT_PROT;
		explan = ELS_EXPL_INV_LEN;
	} else {
		plen = ntohs(pp->prli.prli_len);
		if ((plen % 4) != 0 || plen > len) {
			reason = ELS_RJT_PROT;
			explan = ELS_EXPL_INV_LEN;
		} else if (plen < len) {
			len = plen;
		}
		plen = pp->prli.prli_spp_len;
		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
		    plen > len || len < sizeof(*pp)) {
			reason = ELS_RJT_PROT;
			explan = ELS_EXPL_INV_LEN;
		}
		rspp = &pp->spp;
	}
	if (reason != ELS_RJT_NONE ||
	    (fp = fc_frame_alloc(lport, len)) == NULL) {
		rjt_data.reason = reason;
		rjt_data.explan = explan;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	} else {
		sp = lport->tt.seq_start_next(sp);
		WARN_ON(!sp);
		pp = fc_frame_payload_get(fp, len);
		WARN_ON(!pp);
		memset(pp, 0, len);
		pp->prli.prli_cmd = ELS_LS_ACC;
		pp->prli.prli_spp_len = plen;
		pp->prli.prli_len = htons(len);
		len -= sizeof(struct fc_els_prli);

		/*
		 * Go through all the service parameter pages and build
		 * response.  If plen indicates longer SPP than standard,
		 * use that.  The entire response has been pre-cleared above.
		 */
		spp = &pp->spp;
		while (len >= plen) {
			spp->spp_type = rspp->spp_type;
			spp->spp_type_ext = rspp->spp_type_ext;
			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
			resp = FC_SPP_RESP_ACK;
			if (rspp->spp_flags & FC_SPP_RPA_VAL)
				resp = FC_SPP_RESP_NO_PA;
			switch (rspp->spp_type) {
			case 0:	/* common to all FC-4 types */
				break;
			case FC_TYPE_FCP:
				fcp_parm = ntohl(rspp->spp_params);
				if (fcp_parm * FCP_SPPF_RETRY)
					rdata->flags |= FC_RP_FLAGS_RETRY;
				rport->supported_classes = FC_COS_CLASS3;
				if (fcp_parm & FCP_SPPF_INIT_FCN)
					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
				if (fcp_parm & FCP_SPPF_TARG_FCN)
					roles |= FC_RPORT_ROLE_FCP_TARGET;
				rport->roles = roles;

				spp->spp_params =
					htonl(lport->service_params);
				break;
			default:
				resp = FC_SPP_RESP_INVL;
				break;
			}
			spp->spp_flags |= resp;
			len -= plen;
			rspp = (struct fc_els_spp *)((char *)rspp + plen);
			spp = (struct fc_els_spp *)((char *)spp + plen);
		}

		/*
		 * Send LS_ACC.	 If this fails, the originator should retry.
		 */
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
		f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
		ep = fc_seq_exch(sp);
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);

		/*
		 * Get lock and re-check state.
		 */
		switch (rdata->rp_state) {
		case RPORT_ST_PRLI:
1211
			fc_rport_enter_ready(rdata);
1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
			break;
		case RPORT_ST_READY:
			break;
		default:
			break;
		}
	}
	fc_frame_free(rx_fp);
}

/**
 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
 * @rdata: private remote port data
 * @sp: current sequence in the PRLO exchange
 * @fp: PRLO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp,
1233 1234 1235 1236 1237 1238 1239 1240 1241
				   struct fc_frame *fp)
{
	struct fc_lport *lport = rdata->local_port;

	struct fc_frame_header *fh;
	struct fc_seq_els_data rjt_data;

	fh = fc_frame_header_get(fp);

1242 1243
	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
		     fc_rport_state(rdata));
1244

1245
	if (rdata->rp_state == RPORT_ST_DELETE) {
1246 1247 1248 1249
		fc_frame_free(fp);
		return;
	}

1250 1251 1252 1253 1254 1255 1256 1257
	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_UNAB;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
 * @rdata: private remote port data
 * @sp: current sequence in the LOGO exchange
 * @fp: LOGO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp,
1268 1269 1270 1271 1272 1273 1274
				   struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_lport *lport = rdata->local_port;

	fh = fc_frame_header_get(fp);

1275 1276
	FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
		     fc_rport_state(rdata));
1277

1278
	if (rdata->rp_state == RPORT_ST_DELETE) {
1279 1280 1281 1282
		fc_frame_free(fp);
		return;
	}

1283
	rdata->event = RPORT_EV_LOGO;
1284
	fc_rport_state_enter(rdata, RPORT_ST_DELETE);
1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297
	queue_work(rport_event_queue, &rdata->event_work);

	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}

/**
 * fc_rport_flush_queue() - Flush the rport event workqueue
 *
 * Blocks until all event work queued on rport_event_queue has completed.
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}

int fc_rport_init(struct fc_lport *lport)
{
1298 1299 1300
	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_rogue_create;

1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316
	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);

1317
int fc_setup_rport(void)
1318 1319 1320 1321 1322 1323 1324 1325
{
	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
	if (!rport_event_queue)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(fc_setup_rport);

1326
void fc_destroy_rport(void)
1327 1328 1329 1330 1331 1332 1333
{
	destroy_workqueue(rport_event_queue);
}
EXPORT_SYMBOL(fc_destroy_rport);

void fc_rport_terminate_io(struct fc_rport *rport)
{
1334 1335
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_lport *lport = rp->local_port;
1336

1337 1338
	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1339 1340
}
EXPORT_SYMBOL(fc_rport_terminate_io);