/*
 * zfcp device driver
 *
 * Fibre Channel related functions for the zfcp device driver.
 *
 * Copyright IBM Corporation 2008, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"

struct kmem_cache *zfcp_fc_req_cache;

static u32 zfcp_fc_rscn_range_mask[] = {
	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
	[ELS_ADDR_FMT_DOM]		= 0xFF0000,
	[ELS_ADDR_FMT_FAB]		= 0x000000,
};

/**
 * zfcp_fc_post_event - post event to userspace via fc_transport
 * @work: work struct with enqueued events
 */
void zfcp_fc_post_event(struct work_struct *work)
{
	struct zfcp_fc_event *event = NULL, *tmp = NULL;
	LIST_HEAD(tmp_lh);
	struct zfcp_fc_events *events = container_of(work,
					struct zfcp_fc_events, work);
	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
						events);

	spin_lock_bh(&events->list_lock);
	list_splice_init(&events->list, &tmp_lh);
	spin_unlock_bh(&events->list_lock);

	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
				event->code, event->data);
		list_del(&event->list);
		kfree(event);
	}

}

/**
 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
 * @adapter: The adapter where to enqueue the event
 * @event_code: The event code (as defined in fc_host_event_code in
 *		scsi_transport_fc.h)
 * @event_data: The event data (e.g. n_port page in case of els)
 */
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
			enum fc_host_event_code event_code, u32 event_data)
{
	struct zfcp_fc_event *event;

	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
	if (!event)
		return;

	event->code = event_code;
	event->data = event_data;

	spin_lock(&adapter->events.list_lock);
	list_add_tail(&event->list, &adapter->events.list);
	spin_unlock(&adapter->events.list_lock);

	queue_work(adapter->work_queue, &adapter->events.work);
}

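/*
 * Open the well-known-address port if necessary and take a reference.
 * Returns 0 with the reference held once the port is online,
 * -ERESTARTSYS if interrupted or -EIO if the port could not be opened.
 */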
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
	if (mutex_lock_interruptible(&wka_port->mutex))
		return -ERESTARTSYS;

	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
		if (zfcp_fsf_open_wka_port(wka_port))
			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	}

	mutex_unlock(&wka_port->mutex);

	wait_event(wka_port->completion_wq,
		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
		atomic_inc(&wka_port->refcount);
		return 0;
	}
	return -EIO;
}

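/* delayed work closing a well-known-address port that is no longer used */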
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_fc_wka_port *wka_port =
			container_of(dw, struct zfcp_fc_wka_port, work);

	mutex_lock(&wka_port->mutex);
	if ((atomic_read(&wka_port->refcount) != 0) ||
	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
		goto out;

	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
	if (zfcp_fsf_close_wka_port(wka_port)) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		wake_up(&wka_port->completion_wq);
	}
out:
	mutex_unlock(&wka_port->mutex);
}

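/*
 * Drop a reference to a well-known-address port; closing the port is
 * deferred briefly in case new requests come in.
 */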
static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
	if (atomic_dec_return(&wka_port->refcount) != 0)
		return;
	/* wait 10 milliseconds, other reqs might pop in */
	schedule_delayed_work(&wka_port->work, HZ / 100);
}

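/* initialize a well-known-address port for the given destination id */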
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
				  struct zfcp_adapter *adapter)
{
	init_waitqueue_head(&wka_port->completion_wq);

	wka_port->adapter = adapter;
	wka_port->d_id = d_id;

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	atomic_set(&wka_port->refcount, 0);
	mutex_init(&wka_port->mutex);
	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}

static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
	cancel_delayed_work_sync(&wka->work);
	mutex_lock(&wka->mutex);
	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
	mutex_unlock(&wka->mutex);
}

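/**
 * zfcp_fc_wka_ports_force_offline - set all well-known-address ports offline
 * @gs: generic services containing the WKA ports, may be NULL
 */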
void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
	if (!gs)
		return;
	zfcp_fc_wka_port_force_offline(&gs->ms);
	zfcp_fc_wka_port_force_offline(&gs->ts);
	zfcp_fc_wka_port_force_offline(&gs->ds);
	zfcp_fc_wka_port_force_offline(&gs->as);
}

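/*
 * Handle one RSCN page: test the link of each port whose d_id falls into
 * the affected address range and retrigger recovery for ports that still
 * have no d_id assigned.
 */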
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
				   struct fc_els_rscn_page *page)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
			zfcp_fc_test_link(port);
		if (!port->d_id)
			zfcp_erp_port_reopen(port,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "fcrscn1");
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
	struct fc_els_rscn *head;
	struct fc_els_rscn_page *page;
	u16 i;
	u16 no_entries;
	unsigned int afmt;

	head = (struct fc_els_rscn *) status_buffer->payload.data;
	page = (struct fc_els_rscn_page *) head;

	/* see FC-FS */
	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);

	for (i = 1; i < no_entries; i++) {
		/* skip head and start with 1st element */
		page++;
		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
				       page);
		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
				      *(u32 *)page);
	}
	queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
}

static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->wwpn == wwpn) {
			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer;
	struct fc_els_flogi *plogi;

	status_buffer = (struct fsf_status_read_buffer *) req->data;
	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}

static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *)req->data;
	struct fc_els_logo *logo =
		(struct fc_els_logo *) status_buffer->payload.data;

	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}

/**
 * zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
 */
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *) fsf_req->data;
	unsigned int els_type = status_buffer->payload.data[0];

	zfcp_dbf_san_in_els("fciels1", fsf_req);
	if (els_type == ELS_PLOGI)
		zfcp_fc_incoming_plogi(fsf_req);
	else if (els_type == ELS_LOGO)
		zfcp_fc_incoming_logo(fsf_req);
	else if (els_type == ELS_RSCN)
		zfcp_fc_incoming_rscn(fsf_req);
}

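/* evaluate a GID_PN response and store the resolved d_id in the port */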
static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;

	if (ct_els->status)
		return;
	if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
		return;

	/* looks like a valid d_id */
	ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}

static void zfcp_fc_complete(void *data)
{
	complete(data);
}

static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
	ct_hdr->ct_rev = FC_CT_REV;
	ct_hdr->ct_fs_type = FC_FST_DIR;
	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
	ct_hdr->ct_cmd = cmd;
	ct_hdr->ct_mr_size = mr_size / 4;
}

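/*
 * Send a GID_PN request for the port's WWPN to the directory service
 * and wait for the response; the resolved d_id is stored by
 * zfcp_fc_ns_gid_pn_eval.
 */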
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
				     struct zfcp_fc_req *fc_req)
{
	struct zfcp_adapter *adapter = port->adapter;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
	int ret;

	/* setup parameters for send generic command */
	fc_req->ct_els.port = port;
	fc_req->ct_els.handler = zfcp_fc_complete;
	fc_req->ct_els.handler_data = &completion;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));

	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
	gid_pn_req->gid_pn.fn_wwpn = port->wwpn;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
			       adapter->pool.gid_pn_req,
			       ZFCP_FC_CTELS_TMO);
	if (!ret) {
		wait_for_completion(&completion);
		zfcp_fc_ns_gid_pn_eval(fc_req);
	}
	return ret;
}

/**
 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
 * @port: port where GID_PN request is needed
 * return: -ENOMEM on error, 0 otherwise
 */
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
	int ret;
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;

	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	memset(fc_req, 0, sizeof(*fc_req));

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out;

	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);

	zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
	mempool_free(fc_req, adapter->pool.gid_pn);
	return ret;
}

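/**
 * zfcp_fc_port_did_lookup - work item looking up the d_id of a port
 * @work: reference to the gid_pn_work of a zfcp_port
 *
 * Issues a GID_PN nameserver request for the port and triggers the
 * appropriate recovery action depending on the result.
 */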
void zfcp_fc_port_did_lookup(struct work_struct *work)
{
	int ret;
	struct zfcp_port *port = container_of(work, struct zfcp_port,
					      gid_pn_work);

	ret = zfcp_fc_ns_gid_pn(port);
	if (ret) {
		/* could not issue gid_pn for some reason */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
		goto out;
	}

	if (!port->d_id) {
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
		goto out;
	}

	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
 * @port: The zfcp_port to lookup the d_id for.
 */
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
		put_device(&port->dev);
}

/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
 */
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
	if (plogi->fl_wwpn != port->wwpn) {
		port->d_id = 0;
		dev_warn(&port->adapter->ccw_device->dev,
			 "A port opened with WWPN 0x%016Lx returned data that "
			 "identifies it as WWPN 0x%016Lx\n",
			 (unsigned long long) port->wwpn,
			 (unsigned long long) plogi->fl_wwpn);
		return;
	}

	port->wwnn = plogi->fl_wwnn;
	port->maxframe_size = plogi->fl_csp.sp_bb_data;

	if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS1;
	if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS2;
	if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS3;
	if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS4;
}

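/*
 * Completion handler for the ADISC sent by zfcp_fc_adisc: reopen the port
 * if the response is missing or inconsistent, otherwise reregister the
 * rport and clear the link test flag.
 */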
static void zfcp_fc_adisc_handler(void *data)
{
	struct zfcp_fc_req *fc_req = data;
	struct zfcp_port *port = fc_req->ct_els.port;
	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;

	if (fc_req->ct_els.status) {
		/* request rejected or timed out */
		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
					    "fcadh_1");
		goto out;
	}

	if (!port->wwnn)
		port->wwnn = adisc_resp->adisc_wwnn;

	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fcadh_2");
		goto out;
	}

	/* port is good, unblock rport without going through erp */
	zfcp_scsi_schedule_rport_register(port);
 out:
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	put_device(&port->dev);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}

static int zfcp_fc_adisc(struct zfcp_port *port)
{
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	int ret;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	fc_req->ct_els.port = port;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
		    sizeof(struct fc_els_adisc));
	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
		    sizeof(struct fc_els_adisc));

	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
	fc_req->ct_els.handler_data = fc_req;

	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
	   without FC-AL-2 capability, so we don't set it */
	fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
	fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));

	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
				ZFCP_FC_CTELS_TMO);
	if (ret)
		kmem_cache_free(zfcp_fc_req_cache, fc_req);

	return ret;
}

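/**
 * zfcp_fc_link_test_work - work item testing the link to a remote port
 * @work: reference to the test_link_work of a zfcp_port
 *
 * Sends an ADISC to the remote port; if the ADISC cannot be issued, the
 * port is forced through error recovery.
 */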
void zfcp_fc_link_test_work(struct work_struct *work)
{
	struct zfcp_port *port =
		container_of(work, struct zfcp_port, test_link_work);
	int retval;

	get_device(&port->dev);
	port->rport_task = RPORT_DEL;
	zfcp_scsi_rport_work(&port->rport_work);

	/* only issue one test command at one time per port */
	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
		goto out;

	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

	retval = zfcp_fc_adisc(port);
	if (retval == 0)
		return;

	/* send of ADISC was not possible */
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");

out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_test_link - lightweight link test procedure
 * @port: port to be tested
 *
 * Test status of a link to a remote port using the ELS command ADISC.
 * If there is a problem with the remote port, error recovery steps
 * will be triggered.
 */
void zfcp_fc_test_link(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
		put_device(&port->dev);
}

static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
{
	struct scatterlist *sg = &gpn_ft->sg_req;

	kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
	zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);

	kfree(gpn_ft);
}

static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
{
	struct zfcp_fc_gpn_ft *gpn_ft;
	struct zfcp_fc_gpn_ft_req *req;

	gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
	if (!gpn_ft)
		return NULL;

	req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
	if (!req) {
		kfree(gpn_ft);
		gpn_ft = NULL;
		goto out;
	}
	sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));

	if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
		zfcp_free_sg_env(gpn_ft, buf_num);
		gpn_ft = NULL;
	}
out:
	return gpn_ft;
}

static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
			       struct zfcp_adapter *adapter, int max_bytes)
{
	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
	struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	/* prepare CT IU for GPN_FT */
	req->ct_hdr.ct_rev = FC_CT_REV;
	req->ct_hdr.ct_fs_type = FC_FST_DIR;
	req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
	req->ct_hdr.ct_options = 0;
	req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
	req->ct_hdr.ct_mr_size = max_bytes / 4;
	req->gpn_ft.fn_domain_id_scope = 0;
	req->gpn_ft.fn_area_id_scope = 0;
	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;

	/* prepare zfcp_send_ct */
	ct->handler = zfcp_fc_complete;
	ct->handler_data = &completion;
	ct->req = &gpn_ft->sg_req;
	ct->resp = gpn_ft->sg_resp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
	return ret;
}

static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
		return;

	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);

	if ((port->supported_classes != 0) ||
	    !list_empty(&port->unit_list))
		return;

	list_move_tail(&port->list, lh);
}

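/*
 * Evaluate a GPN_FT response: enqueue newly reported remote ports and
 * move ports that are no longer present in the fabric to a removal list.
 */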
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
			       struct zfcp_adapter *adapter, int max_entries)
{
	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
	struct scatterlist *sg = gpn_ft->sg_resp;
	struct fc_ct_hdr *hdr = sg_virt(sg);
	struct fc_gpn_ft_resp *acc = sg_virt(sg);
	struct zfcp_port *port, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_lh);
	u32 d_id;
	int ret = 0, x, last = 0;

	if (ct->status)
		return -EIO;

	if (hdr->ct_cmd != FC_FS_ACC) {
		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
			return -EAGAIN; /* might be a temporary condition */
		return -EIO;
	}

	if (hdr->ct_mr_size) {
		dev_warn(&adapter->ccw_device->dev,
			 "The name server reported %d words residual data\n",
			 hdr->ct_mr_size);
		return -E2BIG;
	}

	/* first entry is the header */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++sg);

		last = acc->fp_flags & FC_NS_FID_LAST;
		d_id = ntoh24(acc->fp_fid);

		/* don't attach ports with a well known address */
		if (d_id >= FC_FID_WELL_KNOWN_BASE)
			continue;
		/* skip the adapter's port and known remote ports */
		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
			continue;

		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
					 ZFCP_STATUS_COMMON_NOESC, d_id);
		if (!IS_ERR(port))
			zfcp_erp_port_reopen(port, 0, "fcegpf1");
		else if (PTR_ERR(port) != -EEXIST)
			ret = PTR_ERR(port);
	}

	zfcp_erp_wait(adapter);
	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		zfcp_fc_validate_port(port, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);

	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
	}

	return ret;
}

/**
 * zfcp_fc_scan_ports - scan remote ports and attach new ports
 * @work: reference to scheduled work
 */
void zfcp_fc_scan_ports(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    scan_work);
	int ret, i;
	struct zfcp_fc_gpn_ft *gpn_ft;
	int chain, max_entries, buf_num, max_bytes;

	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;

	gpn_ft = zfcp_alloc_sg_env(buf_num);
	if (!gpn_ft)
		goto out;

	for (i = 0; i < 3; i++) {
		ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
		if (!ret) {
			ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
			if (ret == -EAGAIN)
				ssleep(1);
			else
				break;
		}
	}
	zfcp_free_sg_env(gpn_ft, buf_num);
out:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
}

static void zfcp_fc_ct_els_job_handler(void *data)
{
	struct fc_bsg_job *job = data;
	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
	struct fc_bsg_reply *jr = job->reply;

	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	jr->result = zfcp_ct_els->status ? -EIO : 0;
	job->job_done(job);
}

static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
{
	u32 preamble_word1;
	u8 gs_type;
	struct zfcp_adapter *adapter;

	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
	gs_type = (preamble_word1 & 0xff000000) >> 24;

	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];

	switch (gs_type) {
	case FC_FST_ALIAS:
		return &adapter->gs->as;
	case FC_FST_MGMT:
		return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
	default:
		return NULL;
	}
}

static void zfcp_fc_ct_job_handler(void *data)
{
	struct fc_bsg_job *job = data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	zfcp_fc_wka_port_put(wka_port);

	zfcp_fc_ct_els_job_handler(data);
}

static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
				struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_ct_els *els = job->dd_data;
	struct fc_rport *rport = job->rport;
	struct zfcp_port *port;
	u32 d_id;

	if (rport) {
		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
		if (!port)
			return -EINVAL;

		d_id = port->d_id;
		put_device(&port->dev);
	} else
		d_id = ntoh24(job->request->rqst_data.h_els.port_id);

	els->handler = zfcp_fc_ct_els_job_handler;
	return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}

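/*
 * Run a CT pass-through request from the FC BSG interface: resolve the
 * well-known-address port from the CT preamble, take a reference on it
 * and send the request; the reference is dropped again in
 * zfcp_fc_ct_job_handler once the response arrives.
 */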
static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
			       struct zfcp_adapter *adapter)
{
	int ret;
	struct zfcp_fsf_ct_els *ct = job->dd_data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	if (!wka_port)
		return -EINVAL;

	ret = zfcp_fc_wka_port_get(wka_port);
	if (ret)
		return ret;

	ct->handler = zfcp_fc_ct_job_handler;
	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
	if (ret)
		zfcp_fc_wka_port_put(wka_port);

	return ret;
}

int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct zfcp_adapter *adapter;
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;

	shost = job->rport ? rport_to_shost(job->rport) : job->shost;
	adapter = (struct zfcp_adapter *)shost->hostdata[0];

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
		return -EINVAL;

	ct_els->req = job->request_payload.sg_list;
	ct_els->resp = job->reply_payload.sg_list;
	ct_els->handler_data = job;

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		return zfcp_fc_exec_els_job(job, adapter);
	case FC_BSG_RPT_CT:
	case FC_BSG_HST_CT:
		return zfcp_fc_exec_ct_job(job, adapter);
	default:
		return -EINVAL;
	}
}

int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
{
	/* hardware tracks timeout, reset bsg timeout to not interfere */
	return -EAGAIN;
}

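/**
 * zfcp_fc_gs_setup - allocate and initialize the generic services WKA ports
 * @adapter: adapter the well-known-address ports belong to
 *
 * Returns 0 on success, -ENOMEM if the structure cannot be allocated.
 */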
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_fc_wka_ports *wka_ports;

	wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
	if (!wka_ports)
		return -ENOMEM;

	adapter->gs = wka_ports;
	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);

	return 0;
}

void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
	kfree(adapter->gs);
	adapter->gs = NULL;
}