/*
 * zfcp device driver
 *
 * Fibre Channel related functions for the zfcp device driver.
 *
 * Copyright IBM Corporation 2008, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"

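/*
 * Masks applied to a 24-bit N_Port ID to select the part of the address an
 * incoming RSCN page refers to, indexed by the page's address format
 * (single port, area, domain or entire fabric).
 */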
static u32 zfcp_fc_rscn_range_mask[] = {
	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
	[ELS_ADDR_FMT_DOM]		= 0xFF0000,
	[ELS_ADDR_FMT_FAB]		= 0x000000,
};

/**
 * zfcp_fc_post_event - post event to userspace via fc_transport
 * @work: work struct with enqueued events
 */
void zfcp_fc_post_event(struct work_struct *work)
{
	struct zfcp_fc_event *event = NULL, *tmp = NULL;
	LIST_HEAD(tmp_lh);
	struct zfcp_fc_events *events = container_of(work,
					struct zfcp_fc_events, work);
	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
						events);

	spin_lock_bh(&events->list_lock);
	list_splice_init(&events->list, &tmp_lh);
	spin_unlock_bh(&events->list_lock);

	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
				event->code, event->data);
		list_del(&event->list);
		kfree(event);
	}
}

/**
 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
 * @adapter: The adapter where to enqueue the event
 * @event_code: The event code (as defined in fc_host_event_code in
 *		scsi_transport_fc.h)
 * @event_data: The event data (e.g. n_port page in case of els)
 */
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
			enum fc_host_event_code event_code, u32 event_data)
{
	struct zfcp_fc_event *event;

	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
	if (!event)
		return;

	event->code = event_code;
	event->data = event_data;

	spin_lock(&adapter->events.list_lock);
	list_add_tail(&event->list, &adapter->events.list);
	spin_unlock(&adapter->events.list_lock);

	queue_work(adapter->work_queue, &adapter->events.work);
}

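/*
 * Take a reference on a well-known-address (WKA) port, opening it first and
 * waiting for the open to complete if it is not online yet.
 */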
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
	if (mutex_lock_interruptible(&wka_port->mutex))
		return -ERESTARTSYS;

	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
		if (zfcp_fsf_open_wka_port(wka_port))
			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	}

	mutex_unlock(&wka_port->mutex);

	wait_event(wka_port->completion_wq,
		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
		atomic_inc(&wka_port->refcount);
		return 0;
	}
	return -EIO;
}

static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_fc_wka_port *wka_port =
			container_of(dw, struct zfcp_fc_wka_port, work);

	mutex_lock(&wka_port->mutex);
	if ((atomic_read(&wka_port->refcount) != 0) ||
	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
		goto out;

	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
	if (zfcp_fsf_close_wka_port(wka_port)) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		wake_up(&wka_port->completion_wq);
	}
out:
	mutex_unlock(&wka_port->mutex);
}

static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
	if (atomic_dec_return(&wka_port->refcount) != 0)
		return;
	/* wait 10 milliseconds, other reqs might pop in */
	schedule_delayed_work(&wka_port->work, HZ / 100);
}

static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
				  struct zfcp_adapter *adapter)
{
	init_waitqueue_head(&wka_port->completion_wq);

	wka_port->adapter = adapter;
	wka_port->d_id = d_id;

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	atomic_set(&wka_port->refcount, 0);
	mutex_init(&wka_port->mutex);
	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}

static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
	cancel_delayed_work_sync(&wka->work);
	mutex_lock(&wka->mutex);
	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
	mutex_unlock(&wka->mutex);
}

void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
	if (!gs)
		return;
	zfcp_fc_wka_port_force_offline(&gs->ms);
	zfcp_fc_wka_port_force_offline(&gs->ts);
	zfcp_fc_wka_port_force_offline(&gs->ds);
	zfcp_fc_wka_port_force_offline(&gs->as);
}

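/*
 * Walk the adapter's port list and trigger a link test for each port whose
 * N_Port ID falls into the address range announced by the RSCN page; ports
 * that have no d_id yet are reopened so recovery can look it up again.
 */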
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
				   struct fc_els_rscn_page *page)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
			zfcp_fc_test_link(port);
		if (!port->d_id)
			zfcp_erp_port_reopen(port,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "fcrscn1", NULL);
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
	struct fc_els_rscn *head;
	struct fc_els_rscn_page *page;
	u16 i;
	u16 no_entries;
	unsigned int afmt;

	head = (struct fc_els_rscn *) status_buffer->payload.data;
	page = (struct fc_els_rscn_page *) head;

	/* see FC-FS */
	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);

	for (i = 1; i < no_entries; i++) {
		/* skip head and start with 1st element */
		page++;
		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
				       page);
		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
				      *(u32 *)page);
	}
	queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
}

static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->wwpn == wwpn) {
			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer;
	struct fc_els_flogi *plogi;

	status_buffer = (struct fsf_status_read_buffer *) req->data;
	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}

static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *)req->data;
	struct fc_els_logo *logo =
		(struct fc_els_logo *) status_buffer->payload.data;

	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}

/**
 * zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
 */
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *) fsf_req->data;
	unsigned int els_type = status_buffer->payload.data[0];

	zfcp_dbf_san_incoming_els(fsf_req);
	if (els_type == ELS_PLOGI)
		zfcp_fc_incoming_plogi(fsf_req);
	else if (els_type == ELS_LOGO)
		zfcp_fc_incoming_logo(fsf_req);
	else if (els_type == ELS_RSCN)
		zfcp_fc_incoming_rscn(fsf_req);
}

static void zfcp_fc_ns_gid_pn_eval(void *data)
{
	struct zfcp_fc_gid_pn *gid_pn = data;
	struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
	struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
	struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
	struct zfcp_port *port = gid_pn->port;

	if (ct->status)
		return;
	if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
		return;

	/* paranoia */
	if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
		return;
	/* looks like a valid d_id */
	port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
}

static void zfcp_fc_complete(void *data)
{
	complete(data);
}

static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
				     struct zfcp_fc_gid_pn *gid_pn)
{
	struct zfcp_adapter *adapter = port->adapter;
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	/* setup parameters for send generic command */
	gid_pn->port = port;
	gid_pn->ct.handler = zfcp_fc_complete;
	gid_pn->ct.handler_data = &completion;
	gid_pn->ct.req = &gid_pn->sg_req;
	gid_pn->ct.resp = &gid_pn->sg_resp;
	sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
		    sizeof(struct zfcp_fc_gid_pn_req));
	sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
		    sizeof(struct zfcp_fc_gid_pn_resp));

	/* setup nameserver request */
	gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
	gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
	gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
	gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
	gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
	gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
	gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
			       adapter->pool.gid_pn_req,
			       ZFCP_FC_CTELS_TMO);
	if (!ret) {
		wait_for_completion(&completion);
		zfcp_fc_ns_gid_pn_eval(gid_pn);
	}
	return ret;
}

/**
 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
 * @port: port where GID_PN request is needed
 * return: -ENOMEM on error, 0 otherwise
 */
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
	int ret;
	struct zfcp_fc_gid_pn *gid_pn;
	struct zfcp_adapter *adapter = port->adapter;

	gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
	if (!gid_pn)
		return -ENOMEM;

	memset(gid_pn, 0, sizeof(*gid_pn));

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out;

	ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);

	zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
	mempool_free(gid_pn, adapter->pool.gid_pn);
	return ret;
}

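/*
 * Work item: look up the port's d_id via a GID_PN nameserver request and
 * reopen the port on success; otherwise mark the port as failed, or trigger
 * adapter recovery if the request could not be issued at all.
 */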
void zfcp_fc_port_did_lookup(struct work_struct *work)
{
	int ret;
	struct zfcp_port *port = container_of(work, struct zfcp_port,
					      gid_pn_work);

	ret = zfcp_fc_ns_gid_pn(port);
	if (ret) {
		/* could not issue gid_pn for some reason */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL);
		goto out;
	}

	if (!port->d_id) {
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
		goto out;
	}

	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
 * @port: The zfcp_port to lookup the d_id for.
 */
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
		put_device(&port->dev);
}

/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
 */
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
	if (plogi->fl_wwpn != port->wwpn) {
		port->d_id = 0;
		dev_warn(&port->adapter->ccw_device->dev,
			 "A port opened with WWPN 0x%016Lx returned data that "
			 "identifies it as WWPN 0x%016Lx\n",
			 (unsigned long long) port->wwpn,
			 (unsigned long long) plogi->fl_wwpn);
		return;
	}

	port->wwnn = plogi->fl_wwnn;
	port->maxframe_size = plogi->fl_csp.sp_bb_data;

	if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS1;
	if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS2;
	if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS3;
	if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS4;
}

static void zfcp_fc_adisc_handler(void *data)
{
	struct zfcp_fc_els_adisc *adisc = data;
	struct zfcp_port *port = adisc->els.port;
	struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;

	if (adisc->els.status) {
		/* request rejected or timed out */
		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
					    "fcadh_1", NULL);
		goto out;
	}

	if (!port->wwnn)
		port->wwnn = adisc_resp->adisc_wwnn;

	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fcadh_2", NULL);
		goto out;
	}

	/* port is good, unblock rport without going through erp */
	zfcp_scsi_schedule_rport_register(port);
 out:
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	put_device(&port->dev);
	kmem_cache_free(zfcp_data.adisc_cache, adisc);
}

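/* send an ADISC ELS to the remote port to check that the link is still usable */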
static int zfcp_fc_adisc(struct zfcp_port *port)
{
	struct zfcp_fc_els_adisc *adisc;
	struct zfcp_adapter *adapter = port->adapter;
	int ret;

	adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
	if (!adisc)
		return -ENOMEM;

	adisc->els.port = port;
	adisc->els.req = &adisc->req;
	adisc->els.resp = &adisc->resp;
	sg_init_one(adisc->els.req, &adisc->adisc_req,
		    sizeof(struct fc_els_adisc));
	sg_init_one(adisc->els.resp, &adisc->adisc_resp,
		    sizeof(struct fc_els_adisc));

	adisc->els.handler = zfcp_fc_adisc_handler;
	adisc->els.handler_data = adisc;

	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
	   without FC-AL-2 capability, so we don't set it */
	adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
	adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
	adisc->adisc_req.adisc_cmd = ELS_ADISC;
	hton24(adisc->adisc_req.adisc_port_id,
	       fc_host_port_id(adapter->scsi_host));

	ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
				ZFCP_FC_CTELS_TMO);
	if (ret)
		kmem_cache_free(zfcp_data.adisc_cache, adisc);

	return ret;
}

void zfcp_fc_link_test_work(struct work_struct *work)
{
	struct zfcp_port *port =
		container_of(work, struct zfcp_port, test_link_work);
	int retval;

	get_device(&port->dev);
	port->rport_task = RPORT_DEL;
	zfcp_scsi_rport_work(&port->rport_work);

	/* only issue one test command at a time per port */
	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
		goto out;

	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

	retval = zfcp_fc_adisc(port);
	if (retval == 0)
		return;

	/* sending ADISC was not possible */
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);

out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_test_link - lightweight link test procedure
 * @port: port to be tested
 *
 * Test status of a link to a remote port using the ELS command ADISC.
 * If there is a problem with the remote port, error recovery steps
 * will be triggered.
 */
void zfcp_fc_test_link(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
		put_device(&port->dev);
}

static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
{
	struct scatterlist *sg = &gpn_ft->sg_req;

	kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
	zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);

	kfree(gpn_ft);
}

static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
{
	struct zfcp_fc_gpn_ft *gpn_ft;
	struct zfcp_fc_gpn_ft_req *req;

	gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
	if (!gpn_ft)
		return NULL;

	req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
	if (!req) {
		kfree(gpn_ft);
		gpn_ft = NULL;
		goto out;
	}
	sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));

	if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
		zfcp_free_sg_env(gpn_ft, buf_num);
		gpn_ft = NULL;
	}
out:
	return gpn_ft;
}

static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
			       struct zfcp_adapter *adapter, int max_bytes)
{
	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
	struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	/* prepare CT IU for GPN_FT */
	req->ct_hdr.ct_rev = FC_CT_REV;
	req->ct_hdr.ct_fs_type = FC_FST_DIR;
	req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
	req->ct_hdr.ct_options = 0;
	req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
	req->ct_hdr.ct_mr_size = max_bytes / 4;
	req->gpn_ft.fn_domain_id_scope = 0;
	req->gpn_ft.fn_area_id_scope = 0;
	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;

	/* prepare zfcp_send_ct */
	ct->handler = zfcp_fc_complete;
	ct->handler_data = &completion;
	ct->req = &gpn_ft->sg_req;
	ct->resp = gpn_ft->sg_resp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
	return ret;
}

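/*
 * Ports newly enqueued by the scan still carry ZFCP_STATUS_COMMON_NOESC.
 * Clear the flag and move ports that never completed a login (no supported
 * classes) and have no units attached to the removal list.
 */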
static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
		return;

	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);

	if ((port->supported_classes != 0) ||
	    !list_empty(&port->unit_list))
		return;

	list_move_tail(&port->list, lh);
}

static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
			       struct zfcp_adapter *adapter, int max_entries)
{
	struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
	struct scatterlist *sg = gpn_ft->sg_resp;
	struct fc_ct_hdr *hdr = sg_virt(sg);
	struct fc_gpn_ft_resp *acc = sg_virt(sg);
	struct zfcp_port *port, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_lh);
	u32 d_id;
	int ret = 0, x, last = 0;

	if (ct->status)
		return -EIO;

	if (hdr->ct_cmd != FC_FS_ACC) {
		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
			return -EAGAIN; /* might be a temporary condition */
		return -EIO;
	}

	if (hdr->ct_mr_size) {
		dev_warn(&adapter->ccw_device->dev,
			 "The name server reported %d words residual data\n",
			 hdr->ct_mr_size);
		return -E2BIG;
	}

	/* first entry is the header */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++sg);

		last = acc->fp_flags & FC_NS_FID_LAST;
		d_id = ntoh24(acc->fp_fid);

		/* don't attach ports with a well known address */
		if (d_id >= FC_FID_WELL_KNOWN_BASE)
			continue;
		/* skip the adapter's port and known remote ports */
		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
			continue;

		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
					 ZFCP_STATUS_COMMON_NOESC, d_id);
		if (!IS_ERR(port))
			zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
		else if (PTR_ERR(port) != -EEXIST)
			ret = PTR_ERR(port);
	}

	zfcp_erp_wait(adapter);
	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		zfcp_fc_validate_port(port, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);

	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
		zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
	}

	return ret;
}

/**
 * zfcp_fc_scan_ports - scan remote ports and attach new ports
 * @work: reference to scheduled work
 */
void zfcp_fc_scan_ports(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    scan_work);
	int ret, i;
	struct zfcp_fc_gpn_ft *gpn_ft;
	int chain, max_entries, buf_num, max_bytes;

	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;

	gpn_ft = zfcp_alloc_sg_env(buf_num);
	if (!gpn_ft)
		goto out;

	for (i = 0; i < 3; i++) {
		ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
		if (!ret) {
			ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
			if (ret == -EAGAIN)
				ssleep(1);
			else
				break;
		}
	}
	zfcp_free_sg_env(gpn_ft, buf_num);
out:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
}

static void zfcp_fc_ct_els_job_handler(void *data)
{
	struct fc_bsg_job *job = data;
	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
	struct fc_bsg_reply *jr = job->reply;

	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	jr->result = zfcp_ct_els->status ? -EIO : 0;
	job->job_done(job);
}

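/* map the GS type from the CT preamble to the corresponding WKA port */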
static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
{
	u32 preamble_word1;
	u8 gs_type;
	struct zfcp_adapter *adapter;

	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
	gs_type = (preamble_word1 & 0xff000000) >> 24;

	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];

	switch (gs_type) {
	case FC_FST_ALIAS:
		return &adapter->gs->as;
	case FC_FST_MGMT:
		return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
	default:
		return NULL;
	}
}

static void zfcp_fc_ct_job_handler(void *data)
{
	struct fc_bsg_job *job = data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	zfcp_fc_wka_port_put(wka_port);

	zfcp_fc_ct_els_job_handler(data);
}

static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
				struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_ct_els *els = job->dd_data;
	struct fc_rport *rport = job->rport;
	struct zfcp_port *port;
	u32 d_id;

	if (rport) {
		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
		if (!port)
			return -EINVAL;

		d_id = port->d_id;
		put_device(&port->dev);
	} else
		d_id = ntoh24(job->request->rqst_data.h_els.port_id);

	els->handler = zfcp_fc_ct_els_job_handler;
	return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}

static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
			       struct zfcp_adapter *adapter)
{
	int ret;
	struct zfcp_fsf_ct_els *ct = job->dd_data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	if (!wka_port)
		return -EINVAL;

	ret = zfcp_fc_wka_port_get(wka_port);
	if (ret)
		return ret;

	ct->handler = zfcp_fc_ct_job_handler;
	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
	if (ret)
		zfcp_fc_wka_port_put(wka_port);

	return ret;
}

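/* entry point for FC BSG (HBA API) requests: dispatch ELS and CT jobs */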
int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct zfcp_adapter *adapter;
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;

	shost = job->rport ? rport_to_shost(job->rport) : job->shost;
	adapter = (struct zfcp_adapter *)shost->hostdata[0];

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
		return -EINVAL;

	ct_els->req = job->request_payload.sg_list;
	ct_els->resp = job->reply_payload.sg_list;
	ct_els->handler_data = job;

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		return zfcp_fc_exec_els_job(job, adapter);
	case FC_BSG_RPT_CT:
	case FC_BSG_HST_CT:
		return zfcp_fc_exec_ct_job(job, adapter);
	default:
		return -EINVAL;
	}
}

int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
{
	/* hardware tracks timeout, reset bsg timeout to not interfere */
	return -EAGAIN;
}

int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_fc_wka_ports *wka_ports;

	wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
	if (!wka_ports)
		return -ENOMEM;

	adapter->gs = wka_ports;
	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);

	return 0;
}

void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
	kfree(adapter->gs);
	adapter->gs = NULL;
}