/*
 * zfcp device driver
 *
 * Fibre Channel related functions for the zfcp device driver.
 *
 * Copyright IBM Corp. 2008, 2017
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/bsg-lib.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"

struct kmem_cache *zfcp_fc_req_cache;

static u32 zfcp_fc_rscn_range_mask[] = {
	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
	[ELS_ADDR_FMT_DOM]		= 0xFF0000,
	[ELS_ADDR_FMT_FAB]		= 0x000000,
};

static bool no_auto_port_rescan;
module_param(no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
		 "no automatic port_rescan (default off)");

static unsigned int port_scan_backoff = 500;
module_param(port_scan_backoff, uint, 0600);
MODULE_PARM_DESC(port_scan_backoff,
	"upper limit of port scan random backoff in msecs (default 500)");

static unsigned int port_scan_ratelimit = 60000;
module_param(port_scan_ratelimit, uint, 0600);
MODULE_PARM_DESC(port_scan_ratelimit,
	"minimum interval between port scans in msecs (default 60000)");

unsigned int zfcp_fc_port_scan_backoff(void)
{
	if (!port_scan_backoff)
		return 0;
	return get_random_int() % port_scan_backoff;
}

static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
{
	unsigned long interval = msecs_to_jiffies(port_scan_ratelimit);
	unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());

	adapter->next_port_scan = jiffies + interval + backoff;
}

static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
{
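	/*
	 * Port scans are rate limited: adapter->next_port_scan (set by
	 * zfcp_fc_port_scan_time()) marks the earliest time the next scan
	 * may run, so a scan requested before that is queued with a delay.
	 */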
	unsigned long now = jiffies;
	unsigned long next = adapter->next_port_scan;
	unsigned long delay = 0, max;

	/* delay only needed within waiting period */
	if (time_before(now, next)) {
		delay = next - now;
		/* paranoia: never ever delay scans longer than specified */
		max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff);
		delay = min(delay, max);
	}

	queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
}

void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
	if (no_auto_port_rescan)
		return;

	zfcp_fc_port_scan(adapter);
}

void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
{
	if (!no_auto_port_rescan)
		return;

	zfcp_fc_port_scan(adapter);
}

/**
 * zfcp_fc_post_event - post event to userspace via fc_transport
 * @work: work struct with enqueued events
 */
void zfcp_fc_post_event(struct work_struct *work)
{
	struct zfcp_fc_event *event = NULL, *tmp = NULL;
	LIST_HEAD(tmp_lh);
	struct zfcp_fc_events *events = container_of(work,
					struct zfcp_fc_events, work);
	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
						events);

	spin_lock_bh(&events->list_lock);
	list_splice_init(&events->list, &tmp_lh);
	spin_unlock_bh(&events->list_lock);

	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
				event->code, event->data);
		list_del(&event->list);
		kfree(event);
	}

}

/**
 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
 * @adapter: The adapter where to enqueue the event
 * @event_code: The event code (as defined in fc_host_event_code in
 *		scsi_transport_fc.h)
 * @event_data: The event data (e.g. n_port page in case of els)
 */
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
			enum fc_host_event_code event_code, u32 event_data)
{
	struct zfcp_fc_event *event;

	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
	if (!event)
		return;

	event->code = event_code;
	event->data = event_data;

	spin_lock(&adapter->events.list_lock);
	list_add_tail(&event->list, &adapter->events.list);
	spin_unlock(&adapter->events.list_lock);

	queue_work(adapter->work_queue, &adapter->events.work);
}

static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
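	/*
	 * Take a reference on the well-known-address (WKA) port; if it is
	 * offline or closing, request that the FCP channel opens it and
	 * wait until the open attempt has completed.
	 */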
	if (mutex_lock_interruptible(&wka_port->mutex))
		return -ERESTARTSYS;

	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
		if (zfcp_fsf_open_wka_port(wka_port))
			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	}

	mutex_unlock(&wka_port->mutex);

	wait_event(wka_port->completion_wq,
		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
		atomic_inc(&wka_port->refcount);
		return 0;
	}
	return -EIO;
}

static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_fc_wka_port *wka_port =
			container_of(dw, struct zfcp_fc_wka_port, work);

	mutex_lock(&wka_port->mutex);
	if ((atomic_read(&wka_port->refcount) != 0) ||
	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
		goto out;

	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
	if (zfcp_fsf_close_wka_port(wka_port)) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		wake_up(&wka_port->completion_wq);
	}
out:
	mutex_unlock(&wka_port->mutex);
}

static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
	if (atomic_dec_return(&wka_port->refcount) != 0)
		return;
	/* wait 10 milliseconds, other reqs might pop in */
	schedule_delayed_work(&wka_port->work, HZ / 100);
}

static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
				  struct zfcp_adapter *adapter)
{
	init_waitqueue_head(&wka_port->completion_wq);

	wka_port->adapter = adapter;
	wka_port->d_id = d_id;

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	atomic_set(&wka_port->refcount, 0);
	mutex_init(&wka_port->mutex);
	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}

static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
	cancel_delayed_work_sync(&wka->work);
	mutex_lock(&wka->mutex);
	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
	mutex_unlock(&wka->mutex);
}

void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
	if (!gs)
		return;
	zfcp_fc_wka_port_force_offline(&gs->ms);
	zfcp_fc_wka_port_force_offline(&gs->ts);
	zfcp_fc_wka_port_force_offline(&gs->ds);
	zfcp_fc_wka_port_force_offline(&gs->as);
}

static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
				   struct fc_els_rscn_page *page)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
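		/*
		 * Ports whose d_id falls into the address range announced by
		 * this RSCN page get a link test; ports without a d_id are
		 * reopened so that one can be looked up again.
		 */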
		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
			zfcp_fc_test_link(port);
		if (!port->d_id)
			zfcp_erp_port_reopen(port,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "fcrscn1");
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
	struct fc_els_rscn *head;
	struct fc_els_rscn_page *page;
	u16 i;
	u16 no_entries;
	unsigned int afmt;

	head = (struct fc_els_rscn *) status_buffer->payload.data;
	page = (struct fc_els_rscn_page *) head;
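	/* the ELS header is followed by one address page per affected range */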

	/* see FC-FS */
	no_entries = be16_to_cpu(head->rscn_plen) /
		sizeof(struct fc_els_rscn_page);

	for (i = 1; i < no_entries; i++) {
		/* skip head and start with 1st element */
		page++;
		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
				       page);
		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
				      *(u32 *)page);
	}
	zfcp_fc_conditional_port_scan(fsf_req->adapter);
}

static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->wwpn == wwpn) {
			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer;
	struct fc_els_flogi *plogi;

	status_buffer = (struct fsf_status_read_buffer *) req->data;
	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
	zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn));
}

static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *)req->data;
	struct fc_els_logo *logo =
		(struct fc_els_logo *) status_buffer->payload.data;

	zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn));
}

/**
 * zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
 */
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *) fsf_req->data;
	unsigned int els_type = status_buffer->payload.data[0];

	zfcp_dbf_san_in_els("fciels1", fsf_req);
	if (els_type == ELS_PLOGI)
		zfcp_fc_incoming_plogi(fsf_req);
	else if (els_type == ELS_LOGO)
		zfcp_fc_incoming_logo(fsf_req);
	else if (els_type == ELS_RSCN)
		zfcp_fc_incoming_rscn(fsf_req);
}

static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;

	if (ct_els->status)
		return;
	if (gid_pn_rsp->ct_hdr.ct_cmd != cpu_to_be16(FC_FS_ACC))
		return;

	/* looks like a valid d_id */
	ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}

static void zfcp_fc_complete(void *data)
{
	complete(data);
}

static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
	ct_hdr->ct_rev = FC_CT_REV;
	ct_hdr->ct_fs_type = FC_FST_DIR;
	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
	ct_hdr->ct_cmd = cpu_to_be16(cmd);
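	/* the CT maximum/residual size field is specified in 4-byte words */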
	ct_hdr->ct_mr_size = cpu_to_be16(mr_size / 4);
}

static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
				     struct zfcp_fc_req *fc_req)
{
	struct zfcp_adapter *adapter = port->adapter;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
	int ret;

	/* set up parameters for sending the generic command */
	fc_req->ct_els.port = port;
	fc_req->ct_els.handler = zfcp_fc_complete;
	fc_req->ct_els.handler_data = &completion;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));

	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
	gid_pn_req->gid_pn.fn_wwpn = cpu_to_be64(port->wwpn);

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
			       adapter->pool.gid_pn_req,
			       ZFCP_FC_CTELS_TMO);
	if (!ret) {
		wait_for_completion(&completion);
		zfcp_fc_ns_gid_pn_eval(fc_req);
	}
	return ret;
}

/**
 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
 * @port: port where GID_PN request is needed
 * return: -ENOMEM on error, 0 otherwise
 */
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
	int ret;
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;

	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	memset(fc_req, 0, sizeof(*fc_req));

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out;

	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);

	zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
	mempool_free(fc_req, adapter->pool.gid_pn);
	return ret;
}

void zfcp_fc_port_did_lookup(struct work_struct *work)
{
	int ret;
	struct zfcp_port *port = container_of(work, struct zfcp_port,
					      gid_pn_work);

	ret = zfcp_fc_ns_gid_pn(port);
	if (ret) {
		/* could not issue gid_pn for some reason */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
		goto out;
	}

	if (!port->d_id) {
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
		goto out;
	}

	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
 * @port: The zfcp_port to lookup the d_id for.
 */
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
		put_device(&port->dev);
}

/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
 */
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
	if (be64_to_cpu(plogi->fl_wwpn) != port->wwpn) {
		port->d_id = 0;
		dev_warn(&port->adapter->ccw_device->dev,
			 "A port opened with WWPN 0x%016Lx returned data that "
			 "identifies it as WWPN 0x%016Lx\n",
			 (unsigned long long) port->wwpn,
			 (unsigned long long) be64_to_cpu(plogi->fl_wwpn));
		return;
	}

	port->wwnn = be64_to_cpu(plogi->fl_wwnn);
	port->maxframe_size = be16_to_cpu(plogi->fl_csp.sp_bb_data);

	if (plogi->fl_cssp[0].cp_class & cpu_to_be16(FC_CPC_VALID))
		port->supported_classes |= FC_COS_CLASS1;
	if (plogi->fl_cssp[1].cp_class & cpu_to_be16(FC_CPC_VALID))
		port->supported_classes |= FC_COS_CLASS2;
	if (plogi->fl_cssp[2].cp_class & cpu_to_be16(FC_CPC_VALID))
		port->supported_classes |= FC_COS_CLASS3;
	if (plogi->fl_cssp[3].cp_class & cpu_to_be16(FC_CPC_VALID))
		port->supported_classes |= FC_COS_CLASS4;
}

static void zfcp_fc_adisc_handler(void *data)
{
	struct zfcp_fc_req *fc_req = data;
	struct zfcp_port *port = fc_req->ct_els.port;
	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;

	if (fc_req->ct_els.status) {
		/* request rejected or timed out */
		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
					    "fcadh_1");
		goto out;
	}

	if (!port->wwnn)
		port->wwnn = be64_to_cpu(adisc_resp->adisc_wwnn);

	if ((port->wwpn != be64_to_cpu(adisc_resp->adisc_wwpn)) ||
	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fcadh_2");
		goto out;
	}

	/* port is good, unblock rport without going through erp */
	zfcp_scsi_schedule_rport_register(port);
 out:
	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	put_device(&port->dev);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}

static int zfcp_fc_adisc(struct zfcp_port *port)
{
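	/*
	 * Send an ADISC ELS to the remote port to verify that the link and
	 * the port's WWPN are still valid; the response is evaluated
	 * asynchronously in zfcp_fc_adisc_handler().
	 */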
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	int ret;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	fc_req->ct_els.port = port;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
533
		    sizeof(struct fc_els_adisc));
534
	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
535
		    sizeof(struct fc_els_adisc));
536

537 538
	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
	fc_req->ct_els.handler_data = fc_req;
539 540 541

	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
	   without FC-AL-2 capability, so we don't set it */
542 543
	fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost));
	fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost));
544 545
	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
546

547
	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
548
				ZFCP_FC_CTELS_TMO);
549
	if (ret)
550
		kmem_cache_free(zfcp_fc_req_cache, fc_req);
551 552

	return ret;
553 554
}

void zfcp_fc_link_test_work(struct work_struct *work)
{
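	/*
	 * Worker for zfcp_fc_test_link(): temporarily delete the rport and
	 * issue a single ADISC to check whether the remote port is still
	 * reachable.
	 */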
	struct zfcp_port *port =
		container_of(work, struct zfcp_port, test_link_work);
	int retval;

	get_device(&port->dev);
	port->rport_task = RPORT_DEL;
	zfcp_scsi_rport_work(&port->rport_work);

	/* only issue one test command at a time per port */
	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
		goto out;

	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

	retval = zfcp_fc_adisc(port);
	if (retval == 0)
		return;

	/* send of ADISC was not possible */
	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");

out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_test_link - lightweight link test procedure
 * @port: port to be tested
 *
 * Test status of a link to a remote port using the ELS command ADISC.
 * If there is a problem with the remote port, error recovery steps
 * will be triggered.
 */
void zfcp_fc_test_link(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
		put_device(&port->dev);
}

static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
	struct zfcp_fc_req *fc_req;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		return NULL;

	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
		kmem_cache_free(zfcp_fc_req_cache, fc_req);
		return NULL;
	}

	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
		    sizeof(struct zfcp_fc_gpn_ft_req));

	return fc_req;
}

static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_bytes)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
	return ret;
}

static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
		return;

	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);

	if ((port->supported_classes != 0) ||
	    !list_empty(&port->unit_list))
		return;

	list_move_tail(&port->list, lh);
}

static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_entries)
{
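	/*
	 * Walk the GPN_FT accept payload and enqueue a zfcp_port for each
	 * remote WWPN reported by the name server; newly enqueued ports that
	 * could not be opened and have no units are removed again once
	 * error recovery has finished.
	 */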
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct scatterlist *sg = &fc_req->sg_rsp;
	struct fc_ct_hdr *hdr = sg_virt(sg);
	struct fc_gpn_ft_resp *acc = sg_virt(sg);
	struct zfcp_port *port, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_lh);
	u32 d_id;
	int ret = 0, x, last = 0;

	if (ct_els->status)
		return -EIO;

	if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC)) {
		if (hdr->ct_reason == FC_FS_RJT_UNABL)
			return -EAGAIN; /* might be a temporary condition */
		return -EIO;
	}

	if (hdr->ct_mr_size) {
		dev_warn(&adapter->ccw_device->dev,
			 "The name server reported %d words residual data\n",
			 hdr->ct_mr_size);
		return -E2BIG;
	}

	/* first entry is the header */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++sg);

		last = acc->fp_flags & FC_NS_FID_LAST;
		d_id = ntoh24(acc->fp_fid);

		/* don't attach ports with a well known address */
		if (d_id >= FC_FID_WELL_KNOWN_BASE)
			continue;
		/* skip the adapter's port and known remote ports */
		if (be64_to_cpu(acc->fp_wwpn) ==
		    fc_host_port_name(adapter->scsi_host))
			continue;

		port = zfcp_port_enqueue(adapter, be64_to_cpu(acc->fp_wwpn),
					 ZFCP_STATUS_COMMON_NOESC, d_id);
		if (!IS_ERR(port))
			zfcp_erp_port_reopen(port, 0, "fcegpf1");
		else if (PTR_ERR(port) != -EEXIST)
			ret = PTR_ERR(port);
	}

	zfcp_erp_wait(adapter);
	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		zfcp_fc_validate_port(port, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);

	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
		device_unregister(&port->dev);
	}

	return ret;
}

/**
 * zfcp_fc_scan_ports - scan remote ports and attach new ports
 * @work: reference to scheduled work
 */
void zfcp_fc_scan_ports(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
						    scan_work);
	int ret, i;
	struct zfcp_fc_req *fc_req;
	int chain, max_entries, buf_num, max_bytes;

	zfcp_fc_port_scan_time(adapter);

	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;

	fc_req = zfcp_alloc_sg_env(buf_num);
	if (!fc_req)
		goto out;

	for (i = 0; i < 3; i++) {
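		/* the name server may reject the request as temporarily unable; retry */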
		ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
		if (!ret) {
			ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
			if (ret == -EAGAIN)
				ssleep(1);
			else
				break;
		}
	}
	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
}

static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
			struct zfcp_fc_req *fc_req)
{
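	/*
	 * Fetch this adapter's symbolic port name from the name server
	 * (GSPN_ID); in NPIV mode, append a DEVNO:/NAME: tag with the ccw
	 * bus-ID and host name unless the name already carries one.
	 */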
	DECLARE_COMPLETION_ONSTACK(completion);
	char devno[] = "DEVNO:";
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
	struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
	int ret;

	zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));

	sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
	sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (ret)
		return ret;

	wait_for_completion(&completion);
	if (ct_els->status)
		return ct_els->status;

	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
	    !(strstr(gspn_rsp->gspn.fp_name, devno)))
		snprintf(fc_host_symbolic_name(adapter->scsi_host),
			 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
			 gspn_rsp->gspn.fp_name, devno,
			 dev_name(&adapter->ccw_device->dev),
			 init_utsname()->nodename);
	else
		strlcpy(fc_host_symbolic_name(adapter->scsi_host),
			gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);

	return 0;
}

static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
			 struct zfcp_fc_req *fc_req)
{
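	/*
	 * Register the symbolic port name with the fabric name server
	 * (RSPN_ID); the outcome of the request is not evaluated.
	 */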
	DECLARE_COMPLETION_ONSTACK(completion);
	struct Scsi_Host *shost = adapter->scsi_host;
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
	struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
	int ret, len;

	zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
	len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
		      FC_SYMBOLIC_NAME_SIZE);
	rspn_req->rspn.fr_name_len = len;

	sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
	sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
}

/**
 * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
 * @work: ns_up_work of the adapter where to update the symbolic port name
 *
 * Retrieve the current symbolic port name that may have been set by
 * the hardware using the GSPN request and update the fc_host
 * symbolic_name sysfs attribute. When running in NPIV mode (and hence
 * the port name is unique for this system), update the symbolic port
 * name to add Linux specific information and update the FC nameserver
 * using the RSPN request.
 */
void zfcp_fc_sym_name_update(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    ns_up_work);
	int ret;
	struct zfcp_fc_req *fc_req;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		return;

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out_free;

	ret = zfcp_fc_gspn(adapter, fc_req);
	if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		goto out_ds_put;

	memset(fc_req, 0, sizeof(*fc_req));
	zfcp_fc_rspn(adapter, fc_req);

out_ds_put:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
out_free:
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}

static void zfcp_fc_ct_els_job_handler(void *data)
{
	struct bsg_job *job = data;
	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
	struct fc_bsg_reply *jr = job->reply;

	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	jr->result = zfcp_ct_els->status ? -EIO : 0;
	bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
}

static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
{
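	/*
	 * Map the GS type from the CT preamble of the BSG request to the
	 * corresponding well-known-address port of this adapter.
	 */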
	u32 preamble_word1;
	u8 gs_type;
	struct zfcp_adapter *adapter;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct Scsi_Host *shost;

	preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
	gs_type = (preamble_word1 & 0xff000000) >> 24;

	shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
	adapter = (struct zfcp_adapter *) shost->hostdata[0];

	switch (gs_type) {
	case FC_FST_ALIAS:
		return &adapter->gs->as;
	case FC_FST_MGMT:
		return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
	default:
		return NULL;
	}
}

static void zfcp_fc_ct_job_handler(void *data)
{
	struct bsg_job *job = data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	zfcp_fc_wka_port_put(wka_port);

	zfcp_fc_ct_els_job_handler(data);
}

static int zfcp_fc_exec_els_job(struct bsg_job *job,
				struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_ct_els *els = job->dd_data;
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct fc_bsg_request *bsg_request = job->request;
	struct zfcp_port *port;
	u32 d_id;

	if (rport) {
		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
		if (!port)
			return -EINVAL;

		d_id = port->d_id;
		put_device(&port->dev);
	} else
		d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);

	els->handler = zfcp_fc_ct_els_job_handler;
	return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}

static int zfcp_fc_exec_ct_job(struct bsg_job *job,
			       struct zfcp_adapter *adapter)
{
	int ret;
	struct zfcp_fsf_ct_els *ct = job->dd_data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	if (!wka_port)
		return -EINVAL;

	ret = zfcp_fc_wka_port_get(wka_port);
	if (ret)
		return ret;

	ct->handler = zfcp_fc_ct_job_handler;
	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
	if (ret)
		zfcp_fc_wka_port_put(wka_port);

	return ret;
}

int zfcp_fc_exec_bsg_job(struct bsg_job *job)
{
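	/*
	 * Entry point for FC BSG (HBA API) pass-through requests: dispatch
	 * ELS and CT jobs to the FSF layer once the adapter is open.
	 */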
	struct Scsi_Host *shost;
	struct zfcp_adapter *adapter;
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_rport *rport = fc_bsg_to_rport(job);

	shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
	adapter = (struct zfcp_adapter *)shost->hostdata[0];

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
		return -EINVAL;

	ct_els->req = job->request_payload.sg_list;
	ct_els->resp = job->reply_payload.sg_list;
	ct_els->handler_data = job;

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		return zfcp_fc_exec_els_job(job, adapter);
	case FC_BSG_RPT_CT:
	case FC_BSG_HST_CT:
		return zfcp_fc_exec_ct_job(job, adapter);
	default:
		return -EINVAL;
	}
}

int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
{
	/* hardware tracks timeout, reset bsg timeout to not interfere */
	return -EAGAIN;
}

int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_fc_wka_ports *wka_ports;

	wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
	if (!wka_ports)
		return -ENOMEM;

	adapter->gs = wka_ports;
	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);

	return 0;
}

void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
	kfree(adapter->gs);
	adapter->gs = NULL;
}