ap_bus.c 57.2 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0+
2
/*
3
 * Copyright IBM Corp. 2006, 2021
4 5 6
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
F
Felix Beck 已提交
7
 *	      Felix Beck <felix.beck@de.ibm.com>
8
 *	      Holger Dengler <hd@linux.vnet.ibm.com>
9
 *	      Harald Freudenberger <freude@linux.ibm.com>
10 11 12 13
 *
 * Adjunct processor bus.
 */

14 15 16
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

17
#include <linux/kernel_stat.h>
18
#include <linux/moduleparam.h>
19 20 21
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
22
#include <linux/freezer.h>
23 24
#include <linux/interrupt.h>
#include <linux/workqueue.h>
25
#include <linux/slab.h>
26 27 28
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
F
Felix Beck 已提交
29
#include <asm/airq.h>
A
Arun Sharma 已提交
30
#include <linux/atomic.h>
F
Felix Beck 已提交
31
#include <asm/isc.h>
32 33
#include <linux/hrtimer.h>
#include <linux/ktime.h>
34
#include <asm/facility.h>
35
#include <linux/crypto.h>
36
#include <linux/mod_devicetable.h>
37
#include <linux/debugfs.h>
38
#include <linux/ctype.h>
39
#include <linux/module.h>
40 41

#include "ap_bus.h"
42
#include "ap_debug.h"
43

44
/*
45
 * Module parameters; note though this file itself isn't modular.
46 47
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
48
static DEFINE_SPINLOCK(ap_domain_lock);
49
module_param_named(domain, ap_domain_index, int, 0440);
50 51 52
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

53 54
static int ap_thread_flag;
module_param_named(poll_thread, ap_thread_flag, int, 0440);
55
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
56

57 58 59 60 61 62 63 64
static char *apm_str;
module_param_named(apmask, apm_str, charp, 0440);
MODULE_PARM_DESC(apmask, "AP bus adapter mask.");

static char *aqm_str;
module_param_named(aqmask, aqm_str, charp, 0440);
MODULE_PARM_DESC(aqmask, "AP bus domain mask.");

65 66 67 68
static int ap_useirq = 1;
module_param_named(useirq, ap_useirq, int, 0440);
MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");

69 70 71
atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
EXPORT_SYMBOL(ap_max_msg_size);

72 73
static struct device *ap_root_device;

74 75 76 77
/* Hashtable of all queue devices on the AP bus */
DEFINE_HASHTABLE(ap_queues, 8);
/* lock used for the ap_queues hashtable */
DEFINE_SPINLOCK(ap_queues_lock);
78

79 80 81 82 83
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);
84

85 86 87
/* # of bus scans since init */
static atomic64_t ap_scan_bus_count;

88 89 90
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);

91 92 93
/* completion for initial APQN bindings complete */
static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);

94
static struct ap_config_info *ap_qci_info;
95
static struct ap_config_info *ap_qci_info_old;
96

97 98 99 100 101
/*
 * AP bus related debug feature things.
 */
debug_info_t *ap_dbf_info;

102
/*
103
 * Workqueue timer for bus rescan.
104 105 106
 */
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
107
static void ap_scan_bus(struct work_struct *);
108
static DECLARE_WORK(ap_scan_work, ap_scan_bus);
109

110
/*
F
Felix Beck 已提交
111
 * Tasklet & timer for AP request polling and interrupts
112
 */
113
static void ap_tasklet_fn(unsigned long);
114
static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
115
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
116
static struct task_struct *ap_poll_kthread;
117
static DEFINE_MUTEX(ap_poll_thread_mutex);
118
static DEFINE_SPINLOCK(ap_poll_timer_lock);
119
static struct hrtimer ap_poll_timer;
120 121 122 123
/*
 * In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
 * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.
 */
124
static unsigned long long poll_timeout = 250000;
125

126 127 128 129
/* Maximum domain id, if not given via qci */
static int ap_max_domain_id = 15;
/* Maximum adapter id, if not given via qci */
static int ap_max_adapter_id = 63;
130

131 132
static struct bus_type ap_bus_type;

133
/* Adapter interrupt definitions */
134
static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
135

136
static bool ap_irq_flag;
137 138 139 140 141 142

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};

143 144 145 146 147 148 149 150 151
/**
 * ap_airq_ptr() - Get the address of the adapter interrupt indicator
 *
 * Returns the address of the local-summary-indicator of the adapter
 * interrupt handler for AP, or NULL if adapter interrupts are not
 * available.
 */
void *ap_airq_ptr(void)
{
152
	if (ap_irq_flag)
153 154 155 156
		return ap_airq.lsi_ptr;
	return NULL;
}

F
Felix Beck 已提交
157 158 159 160 161 162 163
/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	/* facility bit 65: AP-interruption facility is installed */
	return test_facility(65) ? 1 : 0;
}

167
/**
 * ap_qci_available(): Test if AP configuration
 * information can be queried via QCI subfunction.
 *
 * Returns 1 if subfunction PQAP(QCI) is available.
 */
static int ap_qci_available(void)
{
	/* facility bit 12: AP query configuration information facility */
	return test_facility(12) ? 1 : 0;
}

178 179 180 181
/**
 * ap_apft_available(): Test if AP facilities test (APFT)
 * facility is available.
 *
 * Returns 1 if APFT is available.
 */
static int ap_apft_available(void)
{
	/* facility bit 15: AP facilities test facility */
	return test_facility(15) ? 1 : 0;
}

189 190 191 192 193 194 195
/*
 * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
 *
 * Returns 1 if the QACT subfunction is available.
 */
static inline int ap_qact_available(void)
{
196 197
	if (ap_qci_info)
		return ap_qci_info->qact;
198 199 200
	return 0;
}

201
/*
202
 * ap_fetch_qci_info(): Fetch cryptographic config info
203 204 205 206 207 208
 *
 * Returns the ap configuration info fetched via PQAP(QCI).
 * On success 0 is returned, on failure a negative errno
 * is returned, e.g. if the PQAP(QCI) instruction is not
 * available, the return value will be -EOPNOTSUPP.
 */
209
static inline int ap_fetch_qci_info(struct ap_config_info *info)
210
{
211
	if (!ap_qci_available())
212
		return -EOPNOTSUPP;
213 214 215
	if (!info)
		return -EINVAL;
	return ap_qci(info);
216 217
}

218
/**
219 220 221
 * ap_init_qci_info(): Allocate and query qci config info.
 * Does also update the static variables ap_max_domain_id
 * and ap_max_adapter_id if this info is available.
222
 */
223
static void __init ap_init_qci_info(void)
224
{
225
	if (!ap_qci_available()) {
226
		AP_DBF_INFO("%s QCI not supported\n", __func__);
227
		return;
228
	}
229

230 231
	ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
	if (!ap_qci_info)
232
		return;
233 234 235
	ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
	if (!ap_qci_info_old)
		return;
236 237
	if (ap_fetch_qci_info(ap_qci_info) != 0) {
		kfree(ap_qci_info);
238
		kfree(ap_qci_info_old);
239
		ap_qci_info = NULL;
240
		ap_qci_info_old = NULL;
241 242
		return;
	}
243
	AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
244 245 246 247

	if (ap_qci_info->apxa) {
		if (ap_qci_info->Na) {
			ap_max_adapter_id = ap_qci_info->Na;
248 249
			AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
				    __func__, ap_max_adapter_id);
250 251 252
		}
		if (ap_qci_info->Nd) {
			ap_max_domain_id = ap_qci_info->Nd;
253 254
			AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
				    __func__, ap_max_domain_id);
255 256
		}
	}
257 258

	memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278
}

/*
 * ap_test_config(): helper function to extract the nrth bit
 *		     within the unsigned int array field.
 */
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	unsigned int *word = field + (nr >> 5);

	return ap_test_bit(word, nr & 0x1f);
}

/*
 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
 *
 * Returns 0 if the card is not configured
 *	   1 if the card is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_card_id(unsigned int id)
{
279 280 281 282 283
	if (id > ap_max_adapter_id)
		return 0;
	if (ap_qci_info)
		return ap_test_config(ap_qci_info->apm, id);
	return 1;
284 285 286
}

/*
287 288
 * ap_test_config_usage_domain(): Test, whether an AP usage domain
 * is configured.
289 290 291 292 293
 *
 * Returns 0 if the usage domain is not configured
 *	   1 if the usage domain is configured or
 *	     if the configuration information is not available
 */
294
int ap_test_config_usage_domain(unsigned int domain)
295
{
296 297 298 299 300
	if (domain > ap_max_domain_id)
		return 0;
	if (ap_qci_info)
		return ap_test_config(ap_qci_info->aqm, domain);
	return 1;
301
}
302 303 304 305 306 307 308 309 310 311 312 313
EXPORT_SYMBOL(ap_test_config_usage_domain);

/*
 * ap_test_config_ctrl_domain(): Test, whether an AP control domain
 * is configured.
 * @domain AP control domain ID
 *
 * Returns 1 if the control domain is configured
 *	   0 in all other cases
 */
int ap_test_config_ctrl_domain(unsigned int domain)
{
314
	if (!ap_qci_info || domain > ap_max_domain_id)
315
		return 0;
316
	return ap_test_config(ap_qci_info->adm, domain);
317 318
}
EXPORT_SYMBOL(ap_test_config_ctrl_domain);
319

320 321 322 323
/*
 * ap_queue_info(): Check and get AP queue info.
 * Returns true if TAPQ succeeded and the info is filled or
 * false otherwise.
 *
 * @qid:     the APQN (card/queue pair) to query
 * @q_type:  out - hardware device type (AP_DEVICE_TYPE_*)
 * @q_fac:   out - facility bits, possibly augmented for CEX2/CEX3
 * @q_depth: out - queue depth
 * @q_ml:    out - max message length indicator (apxl ml)
 * @q_decfg: out - true if the queue is deconfigured
 * @q_cstop: out - true if the queue is checkstopped
 */
static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
			  int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
{
	struct ap_queue_status status;
	/* layout of TAPQ general register 2 as returned by PQAP(TAPQ) */
	union {
		unsigned long value;
		struct {
			unsigned int fac   : 32; /* facility bits */
			unsigned int at	   :  8; /* ap type */
			unsigned int _res1 :  8;
			unsigned int _res2 :  4;
			unsigned int ml	   :  4; /* apxl ml */
			unsigned int _res3 :  4;
			unsigned int qd	   :  4; /* queue depth */
		} tapq_gr2;
	} tapq_info;

	tapq_info.value = 0;

	/* make sure we don't run into a specification exception */
	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
	    AP_QID_QUEUE(qid) > ap_max_domain_id)
		return false;

	/* call TAPQ on this APQN */
	status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_BUSY:
		/*
		 * According to the architecture in all these cases the
		 * info should be filled. All bits 0 is not possible as
		 * there is at least one of the mode bits set.
		 */
		if (WARN_ON_ONCE(!tapq_info.value))
			return false;
		*q_type = tapq_info.tapq_gr2.at;
		*q_fac = tapq_info.tapq_gr2.fac;
		*q_depth = tapq_info.tapq_gr2.qd;
		*q_ml = tapq_info.tapq_gr2.ml;
		*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
		*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
		switch (*q_type) {
			/* For CEX2 and CEX3 the available functions
			 * are not reflected by the facilities bits.
			 * Instead it is coded into the type. So here
			 * modify the function bits based on the type.
			 */
		case AP_DEVICE_TYPE_CEX2A:
		case AP_DEVICE_TYPE_CEX3A:
			*q_fac |= 0x08000000;
			break;
		case AP_DEVICE_TYPE_CEX2C:
		case AP_DEVICE_TYPE_CEX3C:
			*q_fac |= 0x10000000;
			break;
		default:
			break;
		}
		return true;
	default:
		/*
		 * A response code which indicates, there is no info available.
		 */
		return false;
	}
}

396
/*
 * ap_wait(): Trigger the mechanism that will deliver the next poll.
 * @wait: the kind of waiting requested by the state machine
 *
 * Depending on the wait hint either relies on pending interrupts,
 * wakes the poll kthread, or (re)arms the high resolution poll timer.
 */
void ap_wait(enum ap_sm_wait wait)
{
	ktime_t hr_time;

	switch (wait) {
	case AP_SM_WAIT_AGAIN:
	case AP_SM_WAIT_INTERRUPT:
		/* with interrupts enabled the next event arrives by itself */
		if (ap_irq_flag)
			break;
		/* a running poll thread just needs a wake up */
		if (ap_poll_kthread) {
			wake_up(&ap_poll_wait);
			break;
		}
		/* neither irq nor poll thread: fall back to the timer */
		fallthrough;
	case AP_SM_WAIT_TIMEOUT:
		spin_lock_bh(&ap_poll_timer_lock);
		/* only (re)arm if the timer is not already pending */
		if (!hrtimer_is_queued(&ap_poll_timer)) {
			hr_time = poll_timeout;
			hrtimer_forward_now(&ap_poll_timer, hr_time);
			hrtimer_restart(&ap_poll_timer);
		}
		spin_unlock_bh(&ap_poll_timer_lock);
		break;
	case AP_SM_WAIT_NONE:
	default:
		/* nothing to do */
		break;
	}
}

/**
 * ap_request_timeout(): Handling of request timeouts
427
 * @t: timer making this callback
428 429 430
 *
 * Handles request timeouts.
 */
431
void ap_request_timeout(struct timer_list *t)
432
{
433
	struct ap_queue *aq = from_timer(aq, t, timeout);
434

435
	spin_lock_bh(&aq->lock);
436
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
437
	spin_unlock_bh(&aq->lock);
438 439 440 441 442 443 444 445 446 447
}

/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
448
	tasklet_schedule(&ap_tasklet);
449 450 451 452 453 454
	return HRTIMER_NORESTART;
}

/**
 * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
 * @airq: pointer to adapter interrupt descriptor
455
 * @floating: ignored
456
 */
457
static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
458 459
{
	inc_irq_stat(IRQIO_APB);
460
	tasklet_schedule(&ap_tasklet);
461 462 463 464 465 466 467 468 469 470
}

/**
 * ap_tasklet_fn(): Tasklet to poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus.
 */
static void ap_tasklet_fn(unsigned long dummy)
{
	int bkt;
	struct ap_queue *aq;
	enum ap_sm_wait wait = AP_SM_WAIT_NONE;

	/* Reset the indicator if interrupts are used. Thus new interrupts can
	 * be received. It is therefore important to do this at the beginning
	 * of the tasklet so that no requests on any AP get lost.
	 */
	if (ap_irq_flag)
		xchg(ap_airq.lsi_ptr, 0);

	/* poll every known queue; keep the strongest wait requirement */
	spin_lock_bh(&ap_queues_lock);
	hash_for_each(ap_queues, bkt, aq, hnode) {
		spin_lock_bh(&aq->lock);
		wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}
	spin_unlock_bh(&ap_queues_lock);

	/* arrange for the next poll according to the combined result */
	ap_wait(wait);
}

493 494
static int ap_pending_requests(void)
{
495
	int bkt;
496 497
	struct ap_queue *aq;

498 499 500 501 502 503
	spin_lock_bh(&ap_queues_lock);
	hash_for_each(ap_queues, bkt, aq, hnode) {
		if (aq->queue_count == 0)
			continue;
		spin_unlock_bh(&ap_queues_lock);
		return 1;
504
	}
505
	spin_unlock_bh(&ap_queues_lock);
506
	return 0;
507 508
}

509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527
/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);

	/* lowest priority: only poll when nothing better is runnable */
	set_user_nice(current, MAX_NICE);
	set_freezable();
	while (!kthread_should_stop()) {
		/* register on the waitqueue BEFORE checking for work to
		 * avoid losing a wake_up between check and schedule() */
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!ap_pending_requests()) {
			schedule();
			try_to_freeze();
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);
		/* yield to any other runnable task before polling */
		if (need_resched()) {
			schedule();
			try_to_freeze();
			continue;
		}
		ap_tasklet_fn(0);
	}

	return 0;
}

static int ap_poll_thread_start(void)
{
	int rc;

549
	if (ap_irq_flag || ap_poll_kthread)
550 551 552
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
553
	rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569
	if (rc)
		ap_poll_kthread = NULL;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

/*
 * ap_poll_thread_stop(): Stop the poll kthread if one is running and
 * clear the global thread pointer under the poll thread mutex.
 */
static void ap_poll_thread_stop(void)
{
	if (!ap_poll_kthread)
		return;
	mutex_lock(&ap_poll_thread_mutex);
	kthread_stop(ap_poll_kthread);
	ap_poll_kthread = NULL;
	mutex_unlock(&ap_poll_thread_mutex);
}

570 571
/* Devices directly below the bus root are cards; everything deeper is a queue */
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
572 573

/**
574 575 576 577
 * ap_bus_match()
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
578 579 580 581 582 583 584
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

585
	/*
586 587 588 589
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
590 591 592 593 594 595 596 597
		if (is_card_dev(dev) &&
		    id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
		    id->dev_type == to_ap_dev(dev)->device_type)
			return 1;
		if (is_queue_dev(dev) &&
		    id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
		    id->dev_type == to_ap_dev(dev)->device_type)
			return 1;
598 599 600 601 602
	}
	return 0;
}

/**
603 604 605 606 607 608
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
609
 */
610
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
611
{
612
	int rc = 0;
613 614
	struct ap_device *ap_dev = to_ap_dev(dev);

615 616 617
	/* Uevents from ap bus core don't need extensions to the env */
	if (dev == ap_root_device)
		return 0;
618

619 620
	if (is_card_dev(dev)) {
		struct ap_card *ac = to_ap_card(&ap_dev->device);
621

622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652
		/* Set up DEV_TYPE environment variable. */
		rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
		if (rc)
			return rc;
		/* Add MODALIAS= */
		rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
		if (rc)
			return rc;

		/* Add MODE=<accel|cca|ep11> */
		if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
			rc = add_uevent_var(env, "MODE=accel");
		else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
			rc = add_uevent_var(env, "MODE=cca");
		else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	} else {
		struct ap_queue *aq = to_ap_queue(&ap_dev->device);

		/* Add MODE=<accel|cca|ep11> */
		if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
			rc = add_uevent_var(env, "MODE=accel");
		else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
			rc = add_uevent_var(env, "MODE=cca");
		else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	}
653 654 655 656 657 658 659 660 661 662 663 664 665

	return 0;
}

/* Emit the one-time "initial bus scan finished" uevent on the bus root. */
static void ap_send_init_scan_done_uevent(void)
{
	char *envp[] = { "INITSCAN=done", NULL };

	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}

static void ap_send_bindings_complete_uevent(void)
{
666 667
	char buf[32];
	char *envp[] = { "BINDINGS=complete", buf, NULL };
668

669 670
	snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
		 atomic64_inc_return(&ap_bindings_complete_count));
671 672 673
	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}

674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695
/* Notify userspace about a config state change of an AP device. */
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
{
	char cfgbuf[16];
	char *envp[] = { cfgbuf, NULL };

	snprintf(cfgbuf, sizeof(cfgbuf), "CONFIG=%d", cfg ? 1 : 0);
	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_config_uevent);

/* Notify userspace about an online state change of an AP device. */
void ap_send_online_uevent(struct ap_device *ap_dev, int online)
{
	char onbuf[16];
	char *envp[] = { onbuf, NULL };

	snprintf(onbuf, sizeof(onbuf), "ONLINE=%d", online ? 1 : 0);
	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_online_uevent);

696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713
/*
 * Emit a uevent on the bus root when the apmask or aqmask changed.
 * Exactly one of @newapm / @newaqm is non-NULL and names the mask
 * that was modified.
 */
static void ap_send_mask_changed_uevent(unsigned long *newapm,
					unsigned long *newaqm)
{
	char buf[100];
	char *envp[] = { buf, NULL };

	if (newapm)
		snprintf(buf, sizeof(buf),
			 "APMASK=0x%016lx%016lx%016lx%016lx\n",
			 newapm[0], newapm[1], newapm[2], newapm[3]);
	else
		snprintf(buf, sizeof(buf),
			 "AQMASK=0x%016lx%016lx%016lx%016lx\n",
			 newaqm[0], newaqm[1], newaqm[2], newaqm[3]);

	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}

714 715 716 717 718 719 720 721 722 723 724
/*
 * calc # of bound APQNs
 */

struct __ap_calc_ctrs {
	unsigned int apqns;
	unsigned int bound;
};

static int __ap_calc_helper(struct device *dev, void *arg)
{
H
Harald Freudenberger 已提交
725
	struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;
726 727 728

	if (is_queue_dev(dev)) {
		pctrs->apqns++;
729
		if (dev->driver)
730 731 732 733 734 735 736 737 738 739 740
			pctrs->bound++;
	}

	return 0;
}

static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
{
	struct __ap_calc_ctrs ctrs;

	memset(&ctrs, 0, sizeof(ctrs));
H
Harald Freudenberger 已提交
741
	bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper);
742

743 744
	*apqns = ctrs.apqns;
	*bound = ctrs.bound;
745 746
}

747 748 749 750 751 752 753 754 755 756 757 758 759
/*
 * After initial ap bus scan do check if all existing APQNs are
 * bound to device drivers.
 */
static void ap_check_bindings_complete(void)
{
	unsigned int apqns, bound;

	/* meaningless before the first full bus scan has run */
	if (atomic64_read(&ap_scan_bus_count) < 1)
		return;

	ap_calc_bound_apqns(&apqns, &bound);
	if (bound != apqns)
		return;

	if (!completion_done(&ap_init_apqn_bindings_complete)) {
		complete_all(&ap_init_apqn_bindings_complete);
		AP_DBF_INFO("%s complete\n", __func__);
	}
	ap_send_bindings_complete_uevent();
}

/*
 * Interface to wait for the AP bus to have done one initial ap bus
 * scan and all detected APQNs have been bound to device drivers.
 * If these both conditions are not fulfilled, this function blocks
 * on a condition with wait_for_completion_interruptible_timeout().
 * If these both conditions are fulfilled (before the timeout hits)
 * the return value is 0. If the timeout (in jiffies) hits instead
 * -ETIME is returned. On failures negative return values are
 * returned to the caller.
 */
int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
{
	long rc;

	if (completion_done(&ap_init_apqn_bindings_complete))
		return 0;

	if (timeout)
		rc = wait_for_completion_interruptible_timeout(
			&ap_init_apqn_bindings_complete, timeout);
	else
		rc = wait_for_completion_interruptible(
			&ap_init_apqn_bindings_complete);

	if (rc < 0)
		return (rc == -ERESTARTSYS) ? -EINTR : rc;
	if (rc == 0 && timeout)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);

799
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
800
{
801
	if (is_queue_dev(dev) &&
H
Harald Freudenberger 已提交
802
	    AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
803
		device_unregister(dev);
804 805 806
	return 0;
}

807 808 809 810 811 812 813 814
static int __ap_revise_reserved(struct device *dev, void *dummy)
{
	int rc, card, queue, devres, drvres;

	if (is_queue_dev(dev)) {
		card = AP_QID_CARD(to_ap_queue(dev)->qid);
		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
		mutex_lock(&ap_perms_mutex);
H
Harald Freudenberger 已提交
815 816
		devres = test_bit_inv(card, ap_perms.apm) &&
			test_bit_inv(queue, ap_perms.aqm);
817 818 819 820
		mutex_unlock(&ap_perms_mutex);
		drvres = to_ap_drv(dev->driver)->flags
			& AP_DRIVER_FLAG_DEFAULT;
		if (!!devres != !!drvres) {
821 822
			AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
				   __func__, card, queue);
823
			rc = device_reprobe(dev);
824 825 826
			if (rc)
				AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
					    __func__, card, queue);
827 828 829 830 831 832 833 834 835 836 837
		}
	}

	return 0;
}

/* Re-evaluate driver bindings of all queue devices on the ap bus. */
static void ap_bus_revise_bindings(void)
{
	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}

838 839 840 841 842 843 844 845 846 847 848
/**
 * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
 *			default host driver or not.
 * @card: the APID of the adapter card to check
 * @queue: the APQI of the queue to check
 *
 * Note: the ap_perms_mutex must be locked by the caller of this function.
 *
 * Return: an int specifying whether the AP adapter is reserved for the host (1)
 *	   or not (0).
 */
849 850 851 852 853 854 855
int ap_owned_by_def_drv(int card, int queue)
{
	int rc = 0;

	if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
		return -EINVAL;

H
Harald Freudenberger 已提交
856 857
	if (test_bit_inv(card, ap_perms.apm) &&
	    test_bit_inv(queue, ap_perms.aqm))
858 859 860 861 862 863
		rc = 1;

	return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);

864 865 866 867 868 869 870 871 872 873 874 875
/**
 * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in
 *				       a set is reserved for the host drivers
 *				       or not.
 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
 *
 * Note: the ap_perms_mutex must be locked by the caller of this function.
 *
 * Return: an int specifying whether each APQN is reserved for the host (1) or
 *	   not (0)
 */
876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
				       unsigned long *aqm)
{
	int card, queue, rc = 0;

	for (card = 0; !rc && card < AP_DEVICES; card++)
		if (test_bit_inv(card, apm) &&
		    test_bit_inv(card, ap_perms.apm))
			for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
				if (test_bit_inv(queue, aqm) &&
				    test_bit_inv(queue, ap_perms.aqm))
					rc = 1;

	return rc;
}
EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);

893 894 895
/*
 * ap_device_probe(): bus probe callback for AP card and queue devices.
 * @dev: the device to probe
 *
 * Enforces the default-driver reservation policy for queue devices,
 * registers the queue in the ap_queues hashtable, and invokes the
 * driver's probe function. Holds an extra device reference for the
 * lifetime of a successful binding (released in ap_device_remove).
 * Returns 0 on success or a negative errno.
 */
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int card, queue, devres, drvres, rc = -ENODEV;

	/* take a reference kept while the driver stays bound */
	if (!get_device(dev))
		return rc;

	if (is_queue_dev(dev)) {
		/*
		 * If the apqn is marked as reserved/used by ap bus and
		 * default drivers, only probe with drivers with the default
		 * flag set. If it is not marked, only probe with drivers
		 * with the default flag not set.
		 */
		card = AP_QID_CARD(to_ap_queue(dev)->qid);
		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
		mutex_lock(&ap_perms_mutex);
		devres = test_bit_inv(card, ap_perms.apm) &&
			test_bit_inv(queue, ap_perms.aqm);
		mutex_unlock(&ap_perms_mutex);
		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
		if (!!devres != !!drvres)
			goto out;
	}

	/* Add queue/card to list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_add(ap_queues, &to_ap_queue(dev)->hnode,
			 to_ap_queue(dev)->qid);
	spin_unlock_bh(&ap_queues_lock);

	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;

	if (rc) {
		/* probe failed: undo the hashtable registration */
		spin_lock_bh(&ap_queues_lock);
		if (is_queue_dev(dev))
			hash_del(&to_ap_queue(dev)->hnode);
		spin_unlock_bh(&ap_queues_lock);
	} else {
		ap_check_bindings_complete();
	}

out:
	/* on any failure drop the reference taken above */
	if (rc)
		put_device(dev);
	return rc;
}

944
/*
 * ap_device_remove(): bus remove callback for AP card and queue devices.
 * @dev: the device being unbound
 *
 * Tears down a device binding in a fixed order: queue removal is
 * prepared first, then the driver's remove callback runs, then the
 * queue is finally removed and taken out of the ap_queues hashtable.
 * Drops the device reference taken in ap_device_probe().
 */
static void ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);

	/* prepare ap queue device removal */
	if (is_queue_dev(dev))
		ap_queue_prepare_remove(to_ap_queue(dev));

	/* driver's chance to clean up gracefully */
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);

	/* now do the ap queue device remove */
	if (is_queue_dev(dev))
		ap_queue_remove(to_ap_queue(dev));

	/* Remove queue/card from list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_del(&to_ap_queue(dev)->hnode);
	spin_unlock_bh(&ap_queues_lock);

	/* release the reference held since ap_device_probe() */
	put_device(dev);
}

970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988
/*
 * ap_get_qdev(): Look up the queue device for an APQN.
 * @qid: the APQN to search for
 *
 * Returns the ap_queue with an additional device reference held,
 * or NULL if no such queue is registered. The caller must drop the
 * reference with put_device() when done.
 */
struct ap_queue *ap_get_qdev(ap_qid_t qid)
{
	struct ap_queue *aq, *found = NULL;
	int bkt;

	spin_lock_bh(&ap_queues_lock);
	hash_for_each(ap_queues, bkt, aq, hnode) {
		if (aq->qid == qid) {
			get_device(&aq->ap_dev.device);
			found = aq;
			break;
		}
	}
	spin_unlock_bh(&ap_queues_lock);

	return found;
}
EXPORT_SYMBOL(ap_get_qdev);

989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006
/* Register an AP driver with the driver core on the ap bus. */
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	ap_drv->driver.bus = &ap_bus_type;
	ap_drv->driver.owner = owner;
	ap_drv->driver.name = name;

	return driver_register(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_register);

/* Unregister an AP driver from the driver core. */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

1007 1008
void ap_bus_force_rescan(void)
{
1009
	/* processing a asynchronous bus rescan */
1010
	del_timer(&ap_config_timer);
1011 1012
	queue_work(system_long_wq, &ap_scan_work);
	flush_work(&ap_scan_work);
1013 1014 1015
}
EXPORT_SYMBOL(ap_bus_force_rescan);

1016
/*
 * A config change has happened, force an ap bus rescan.
 */
void ap_bus_cfg_chg(void)
{
	AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
	ap_bus_force_rescan();
}

1026
/*
1027 1028 1029 1030 1031 1032
 * hex2bitmap() - parse hex mask string and set bitmap.
 * Valid strings are "0x012345678" with at least one valid hex number.
 * Rest of the bitmap to the right is padded with 0. No spaces allowed
 * within the string, the leading 0x may be omitted.
 * Returns the bitmask with exactly the bits set as given by the hex
 * string (both in big endian order).
1033
 */
1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054
static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
{
	int i, n, b;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	if (str[0] == '0' && str[1] == 'x')
		str++;
	if (*str == 'x')
		str++;

	for (i = 0; isxdigit(*str) && i < bits; str++) {
		b = hex_to_bin(*str);
		for (n = 0; n < 4; n++)
			if (b & (0x08 >> n))
				set_bit_inv(i + n, bitmap);
		i += 4;
	}

1055 1056 1057 1058 1059 1060 1061 1062
	if (*str == '\n')
		str++;
	if (*str)
		return -EINVAL;
	return 0;
}

/*
 * modify_bitmap() - parse bitmask argument and modify an existing
 * bit mask accordingly. A concatenation (done with ',') of these
 * terms is recognized:
 *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
 * <bitnr> may be any valid number (hex, decimal or octal) in the range
 * 0...bits-1; the leading + or - is required. Here are some examples:
 *   +0-15,+32,-128,-0xFF
 *   -0-255,+1-16,+0x128
 *   +1,+2,+3,+4,-5,-7-10
 * Returns the new bitmap after all changes have been applied. Every
 * positive value in the string will set a bit and every negative value
 * in the string will clear a bit. As a bit may be touched more than once,
 * the last 'operation' wins:
 * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
 * cleared again. All other bits are unmodified.
 */
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
	int a, i, z;		/* a..z = bit range of the current term */
	char *np, sign;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	while (*str) {
		/* each term starts with a mandatory '+' (set) or '-' (clear) */
		sign = *str++;
		if (sign != '+' && sign != '-')
			return -EINVAL;
		a = z = simple_strtoul(str, &np, 0);
		if (str == np || a >= bits)
			return -EINVAL;
		str = np;
		/* optional "-<bitnr>" extends the term to a range */
		if (*str == '-') {
			z = simple_strtoul(++str, &np, 0);
			if (str == np || a > z || z >= bits)
				return -EINVAL;
			str = np;
		}
		for (i = a; i <= z; i++)
			if (sign == '+')
				set_bit_inv(i, bitmap);
			else
				clear_bit_inv(i, bitmap);
		/* skip term separators and a possible trailing newline */
		while (*str == ',' || *str == '\n')
			str++;
	}

	return 0;
}

/*
 * ap_parse_bitmap_str() - helper dispatching a mask string to the
 * right parser: a string starting with '+' or '-' is a relative
 * modification of the current bitmap (modify_bitmap()), anything
 * else is an absolute hex value replacing it (hex2bitmap()).
 * The result is always built in newmap; bitmap is left untouched.
 */
static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
			       unsigned long *newmap)
{
	unsigned long size;
	int rc;

	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
	if (*str == '+' || *str == '-') {
		/* relative: start from the current mask and modify it */
		memcpy(newmap, bitmap, size);
		rc = modify_bitmap(str, newmap, bits);
	} else {
		/* absolute: start from an empty mask */
		memset(newmap, 0, size);
		rc = hex2bitmap(str, newmap, bits);
	}
	return rc;
}

1131 1132 1133
int ap_parse_mask_str(const char *str,
		      unsigned long *bitmap, int bits,
		      struct mutex *lock)
1134
{
1135 1136
	unsigned long *newmap, size;
	int rc;
1137 1138 1139 1140 1141

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

H
Harald Freudenberger 已提交
1142
	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
1143 1144 1145 1146 1147 1148 1149
	newmap = kmalloc(size, GFP_KERNEL);
	if (!newmap)
		return -ENOMEM;
	if (mutex_lock_interruptible(lock)) {
		kfree(newmap);
		return -ERESTARTSYS;
	}
1150
	rc = ap_parse_bitmap_str(str, bitmap, bits, newmap);
1151 1152
	if (rc == 0)
		memcpy(bitmap, newmap, size);
1153
	mutex_unlock(lock);
1154 1155
	kfree(newmap);
	return rc;
1156
}
1157
EXPORT_SYMBOL(ap_parse_mask_str);

/*
 * AP bus attributes.
 */

1163 1164
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
1165
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
1166 1167
}

1168 1169 1170 1171 1172 1173
static ssize_t ap_domain_store(struct bus_type *bus,
			       const char *buf, size_t count)
{
	int domain;

	if (sscanf(buf, "%i\n", &domain) != 1 ||
1174 1175
	    domain < 0 || domain > ap_max_domain_id ||
	    !test_bit_inv(domain, ap_perms.aqm))
1176
		return -EINVAL;
1177

1178 1179 1180
	spin_lock_bh(&ap_domain_lock);
	ap_domain_index = domain;
	spin_unlock_bh(&ap_domain_lock);
1181

1182 1183
	AP_DBF_INFO("%s stored new default domain=%d\n",
		    __func__, domain);
1184

1185 1186 1187
	return count;
}

1188
static BUS_ATTR_RW(ap_domain);
1189

1190 1191
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
1192
	if (!ap_qci_info)	/* QCI not supported */
1193 1194 1195 1196
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1197 1198 1199 1200
			 ap_qci_info->adm[0], ap_qci_info->adm[1],
			 ap_qci_info->adm[2], ap_qci_info->adm[3],
			 ap_qci_info->adm[4], ap_qci_info->adm[5],
			 ap_qci_info->adm[6], ap_qci_info->adm[7]);
1201 1202
}

1203
static BUS_ATTR_RO(ap_control_domain_mask);
1204

1205 1206
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
1207
	if (!ap_qci_info)	/* QCI not supported */
1208 1209 1210 1211
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1212 1213 1214 1215
			 ap_qci_info->aqm[0], ap_qci_info->aqm[1],
			 ap_qci_info->aqm[2], ap_qci_info->aqm[3],
			 ap_qci_info->aqm[4], ap_qci_info->aqm[5],
			 ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
1216 1217
}

1218
static BUS_ATTR_RO(ap_usage_domain_mask);
1219

1220 1221
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
1222
	if (!ap_qci_info)	/* QCI not supported */
1223 1224 1225 1226
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1227 1228 1229 1230
			 ap_qci_info->apm[0], ap_qci_info->apm[1],
			 ap_qci_info->apm[2], ap_qci_info->apm[3],
			 ap_qci_info->apm[4], ap_qci_info->apm[5],
			 ap_qci_info->apm[6], ap_qci_info->apm[7]);
1231 1232 1233 1234
}

static BUS_ATTR_RO(ap_adapter_mask);

F
Felix Beck 已提交
1235 1236
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
1237
	return scnprintf(buf, PAGE_SIZE, "%d\n",
1238
			 ap_irq_flag ? 1 : 0);
F
Felix Beck 已提交
1239 1240
}

1241 1242 1243 1244
static BUS_ATTR_RO(ap_interrupts);

static ssize_t config_time_show(struct bus_type *bus, char *buf)
{
1245
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
1246
}
F
Felix Beck 已提交
1247

1248 1249
static ssize_t config_time_store(struct bus_type *bus,
				 const char *buf, size_t count)
1250 1251 1252 1253 1254 1255
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
1256
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
1257 1258 1259
	return count;
}

1260
static BUS_ATTR_RW(config_time);
1261

1262
static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
1263
{
1264
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
1265 1266
}

1267 1268
static ssize_t poll_thread_store(struct bus_type *bus,
				 const char *buf, size_t count)
1269 1270 1271 1272 1273 1274 1275 1276
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
1277
			count = rc;
H
Harald Freudenberger 已提交
1278
	} else {
1279
		ap_poll_thread_stop();
H
Harald Freudenberger 已提交
1280
	}
1281 1282 1283
	return count;
}

1284
static BUS_ATTR_RW(poll_thread);
1285

1286 1287
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
1288
	return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
1289 1290 1291 1292 1293 1294 1295 1296 1297
}

static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
F
Felix Beck 已提交
1298 1299
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
1300 1301
		return -EINVAL;
	poll_timeout = time;
T
Thomas Gleixner 已提交
1302
	hr_time = poll_timeout;
1303

1304 1305 1306 1307 1308 1309
	spin_lock_bh(&ap_poll_timer_lock);
	hrtimer_cancel(&ap_poll_timer);
	hrtimer_set_expires(&ap_poll_timer, hr_time);
	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	spin_unlock_bh(&ap_poll_timer_lock);

1310 1311 1312
	return count;
}

1313
static BUS_ATTR_RW(poll_timeout);
1314

1315 1316
static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
{
1317
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
1318 1319
}

1320
static BUS_ATTR_RO(ap_max_domain_id);
1321

1322 1323 1324 1325 1326 1327 1328
static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
}

static BUS_ATTR_RO(ap_max_adapter_id);

1329 1330 1331 1332 1333 1334
static ssize_t apmask_show(struct bus_type *bus, char *buf)
{
	int rc;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;
1335 1336 1337 1338
	rc = scnprintf(buf, PAGE_SIZE,
		       "0x%016lx%016lx%016lx%016lx\n",
		       ap_perms.apm[0], ap_perms.apm[1],
		       ap_perms.apm[2], ap_perms.apm[3]);
1339 1340 1341 1342 1343
	mutex_unlock(&ap_perms_mutex);

	return rc;
}

1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389
static int __verify_card_reservations(struct device_driver *drv, void *data)
{
	int rc = 0;
	struct ap_driver *ap_drv = to_ap_drv(drv);
	unsigned long *newapm = (unsigned long *)data;

	/*
	 * increase the driver's module refcounter to be sure it is not
	 * going away when we invoke the callback function.
	 */
	if (!try_module_get(drv->owner))
		return 0;

	if (ap_drv->in_use) {
		rc = ap_drv->in_use(newapm, ap_perms.aqm);
		if (rc)
			rc = -EBUSY;
	}

	/* release the driver's module */
	module_put(drv->owner);

	return rc;
}

/*
 * Commit a new adapter permission mask. If the change would remove
 * adapters, first verify with all registered drivers that none of the
 * affected queues is in use. Caller must hold ap_perms_mutex.
 */
static int apmask_commit(unsigned long *newapm)
{
	int rc;
	unsigned long reserved[BITS_TO_LONGS(AP_DEVICES)];

	/*
	 * Check if any bits in the apmask have been set which will
	 * result in queues being removed from non-default drivers
	 */
	if (bitmap_andnot(reserved, newapm, ap_perms.apm, AP_DEVICES)) {
		rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
				      __verify_card_reservations);
		if (rc)
			return rc;
	}

	memcpy(ap_perms.apm, newapm, APMASKSIZE);

	return 0;
}

1390 1391 1392
static ssize_t apmask_store(struct bus_type *bus, const char *buf,
			    size_t count)
{
1393
	int rc, changes = 0;
1394 1395 1396 1397
	DECLARE_BITMAP(newapm, AP_DEVICES);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;
1398

1399 1400 1401 1402
	rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
	if (rc)
		goto done;

1403 1404 1405
	changes = memcmp(ap_perms.apm, newapm, APMASKSIZE);
	if (changes)
		rc = apmask_commit(newapm);
1406 1407 1408

done:
	mutex_unlock(&ap_perms_mutex);
1409 1410
	if (rc)
		return rc;
1411

1412 1413 1414 1415
	if (changes) {
		ap_bus_revise_bindings();
		ap_send_mask_changed_uevent(newapm, NULL);
	}
1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427

	return count;
}

static BUS_ATTR_RW(apmask);

static ssize_t aqmask_show(struct bus_type *bus, char *buf)
{
	int rc;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;
1428 1429 1430 1431
	rc = scnprintf(buf, PAGE_SIZE,
		       "0x%016lx%016lx%016lx%016lx\n",
		       ap_perms.aqm[0], ap_perms.aqm[1],
		       ap_perms.aqm[2], ap_perms.aqm[3]);
1432 1433 1434 1435 1436
	mutex_unlock(&ap_perms_mutex);

	return rc;
}

1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482
static int __verify_queue_reservations(struct device_driver *drv, void *data)
{
	int rc = 0;
	struct ap_driver *ap_drv = to_ap_drv(drv);
	unsigned long *newaqm = (unsigned long *)data;

	/*
	 * increase the driver's module refcounter to be sure it is not
	 * going away when we invoke the callback function.
	 */
	if (!try_module_get(drv->owner))
		return 0;

	if (ap_drv->in_use) {
		rc = ap_drv->in_use(ap_perms.apm, newaqm);
		if (rc)
			return -EBUSY;
	}

	/* release the driver's module */
	module_put(drv->owner);

	return rc;
}

/*
 * Commit a new domain permission mask. If the change would remove
 * domains, first verify with all registered drivers that none of the
 * affected queues is in use. Caller must hold ap_perms_mutex.
 */
static int aqmask_commit(unsigned long *newaqm)
{
	int rc;
	unsigned long reserved[BITS_TO_LONGS(AP_DOMAINS)];

	/*
	 * Check if any bits in the aqmask have been set which will
	 * result in queues being removed from non-default drivers
	 */
	if (bitmap_andnot(reserved, newaqm, ap_perms.aqm, AP_DOMAINS)) {
		rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
				      __verify_queue_reservations);
		if (rc)
			return rc;
	}

	memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);

	return 0;
}

1483 1484 1485
static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
			    size_t count)
{
1486
	int rc, changes = 0;
1487
	DECLARE_BITMAP(newaqm, AP_DOMAINS);
1488

1489 1490 1491 1492 1493 1494 1495
	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
	if (rc)
		goto done;

1496 1497 1498
	changes = memcmp(ap_perms.aqm, newaqm, APMASKSIZE);
	if (changes)
		rc = aqmask_commit(newaqm);
1499 1500 1501

done:
	mutex_unlock(&ap_perms_mutex);
1502 1503
	if (rc)
		return rc;
1504

1505 1506 1507 1508
	if (changes) {
		ap_bus_revise_bindings();
		ap_send_mask_changed_uevent(NULL, newaqm);
	}
1509 1510 1511 1512 1513 1514

	return count;
}

static BUS_ATTR_RW(aqmask);

1515 1516 1517 1518 1519 1520
static ssize_t scans_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			 atomic64_read(&ap_scan_bus_count));
}

1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531
static ssize_t scans_store(struct bus_type *bus, const char *buf,
			   size_t count)
{
	AP_DBF_INFO("%s force AP bus rescan\n", __func__);

	ap_bus_force_rescan();

	return count;
}

static BUS_ATTR_RW(scans);
1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548

static ssize_t bindings_show(struct bus_type *bus, char *buf)
{
	int rc;
	unsigned int apqns, n;

	ap_calc_bound_apqns(&apqns, &n);
	if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
		rc = scnprintf(buf, PAGE_SIZE, "%u/%u (complete)\n", n, apqns);
	else
		rc = scnprintf(buf, PAGE_SIZE, "%u/%u\n", n, apqns);

	return rc;
}

static BUS_ATTR_RO(bindings);

1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563
static struct attribute *ap_bus_attrs[] = {
	&bus_attr_ap_domain.attr,
	&bus_attr_ap_control_domain_mask.attr,
	&bus_attr_ap_usage_domain_mask.attr,
	&bus_attr_ap_adapter_mask.attr,
	&bus_attr_config_time.attr,
	&bus_attr_poll_thread.attr,
	&bus_attr_ap_interrupts.attr,
	&bus_attr_poll_timeout.attr,
	&bus_attr_ap_max_domain_id.attr,
	&bus_attr_ap_max_adapter_id.attr,
	&bus_attr_apmask.attr,
	&bus_attr_aqmask.attr,
	&bus_attr_scans.attr,
	&bus_attr_bindings.attr,
1564
	NULL,
1565
};
1566 1567 1568 1569 1570 1571 1572
ATTRIBUTE_GROUPS(ap_bus);

static struct bus_type ap_bus_type = {
	.name = "ap",
	.bus_groups = ap_bus_groups,
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
1573 1574
	.probe = ap_device_probe,
	.remove = ap_device_remove,
1575
};
1576 1577

/**
1578 1579
 * ap_select_domain(): Select an AP domain if possible and we haven't
 * already done so before.
1580
 */
1581
static void ap_select_domain(void)
1582
{
1583
	struct ap_queue_status status;
1584
	int card, dom;
1585

1586
	/*
1587 1588 1589
	 * Choose the default domain. Either the one specified with
	 * the "domain=" parameter or the first domain with at least
	 * one valid APQN.
1590
	 */
1591 1592
	spin_lock_bh(&ap_domain_lock);
	if (ap_domain_index >= 0) {
1593
		/* Domain has already been selected. */
1594
		goto out;
1595
	}
1596 1597 1598
	for (dom = 0; dom <= ap_max_domain_id; dom++) {
		if (!ap_test_config_usage_domain(dom) ||
		    !test_bit_inv(dom, ap_perms.aqm))
1599
			continue;
1600 1601 1602
		for (card = 0; card <= ap_max_adapter_id; card++) {
			if (!ap_test_config_card_id(card) ||
			    !test_bit_inv(card, ap_perms.apm))
1603
				continue;
1604
			status = ap_test_queue(AP_MKQID(card, dom),
1605 1606
					       ap_apft_available(),
					       NULL);
1607 1608
			if (status.response_code == AP_RESPONSE_NORMAL)
				break;
1609
		}
1610 1611
		if (card <= ap_max_adapter_id)
			break;
1612
	}
1613 1614
	if (dom <= ap_max_domain_id) {
		ap_domain_index = dom;
1615 1616
		AP_DBF_INFO("%s new default domain is %d\n",
			    __func__, ap_domain_index);
1617
	}
1618
out:
1619
	spin_unlock_bh(&ap_domain_lock);
1620 1621
}

1622 1623 1624 1625 1626 1627 1628 1629 1630 1631
/*
 * This function checks the type and returns either 0 for not
 * supported or the highest compatible type value (which may
 * include the input type value).
 */
static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
{
	int comp_type = 0;

	/* < CEX2A is not supported */
1632
	if (rawtype < AP_DEVICE_TYPE_CEX2A) {
1633 1634 1635
		AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
			    __func__, AP_QID_CARD(qid),
			    AP_QID_QUEUE(qid), rawtype);
1636
		return 0;
1637
	}
1638 1639
	/* up to CEX8 known and fully supported */
	if (rawtype <= AP_DEVICE_TYPE_CEX8)
1640 1641
		return rawtype;
	/*
1642
	 * unknown new type > CEX8, check for compatibility
1643
	 * to the highest known and supported type which is
1644
	 * currently CEX8 with the help of the QACT function.
1645 1646 1647
	 */
	if (ap_qact_available()) {
		struct ap_queue_status status;
1648
		union ap_qact_ap_info apinfo = {0};
1649 1650

		apinfo.mode = (func >> 26) & 0x07;
1651
		apinfo.cat = AP_DEVICE_TYPE_CEX8;
1652
		status = ap_qact(qid, 0, &apinfo);
H
Harald Freudenberger 已提交
1653 1654 1655
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    apinfo.cat >= AP_DEVICE_TYPE_CEX2A &&
		    apinfo.cat <= AP_DEVICE_TYPE_CEX8)
1656 1657 1658
			comp_type = apinfo.cat;
	}
	if (!comp_type)
1659 1660 1661
		AP_DBF_WARN("%s queue=%02x.%04x unable to map type %d\n",
			    __func__, AP_QID_CARD(qid),
			    AP_QID_QUEUE(qid), rawtype);
1662
	else if (comp_type != rawtype)
1663 1664
		AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
			    __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
1665
			    rawtype, comp_type);
1666 1667 1668
	return comp_type;
}

1669
/*
1670
 * Helper function to be used with bus_find_dev
1671
 * matches for the card device with the given id
1672
 */
1673
static int __match_card_device_with_id(struct device *dev, const void *data)
1674
{
H
Harald Freudenberger 已提交
1675
	return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data;
1676 1677
}

1678 1679
/*
 * Helper function to be used with bus_find_dev
1680 1681
 * matches for the queue device with a given qid
 */
1682
static int __match_queue_device_with_qid(struct device *dev, const void *data)
1683
{
H
Harald Freudenberger 已提交
1684
	return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
1685 1686
}

1687 1688 1689 1690
/*
 * Helper function to be used with bus_find_dev
 * matches any queue device with given queue id
 */
1691
static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
1692
{
H
Harald Freudenberger 已提交
1693 1694
	return is_queue_dev(dev) &&
		AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
1695 1696
}

/* Helper function for notify_config_changed */
static int __drv_notify_config_changed(struct device_driver *drv, void *data)
{
	struct ap_driver *ap_drv = to_ap_drv(drv);

	/* pin the driver module while invoking its callback */
	if (try_module_get(drv->owner)) {
		if (ap_drv->on_config_changed)
			ap_drv->on_config_changed(ap_qci_info, ap_qci_info_old);
		module_put(drv->owner);
	}

	return 0;
}

/* Notify all drivers about an qci config change */
static inline void notify_config_changed(void)
{
	bus_for_each_drv(&ap_bus_type, NULL, NULL,
			 __drv_notify_config_changed);
}

/* Helper function for notify_scan_complete */
static int __drv_notify_scan_complete(struct device_driver *drv, void *data)
{
	struct ap_driver *ap_drv = to_ap_drv(drv);

	/* pin the driver module while invoking its callback */
	if (try_module_get(drv->owner)) {
		if (ap_drv->on_scan_complete)
			ap_drv->on_scan_complete(ap_qci_info,
						 ap_qci_info_old);
		module_put(drv->owner);
	}

	return 0;
}

/* Notify all drivers about bus scan complete */
static inline void notify_scan_complete(void)
{
	bus_for_each_drv(&ap_bus_type, NULL, NULL,
			 __drv_notify_scan_complete);
}

1740 1741
/*
 * Helper function for ap_scan_bus().
1742 1743 1744 1745 1746
 * Remove card device and associated queue devices.
 */
static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
{
	bus_for_each_dev(&ap_bus_type, NULL,
H
Harald Freudenberger 已提交
1747
			 (void *)(long)ac->id,
1748 1749 1750 1751 1752 1753 1754 1755
			 __ap_queue_devices_with_id_unregister);
	device_unregister(&ac->ap_dev.device);
}

/*
 * Helper function for ap_scan_bus().
 * Does the scan bus job for all the domains within
 * a valid adapter given by an ap_card ptr.
1756
 */
1757
static inline void ap_scan_domains(struct ap_card *ac)
1758
{
1759
	bool decfg, chkstop;
1760 1761
	ap_qid_t qid;
	unsigned int func;
1762
	struct device *dev;
1763
	struct ap_queue *aq;
1764
	int rc, dom, depth, type, ml;
1765 1766 1767 1768 1769 1770 1771 1772 1773 1774

	/*
	 * Go through the configuration for the domains and compare them
	 * to the existing queue devices. Also take care of the config
	 * and error state for the queue devices.
	 */

	for (dom = 0; dom <= ap_max_domain_id; dom++) {
		qid = AP_MKQID(ac->id, dom);
		dev = bus_find_device(&ap_bus_type, NULL,
H
Harald Freudenberger 已提交
1775
				      (void *)(long)qid,
1776 1777 1778 1779
				      __match_queue_device_with_qid);
		aq = dev ? to_ap_queue(dev) : NULL;
		if (!ap_test_config_usage_domain(dom)) {
			if (dev) {
1780
				AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
1781 1782 1783 1784 1785 1786 1787
					    __func__, ac->id, dom);
				device_unregister(dev);
				put_device(dev);
			}
			continue;
		}
		/* domain is valid, get info from this APQN */
1788 1789
		if (!ap_queue_info(qid, &type, &func, &depth,
				   &ml, &decfg, &chkstop)) {
1790
			if (aq) {
1791 1792
				AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
					    __func__, ac->id, dom);
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807
				device_unregister(dev);
				put_device(dev);
			}
			continue;
		}
		/* if no queue device exists, create a new one */
		if (!aq) {
			aq = ap_queue_create(qid, ac->ap_dev.device_type);
			if (!aq) {
				AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
					    __func__, ac->id, dom);
				continue;
			}
			aq->card = ac;
			aq->config = !decfg;
1808
			aq->chkstop = chkstop;
1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819
			dev = &aq->ap_dev.device;
			dev->bus = &ap_bus_type;
			dev->parent = &ac->ap_dev.device;
			dev_set_name(dev, "%02x.%04x", ac->id, dom);
			/* register queue device */
			rc = device_register(dev);
			if (rc) {
				AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
					    __func__, ac->id, dom);
				goto put_dev_and_continue;
			}
1820 1821
			/* get it and thus adjust reference counter */
			get_device(dev);
1822
			if (decfg)
1823
				AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
1824
					    __func__, ac->id, dom);
1825 1826 1827
			else if (chkstop)
				AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
					    __func__, ac->id, dom);
1828
			else
1829
				AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
1830 1831 1832
					    __func__, ac->id, dom);
			goto put_dev_and_continue;
		}
1833
		/* handle state changes on already existing queue device */
1834
		spin_lock_bh(&aq->lock);
1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861
		/* checkstop state */
		if (chkstop && !aq->chkstop) {
			/* checkstop on */
			aq->chkstop = true;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_ERROR;
				aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
			}
			spin_unlock_bh(&aq->lock);
			AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
				   __func__, ac->id, dom);
			/* 'receive' pending messages with -EAGAIN */
			ap_flush_queue(aq);
			goto put_dev_and_continue;
		} else if (!chkstop && aq->chkstop) {
			/* checkstop off */
			aq->chkstop = false;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_OPERATING;
				aq->sm_state = AP_SM_STATE_RESET_START;
			}
			spin_unlock_bh(&aq->lock);
			AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
				   __func__, ac->id, dom);
			goto put_dev_and_continue;
		}
		/* config state change */
1862 1863 1864 1865 1866 1867 1868 1869
		if (decfg && aq->config) {
			/* config off this queue device */
			aq->config = false;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_ERROR;
				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
			}
			spin_unlock_bh(&aq->lock);
1870 1871
			AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
				   __func__, ac->id, dom);
1872
			ap_send_config_uevent(&aq->ap_dev, aq->config);
1873 1874 1875
			/* 'receive' pending messages with -EAGAIN */
			ap_flush_queue(aq);
			goto put_dev_and_continue;
1876
		} else if (!decfg && !aq->config) {
1877 1878 1879 1880 1881 1882 1883
			/* config on this queue device */
			aq->config = true;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_OPERATING;
				aq->sm_state = AP_SM_STATE_RESET_START;
			}
			spin_unlock_bh(&aq->lock);
1884 1885
			AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
				   __func__, ac->id, dom);
1886
			ap_send_config_uevent(&aq->ap_dev, aq->config);
1887 1888 1889 1890 1891 1892 1893 1894 1895
			goto put_dev_and_continue;
		}
		/* handle other error states */
		if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
			spin_unlock_bh(&aq->lock);
			/* 'receive' pending messages with -EAGAIN */
			ap_flush_queue(aq);
			/* re-init (with reset) the queue device */
			ap_queue_init_state(aq);
1896
			AP_DBF_INFO("%s(%d,%d) queue dev reinit enforced\n",
1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911
				    __func__, ac->id, dom);
			goto put_dev_and_continue;
		}
		spin_unlock_bh(&aq->lock);
put_dev_and_continue:
		put_device(dev);
	}
}

/*
 * Helper function for ap_scan_bus().
 * Does the scan bus job for the given adapter id.
 */
static inline void ap_scan_adapter(int ap)
{
1912
	bool decfg, chkstop;
1913 1914 1915 1916
	ap_qid_t qid;
	unsigned int func;
	struct device *dev;
	struct ap_card *ac;
1917
	int rc, dom, depth, type, comp_type, ml;
1918

1919
	/* Is there currently a card device for this adapter ? */
1920
	dev = bus_find_device(&ap_bus_type, NULL,
H
Harald Freudenberger 已提交
1921
			      (void *)(long)ap,
1922 1923
			      __match_card_device_with_id);
	ac = dev ? to_ap_card(dev) : NULL;
1924 1925 1926 1927

	/* Adapter not in configuration ? */
	if (!ap_test_config_card_id(ap)) {
		if (ac) {
1928
			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devs\n",
1929 1930
				    __func__, ap);
			ap_scan_rm_card_dev_and_queue_devs(ac);
1931 1932 1933 1934
			put_device(dev);
		}
		return;
	}
1935

1936
	/*
1937 1938 1939 1940
	 * Adapter ap is valid in the current configuration. So do some checks:
	 * If no card device exists, build one. If a card device exists, check
	 * for type and functions changed. For all this we need to find a valid
	 * APQN first.
1941
	 */
1942 1943 1944 1945

	for (dom = 0; dom <= ap_max_domain_id; dom++)
		if (ap_test_config_usage_domain(dom)) {
			qid = AP_MKQID(ap, dom);
1946 1947
			if (ap_queue_info(qid, &type, &func, &depth,
					  &ml, &decfg, &chkstop))
1948 1949
				break;
		}
1950 1951 1952
	if (dom > ap_max_domain_id) {
		/* Could not find a valid APQN for this adapter */
		if (ac) {
1953 1954
			AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
				    __func__, ap);
1955 1956 1957 1958 1959
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
		} else {
			AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
				   __func__, ap);
1960
		}
1961 1962 1963 1964 1965
		return;
	}
	if (!type) {
		/* No apdater type info available, an unusable adapter */
		if (ac) {
1966
			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
1967 1968
				    __func__, ap);
			ap_scan_rm_card_dev_and_queue_devs(ac);
1969
			put_device(dev);
1970 1971 1972
		} else {
			AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
				   __func__, ap);
1973
		}
1974
		return;
1975
	}
1976

1977 1978 1979
	if (ac) {
		/* Check APQN against existing card device for changes */
		if (ac->raw_hwtype != type) {
1980
			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
1981 1982 1983 1984 1985
				    __func__, ap, type);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
			ac = NULL;
		} else if (ac->functions != func) {
1986
			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
1987 1988 1989 1990 1991
				    __func__, ap, type);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
			ac = NULL;
		} else {
1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004
			/* handle checkstop state change */
			if (chkstop && !ac->chkstop) {
				/* checkstop on */
				ac->chkstop = true;
				AP_DBF_INFO("%s(%d) card dev checkstop on\n",
					    __func__, ap);
			} else if (!chkstop && ac->chkstop) {
				/* checkstop off */
				ac->chkstop = false;
				AP_DBF_INFO("%s(%d) card dev checkstop off\n",
					    __func__, ap);
			}
			/* handle config state change */
2005 2006
			if (decfg && ac->config) {
				ac->config = false;
2007
				AP_DBF_INFO("%s(%d) card dev config off\n",
2008
					    __func__, ap);
2009
				ap_send_config_uevent(&ac->ap_dev, ac->config);
2010
			} else if (!decfg && !ac->config) {
2011
				ac->config = true;
2012
				AP_DBF_INFO("%s(%d) card dev config on\n",
2013
					    __func__, ap);
2014
				ap_send_config_uevent(&ac->ap_dev, ac->config);
2015
			}
2016
		}
2017 2018 2019 2020
	}

	if (!ac) {
		/* Build a new card device */
2021
		comp_type = ap_get_compatible_type(qid, type, func);
2022 2023 2024 2025 2026
		if (!comp_type) {
			AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
				    __func__, ap, type);
			return;
		}
2027
		ac = ap_card_create(ap, depth, type, comp_type, func, ml);
2028
		if (!ac) {
2029 2030 2031
			AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
				    __func__, ap);
			return;
2032
		}
2033
		ac->config = !decfg;
2034
		ac->chkstop = chkstop;
2035 2036 2037 2038
		dev = &ac->ap_dev.device;
		dev->bus = &ap_bus_type;
		dev->parent = ap_root_device;
		dev_set_name(dev, "card%02x", ap);
2039 2040 2041 2042
		/* maybe enlarge ap_max_msg_size to support this card */
		if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
			atomic_set(&ap_max_msg_size, ac->maxmsgsize);
			AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n",
2043 2044
				    __func__, ap,
				    atomic_read(&ap_max_msg_size));
2045
		}
2046 2047
		/* Register the new card device with AP bus */
		rc = device_register(dev);
2048
		if (rc) {
2049 2050 2051 2052
			AP_DBF_WARN("%s(%d) device_register() failed\n",
				    __func__, ap);
			put_device(dev);
			return;
2053
		}
2054 2055 2056
		/* get it and thus adjust reference counter */
		get_device(dev);
		if (decfg)
2057
			AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
2058
				    __func__, ap, type, func);
2059 2060 2061
		else if (chkstop)
			AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
				    __func__, ap, type, func);
2062
		else
2063
			AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
2064 2065 2066 2067 2068
				    __func__, ap, type, func);
	}

	/* Verify the domains and the queue devices for this card */
	ap_scan_domains(ac);
2069

2070 2071
	/* release the card device */
	put_device(&ac->ap_dev.device);
2072
}
2073

2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092
/**
 * ap_get_configuration - get the host AP configuration
 *
 * Stores the host AP configuration information returned from the previous call
 * to Query Configuration Information (QCI), then retrieves and stores the
 * current AP configuration returned from QCI.
 *
 * Return: true if the host AP configuration changed between calls to QCI;
 * otherwise, return false.
 */
static bool ap_get_configuration(void)
{
	memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
	ap_fetch_qci_info(ap_qci_info);

	return memcmp(ap_qci_info, ap_qci_info_old,
		      sizeof(struct ap_config_info)) != 0;
}

2093 2094 2095
/**
 * ap_scan_bus(): Scan the AP bus for new devices
 * Runs periodically, workqueue timer (ap_config_time)
2096
 * @unused: Unused pointer.
2097 2098 2099
 */
static void ap_scan_bus(struct work_struct *unused)
{
2100
	int ap, config_changed = 0;
2101

2102 2103 2104 2105
	/* config change notify */
	config_changed = ap_get_configuration();
	if (config_changed)
		notify_config_changed();
2106 2107
	ap_select_domain();

2108
	AP_DBF_DBG("%s running\n", __func__);
2109

2110
	/* loop over all possible adapters */
2111 2112
	for (ap = 0; ap <= ap_max_adapter_id; ap++)
		ap_scan_adapter(ap);
2113

2114 2115 2116 2117
	/* scan complete notify */
	if (config_changed)
		notify_scan_complete();

2118 2119 2120 2121
	/* check if there is at least one queue available with default domain */
	if (ap_domain_index >= 0) {
		struct device *dev =
			bus_find_device(&ap_bus_type, NULL,
H
Harald Freudenberger 已提交
2122
					(void *)(long)ap_domain_index,
2123
					__match_queue_device_with_queue_id);
2124 2125 2126
		if (dev)
			put_device(dev);
		else
2127 2128
			AP_DBF_INFO("%s no queue device with default domain %d available\n",
				    __func__, ap_domain_index);
2129
	}
2130

2131
	if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
2132
		AP_DBF_DBG("%s init scan complete\n", __func__);
2133 2134 2135 2136
		ap_send_init_scan_done_uevent();
		ap_check_bindings_complete();
	}

2137
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
2138 2139
}

2140
static void ap_config_timeout(struct timer_list *unused)
2141
{
2142
	queue_work(system_long_wq, &ap_scan_work);
2143
}
2144

2145
/* Register the "ap" s390 debug feature and set its default level. */
static int __init ap_debug_init(void)
{
	ap_dbf_info = debug_register("ap", 2, 1,
				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(ap_dbf_info, &debug_sprintf_view);
	debug_set_level(ap_dbf_info, DBF_ERR);

	return 0;
}

2155 2156
/*
 * Initialize the AP permission masks (ioctl, adapter and queue masks)
 * from the apmask=/aqmask= kernel parameter strings, defaulting to
 * "everything allowed" when no parameter is given.
 */
static void __init ap_perms_init(void)
{
	/* all resources usable if no kernel parameter string given */
	memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
	memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
	memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));

	/* apm kernel parameter string */
	if (apm_str) {
		memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
		ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
				  &ap_perms_mutex);
	}

	/* aqm kernel parameter string */
	if (aqm_str) {
		memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
		ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
				  &ap_perms_mutex);
	}
}

2177
/**
2178 2179 2180
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module.
2181
 */
2182
static int __init ap_module_init(void)
2183
{
2184
	int rc;
2185

2186 2187 2188 2189
	rc = ap_debug_init();
	if (rc)
		return rc;

2190
	if (!ap_instructions_available()) {
2191 2192 2193 2194
		pr_warn("The hardware system does not support AP instructions\n");
		return -ENODEV;
	}

2195 2196 2197
	/* init ap_queue hashtable */
	hash_init(ap_queues);

2198
	/* set up the AP permissions (ioctls, ap and aq masks) */
2199 2200
	ap_perms_init();

2201
	/* Get AP configuration data if available */
2202 2203 2204 2205
	ap_init_qci_info();

	/* check default domain setting */
	if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
2206 2207
	    (ap_domain_index >= 0 &&
	     !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
2208 2209
		pr_warn("%d is not a valid cryptographic domain\n",
			ap_domain_index);
2210
		ap_domain_index = -1;
2211
	}
2212

2213
	/* enable interrupts if available */
2214
	if (ap_interrupts_available() && ap_useirq) {
2215
		rc = register_adapter_interrupt(&ap_airq);
2216
		ap_irq_flag = (rc == 0);
F
Felix Beck 已提交
2217 2218
	}

2219 2220 2221 2222 2223 2224
	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;

	/* Create /sys/devices/ap. */
M
Mark McLoughlin 已提交
2225
	ap_root_device = root_device_register("ap");
2226
	rc = PTR_ERR_OR_ZERO(ap_root_device);
2227 2228
	if (rc)
		goto out_bus;
2229
	ap_root_device->bus = &ap_bus_type;
2230

2231
	/* Setup the AP bus rescan timer. */
2232
	timer_setup(&ap_config_timer, ap_config_timeout, 0);
2233

2234 2235
	/*
	 * Setup the high resultion poll timer.
2236 2237 2238 2239 2240 2241 2242
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

2243 2244 2245 2246 2247 2248 2249
	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

2250
	queue_work(system_long_wq, &ap_scan_work);
2251

2252 2253 2254
	return 0;

out_work:
2255
	hrtimer_cancel(&ap_poll_timer);
M
Mark McLoughlin 已提交
2256
	root_device_unregister(ap_root_device);
2257 2258 2259
out_bus:
	bus_unregister(&ap_bus_type);
out:
2260
	if (ap_irq_flag)
2261
		unregister_adapter_interrupt(&ap_airq);
2262
	kfree(ap_qci_info);
2263 2264
	return rc;
}
2265
device_initcall(ap_module_init);