/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Functions for EQs, NEQs and interrupts
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)

#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)

#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
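
/*
 * These masks use IBM bit numbering: bit 0 is the most significant
 * bit of the 64-bit entry, bit 63 the least significant.
 * EHCA_BMASK_IBM(from, to) names that inclusive bit range and
 * EHCA_BMASK_GET() extracts it.  A minimal sketch, mirroring how the
 * handlers below decode event queue entries:
 *
 *	u64 eqe_value = eqe->entry;
 *	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 *
 * pulls the CQ token out of bits 32..63, i.e. the low-order word.
 */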

static void queue_comp_task(struct ehca_cq *__cq);

static struct ehca_comp_pool *pool;
#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
#endif

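/*
 * Run the consumer's completion handler for this CQ, if one is
 * registered; cb_lock serializes the callback invocation.
 */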
static inline void comp_event_callback(struct ehca_cq *cq)
{
	if (!cq->ib_cq.comp_handler)
		return;

	spin_lock(&cq->cb_lock);
	cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
	spin_unlock(&cq->cb_lock);

	return;
}

static void print_error_data(struct ehca_shca *shca, void *data,
			     u64 *rblock, int length)
{
	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
	u64 resource = rblock[1];

	switch (type) {
	case 0x1: /* Queue Pair */
	{
		struct ehca_qp *qp = (struct ehca_qp *)data;

		/* only print error data if AER is set */
		if (rblock[6] == 0)
			return;

		ehca_err(&shca->ib_device,
			 "QP 0x%x (resource=%lx) has errors.",
			 qp->ib_qp.qp_num, resource);
		break;
	}
	case 0x4: /* Completion Queue */
	{
		struct ehca_cq *cq = (struct ehca_cq *)data;

		ehca_err(&shca->ib_device,
			 "CQ 0x%x (resource=%lx) has errors.",
			 cq->cq_number, resource);
		break;
	}
	default:
		ehca_err(&shca->ib_device,
			 "Unknown errror type: %lx on %s.",
			 type, shca->ib_device.name);
		break;
	}

	ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
		 "---------------------------------------------------");
	ehca_dmp(rblock, length, "resource=%lx", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data end "
		 "----------------------------------------------------");

	return;
}

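/*
 * Fetch the adapter's error data for @resource into a firmware
 * control block via hipz_h_error_data() and dump it to the log.
 * GFP_ATOMIC is used because this can run from event/tasklet context.
 */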
int ehca_error_data(struct ehca_shca *shca, void *data,
		    u64 resource)
{

	unsigned long ret;
	u64 *rblock;
	unsigned long block_count;

	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
		ret = -ENOMEM;
		goto error_data1;
	}

	/* rblock must be 4K aligned and should be 4K large */
	ret = hipz_h_error_data(shca->ipz_hca_handle,
				resource,
				rblock,
				&block_count);

	if (ret == H_R_STATE)
		ehca_err(&shca->ib_device,
			 "No error data is available: %lx.", resource);
	else if (ret == H_SUCCESS) {
		int length;

		length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

		if (length > EHCA_PAGESIZE)
			length = EHCA_PAGESIZE;

		print_error_data(shca, data, rblock, length);
	} else
		ehca_err(&shca->ib_device,
			 "Error data could not be fetched: %lx", resource);

	ehca_free_fw_ctrlblock(rblock);

error_data1:
	return ret;

}

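/*
 * Deliver a QP-affiliated event to the consumer.  The QP token from
 * the EQE is resolved through ehca_qp_idr under the read lock; for
 * fatal events the adapter error data is dumped first.  SRQ base QPs
 * report through the SRQ event handler rather than the QP one.
 */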
static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
			      enum ib_event_type event_type, int fatal)
{
	struct ib_event event;
	struct ehca_qp *qp;
	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);

	read_lock(&ehca_qp_idr_lock);
	qp = idr_find(&ehca_qp_idr, token);
	read_unlock(&ehca_qp_idr_lock);

	if (!qp)
		return;

	if (fatal)
		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);

	event.device = &shca->ib_device;

	if (qp->ext_type == EQPT_SRQ) {
		if (!qp->ib_srq.event_handler)
			return;

		event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
		event.element.srq = &qp->ib_srq;
		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
	} else {
		if (!qp->ib_qp.event_handler)
			return;

		event.event = event_type;
		event.element.qp = &qp->ib_qp;
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
	}

	return;
}

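/*
 * Handle a CQ-affiliated error event.  nr_events is incremented
 * while the CQ is still visible in the idr, so the CQ owner can wait
 * on wait_completion until event processing has finished.
 */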
static void cq_event_callback(struct ehca_shca *shca, u64 eqe)
{
	struct ehca_cq *cq;
	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

	read_lock(&ehca_cq_idr_lock);
	cq = idr_find(&ehca_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);
	read_unlock(&ehca_cq_idr_lock);

	if (!cq)
		return;

	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);

	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);

	return;
}

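/*
 * Decode the event identifier of an EQE and dispatch to the QP/CQ
 * handlers; identifiers without a dedicated handler are only logged.
 */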
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

	switch (identifier) {
	case 0x02: /* path migrated */
		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
		break;
	case 0x03: /* communication established */
		qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
		break;
	case 0x04: /* send queue drained */
		qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
		break;
	case 0x05: /* QP error */
	case 0x06: /* QP error */
		qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
		break;
	case 0x07: /* CQ error */
	case 0x08: /* CQ error */
		cq_event_callback(shca, eqe);
		break;
	case 0x09: /* MRMWPTE error */
		ehca_err(&shca->ib_device, "MRMWPTE error.");
		break;
	case 0x0A: /* port event */
		ehca_err(&shca->ib_device, "Port event.");
		break;
	case 0x0B: /* MR access error */
		ehca_err(&shca->ib_device, "MR access error.");
		break;
	case 0x0C: /* EQ error */
		ehca_err(&shca->ib_device, "EQ error.");
		break;
	case 0x0D: /* P/Q_Key mismatch */
		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
		break;
	case 0x10: /* sampling complete */
		ehca_err(&shca->ib_device, "Sampling complete.");
		break;
	case 0x11: /* unaffiliated access error */
		ehca_err(&shca->ib_device, "Unaffiliated access error.");
		break;
	case 0x12: /* path migrating error */
		ehca_err(&shca->ib_device, "Path migration error.");
		break;
	case 0x13: /* interface trace stopped */
		ehca_err(&shca->ib_device, "Interface trace stopped.");
		break;
	case 0x14: /* first error capture info available */
		ehca_info(&shca->ib_device, "First error capture available");
		break;
	case 0x15: /* SRQ limit reached */
		qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
			 identifier, shca->ib_device.name);
		break;
	}

	return;
}

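/*
 * Port event helpers: dispatch_port_event() wraps ib_dispatch_event()
 * for a port, and notify_port_conf_change() compares the cached SMA
 * attributes with freshly queried ones, dispatching SM/LID/P_Key
 * change events only for attributes that actually changed.
 */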
static void dispatch_port_event(struct ehca_shca *shca, int port_num,
				enum ib_event_type type, const char *msg)
{
	struct ib_event event;

	ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
	event.device = &shca->ib_device;
	event.event = type;
	event.element.port_num = port_num;
	ib_dispatch_event(&event);
}

static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
{
	struct ehca_sma_attr  new_attr;
	struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;

	ehca_query_sma_attr(shca, port_num, &new_attr);

	if (new_attr.sm_sl  != old_attr->sm_sl ||
	    new_attr.sm_lid != old_attr->sm_lid)
		dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
				    "SM changed");

	if (new_attr.lid != old_attr->lid ||
	    new_attr.lmc != old_attr->lmc)
		dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
				    "LID changed");

	if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
	    memcmp(new_attr.pkeys, old_attr->pkeys,
		   sizeof(u16) * new_attr.pkey_tbl_len))
		dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
				    "P_Key changed");

	*old_attr = new_attr;
}

static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &shca->sport[port - 1].saved_attr);
		} else {
			shca->sport[port - 1].port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			shca->sport[port - 1].port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33:  /* trace stopped */
		ehca_err(&shca->ib_device, "Traced stopped.");
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}

	return;
}

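/*
 * Clear the CQx_EP (event pending) bit so the adapter can signal the
 * next event for this CQ.  The load after the store presumably
 * flushes the write out to the adapter (an assumption inferred from
 * the access pattern; the loaded value is not otherwise used).
 */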
static inline void reset_eq_pending(struct ehca_cq *cq)
{
	u64 CQx_EP;
	struct h_galpa gal = cq->galpas.kernel;

	hipz_galpa_store_cq(gal, cqx_ep, 0x0);
	CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));

	return;
}

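/*
 * The hardware interrupt handlers below only schedule the matching
 * tasklet; all EQE processing runs in softirq context.
 */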
irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca*)dev_id;

	tasklet_hi_schedule(&shca->neq.interrupt_task);

	return IRQ_HANDLED;
}

void ehca_tasklet_neq(unsigned long data)
{
	struct ehca_shca *shca = (struct ehca_shca*)data;
	struct ehca_eqe *eqe;
	u64 ret;

	eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);

	while (eqe) {
		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
			parse_ec(shca, eqe->entry);

		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
	}

	ret = hipz_h_reset_event(shca->ipz_hca_handle,
				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);

	if (ret != H_SUCCESS)
		ehca_err(&shca->ib_device, "Can't clear notification events.");

	return;
}

irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca*)dev_id;

	tasklet_hi_schedule(&shca->eq.interrupt_task);

	return IRQ_HANDLED;
}


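/*
 * Process a single EQE: completion events resolve the CQ by token,
 * take an nr_events reference under the idr read lock, re-arm the CQ
 * and run the completion either inline or via the comp task pool;
 * everything else is handed to parse_identifier().
 */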
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
	u64 eqe_value;
	u32 token;
	struct ehca_cq *cq;

	eqe_value = eqe->entry;
	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		ehca_dbg(&shca->ib_device, "Got completion event");
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		if (cq)
			atomic_inc(&cq->nr_events);
		read_unlock(&ehca_cq_idr_lock);
		if (cq == NULL) {
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		if (ehca_scaling_code)
			queue_comp_task(cq);
		else {
			comp_event_callback(cq);
			if (atomic_dec_and_test(&cq->nr_events))
				wake_up(&cq->wait_completion);
		}
	} else {
		ehca_dbg(&shca->ib_device, "Got non completion event");
		parse_identifier(shca, eqe_value);
	}
}

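/*
 * Main EQ processing, entered from the tasklet (is_irq != 0) or from
 * a poll path (is_irq == 0, the "deadman" case in the log messages).
 * EQEs are first drained into eqe_cache, with an nr_events reference
 * held per CQ, and all pending bits are reset before any completion
 * handler runs, so events that arrive in between are not lost.
 */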
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely((query_cnt == max_query_cnt)))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe =
			(struct ehca_eqe *)ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq)
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}
	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);
	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}
	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}

void ehca_tasklet_eq(unsigned long data)
{
	ehca_process_eq((struct ehca_shca*)data, 1);
}

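/*
 * With ehca_scaling_code enabled, completions are spread across
 * per-CPU kernel threads.  find_next_online_cpu() hands out online
 * CPUs round-robin, remembering its last pick in pool->last_cpu.
 */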
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
	int cpu;
	unsigned long flags;

	WARN_ON_ONCE(!in_interrupt());
	if (ehca_debug_level)
		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	cpu = next_cpu(pool->last_cpu, cpu_online_map);
	if (cpu == NR_CPUS)
		cpu = first_cpu(cpu_online_map);
	pool->last_cpu = cpu;
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

	return cpu;
}

static void __queue_comp_task(struct ehca_cq *__cq,
			      struct ehca_cpu_comp_task *cct)
{
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);
	spin_lock(&__cq->task_lock);

	if (__cq->nr_callbacks == 0) {
		__cq->nr_callbacks++;
		list_add_tail(&__cq->entry, &cct->cq_list);
		cct->cq_jobs++;
		wake_up(&cct->wait_queue);
	} else
		__cq->nr_callbacks++;

	spin_unlock(&__cq->task_lock);
	spin_unlock_irqrestore(&cct->task_lock, flags);
}

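/*
 * Queue a CQ on a completion task.  If the CPU picked first already
 * has jobs pending, advance the round robin once more to spread the
 * load before queueing.
 */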
static void queue_comp_task(struct ehca_cq *__cq)
{
	int cpu_id;
	struct ehca_cpu_comp_task *cct;
	int cq_jobs;
	unsigned long flags;

	cpu_id = find_next_online_cpu(pool);
	BUG_ON(!cpu_online(cpu_id));

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
	BUG_ON(!cct);

	spin_lock_irqsave(&cct->task_lock, flags);
	cq_jobs = cct->cq_jobs;
	spin_unlock_irqrestore(&cct->task_lock, flags);
	if (cq_jobs > 0) {
		cpu_id = find_next_online_cpu(pool);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
		BUG_ON(!cct);
	}

	__queue_comp_task(__cq, cct);
}

static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
	struct ehca_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags);

		comp_event_callback(cq);
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);

		spin_lock_irqsave(&cct->task_lock, flags);
		spin_lock(&cq->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock(&cq->task_lock);
	}

	spin_unlock_irqrestore(&cct->task_lock, flags);
}

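/*
 * Body of the per-CPU completion kthread: sleep while the CQ list is
 * empty, otherwise drain it through run_comp_task().
 */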
static int comp_task(void *__cct)
{
	struct ehca_cpu_comp_task *cct = __cct;
	int cql_empty;
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (cql_empty)
			schedule();
		else
			__set_current_state(TASK_RUNNING);

		remove_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (!cql_empty)
			run_comp_task(__cct);

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
					    int cpu)
{
	struct ehca_cpu_comp_task *cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	spin_lock_init(&cct->task_lock);
	INIT_LIST_HEAD(&cct->cq_list);
	init_waitqueue_head(&cct->wait_queue);
	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);

	return cct->task;
}

static void destroy_comp_task(struct ehca_comp_pool *pool,
			      int cpu)
{
	struct ehca_cpu_comp_task *cct;
	struct task_struct *task;
	unsigned long flags_cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	task = cct->task;
	cct->task = NULL;
	cct->cq_jobs = 0;

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);

	if (task)
		kthread_stop(task);
}

#ifdef CONFIG_HOTPLUG_CPU
static void take_over_work(struct ehca_comp_pool *pool,
			   int cpu)
{
	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	LIST_HEAD(list);
	struct ehca_cq *cq;
	unsigned long flags_cct;

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	list_splice_init(&cct->cq_list, &list);

	while (!list_empty(&list)) {
		/* the CQ entries were spliced onto the local list above */
		cq = list_entry(list.next, struct ehca_cq, entry);

		list_del(&cq->entry);
		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
						  smp_processor_id()));
	}

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);

}

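/*
 * CPU hotplug notifier: create, bind and destroy the per-CPU
 * completion tasks as CPUs come and go, and migrate queued CQs off a
 * dead CPU via take_over_work().
 */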
static int comp_pool_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if (!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}
#endif

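/*
 * Set up the completion task pool: one kthread per online CPU, each
 * bound to its CPU, plus (under CONFIG_HOTPLUG_CPU) a notifier that
 * keeps the pool in sync with hotplug events.
 */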
int ehca_create_comp_pool(void)
{
	int cpu;
	struct task_struct *task;

	if (!ehca_scaling_code)
		return 0;

	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
	if (pool == NULL)
		return -ENOMEM;

	spin_lock_init(&pool->last_cpu_lock);
	pool->last_cpu = any_online_cpu(cpu_online_map);

	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
	if (pool->cpu_comp_tasks == NULL) {
		kfree(pool);
		return -EINVAL;
	}

	for_each_online_cpu(cpu) {
		task = create_comp_task(pool, cpu);
		if (task) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	comp_pool_callback_nb.notifier_call = comp_pool_callback;
	comp_pool_callback_nb.priority = 0;
	register_cpu_notifier(&comp_pool_callback_nb);
#endif

	printk(KERN_INFO "eHCA scaling code enabled\n");

	return 0;
}

void ehca_destroy_comp_pool(void)
{
	int i;

	if (!ehca_scaling_code)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&comp_pool_callback_nb);
#endif

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			destroy_comp_task(pool, i);
	}
	free_percpu(pool->cpu_comp_tasks);
	kfree(pool);
}