/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
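	/*
	 * The SIGA instruction takes its operands in fixed registers: the
	 * function code in general register 0, the subchannel id (or QEBSM
	 * subchannel token) in register 1 and the queue masks in registers
	 * 2 and 3, hence the explicit register bindings below.
	 */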
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
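	/* SIGA-w/wt reports a busy target in GR0; hand the busy bit to the caller */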
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
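	/* ccq is the condition code qualifier returned by the EQBS/SQBS instructions */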
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* no buffer processed */
	if (ccq == 97)
		return 1;
	/* not all buffers processed */
	if (ccq == 96)
		return 2;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		BUG_ON(tmp_count == count);
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		WARN_ON(tmp_count);
		return count - tmp_count;
	}

	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
	unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		(aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock_fast();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga-sync needed here: a PCI interrupt or the thin-interrupt
	 * handler has already synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
	return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				int i, struct qaob *aob)
{
	int tmp;

	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
			(unsigned long) virt_to_phys(aob));
	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
			(unsigned long) aob->res0[0]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
			(unsigned long) aob->res0[1]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
			(unsigned long) aob->res0[2]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
			(unsigned long) aob->res0[3]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
			(unsigned long) aob->res0[4]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
			(unsigned long) aob->res0[5]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
				(unsigned long) aob->sba[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
				(unsigned long) q->sbal[i]->element[tmp].addr);
		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
				q->sbal[i]->element[tmp].length);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
	for (tmp = 0; tmp < 2; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
			(unsigned long) aob->res4[tmp]);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	if (!contains_aobs(q))
		return;
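
	/*
	 * Transfer the PENDING state of completed buffers to the per-buffer
	 * outbound state so the upper layer can recognize the delayed
	 * (asynchronous) completion; for EMPTY buffers just drop the AOB
	 * reference.
	 */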

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		} else if (state == SLSB_P_OUTPUT_EMPTY) {
			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].aob = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
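	/*
	 * Returns the physical address of the AOB prepared for this buffer,
	 * or 0 if the queue does not use a completion queue.
	 */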
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		BUG_ON(q->sbal_state == NULL);
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		BUG_ON(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		BUG();
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);

		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}

out:
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{

	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;


	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	clear_nonshared_ind(irq_ptr);
	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);