qeth_core_main.c 187.9 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
F
Frank Blaschka 已提交
2
/*
3
 *    Copyright IBM Corp. 2007, 2009
F
Frank Blaschka 已提交
4 5 6 7 8 9
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

10 11 12
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

13
#include <linux/compat.h>
F
Frank Blaschka 已提交
14 15 16 17 18
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
19
#include <linux/log2.h>
20
#include <linux/io.h>
F
Frank Blaschka 已提交
21 22 23
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
J
Julian Wiedmann 已提交
24
#include <linux/mm.h>
F
Frank Blaschka 已提交
25
#include <linux/kthread.h>
26
#include <linux/slab.h>
27 28 29
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
30
#include <linux/rcutree.h>
31
#include <linux/skbuff.h>
32
#include <linux/vmalloc.h>
33

34
#include <net/iucv/af_iucv.h>
35
#include <net/dsfield.h>
36
#include <net/sock.h>
F
Frank Blaschka 已提交
37

38
#include <asm/ebcdic.h>
39
#include <asm/chpid.h>
40
#include <asm/sysinfo.h>
41 42 43
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
44
#include <asm/cpcmd.h>
F
Frank Blaschka 已提交
45 46 47

#include "qeth_core.h"

48 49 50 51 52
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
53 54
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
55 56 57 58
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
F
Frank Blaschka 已提交
59

J
Julian Wiedmann 已提交
60
static struct kmem_cache *qeth_core_header_cache;
61
static struct kmem_cache *qeth_qdio_outbuf_cache;
F
Frank Blaschka 已提交
62 63

static struct device *qeth_core_root_dev;
64
static struct dentry *qeth_debugfs_root;
F
Frank Blaschka 已提交
65 66
static struct lock_class_key qdio_out_skb_queue_key;

67
static void qeth_issue_next_read_cb(struct qeth_card *card,
68 69
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
F
Frank Blaschka 已提交
70
static int qeth_qdio_establish(struct qeth_card *);
71
static void qeth_free_qdio_queues(struct qeth_card *card);
F
Frank Blaschka 已提交
72

S
Stefan Raspl 已提交
73 74 75 76 77 78 79 80 81
static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

J
Julian Wiedmann 已提交
82
static const char *qeth_get_cardname(struct qeth_card *card)
F
Frank Blaschka 已提交
83
{
84
	if (IS_VM_NIC(card)) {
F
Frank Blaschka 已提交
85
		switch (card->info.type) {
86
		case QETH_CARD_TYPE_OSD:
87
			return " Virtual NIC QDIO";
F
Frank Blaschka 已提交
88
		case QETH_CARD_TYPE_IQD:
89
			return " Virtual NIC Hiper";
90
		case QETH_CARD_TYPE_OSM:
91
			return " Virtual NIC QDIO - OSM";
92
		case QETH_CARD_TYPE_OSX:
93
			return " Virtual NIC QDIO - OSX";
F
Frank Blaschka 已提交
94 95 96 97 98
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
99
		case QETH_CARD_TYPE_OSD:
F
Frank Blaschka 已提交
100 101 102
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
103 104 105 106
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
F
Frank Blaschka 已提交
107 108 109 110 111 112 113 114 115 116
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
117
	if (IS_VM_NIC(card)) {
F
Frank Blaschka 已提交
118
		switch (card->info.type) {
119
		case QETH_CARD_TYPE_OSD:
120
			return "Virt.NIC QDIO";
F
Frank Blaschka 已提交
121
		case QETH_CARD_TYPE_IQD:
122
			return "Virt.NIC Hiper";
123
		case QETH_CARD_TYPE_OSM:
124
			return "Virt.NIC OSM";
125
		case QETH_CARD_TYPE_OSX:
126
			return "Virt.NIC OSX";
F
Frank Blaschka 已提交
127 128 129 130 131
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
132
		case QETH_CARD_TYPE_OSD:
F
Frank Blaschka 已提交
133 134 135 136 137 138 139 140 141
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
142 143
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
F
Frank Blaschka 已提交
144 145 146 147 148 149 150 151 152 153 154 155 156
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
157 158 159 160
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
F
Frank Blaschka 已提交
161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

194
static void qeth_clear_working_pool_list(struct qeth_card *card)
F
Frank Blaschka 已提交
195 196
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
197 198
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;
F
Frank Blaschka 已提交
199

C
Carsten Otte 已提交
200
	QETH_CARD_TEXT(card, 5, "clwrklst");
F
Frank Blaschka 已提交
201
	list_for_each_entry_safe(pool_entry, tmp,
202 203
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);
204 205 206

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
F
Frank Blaschka 已提交
207 208
}

209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
242
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
243 244 245 246 247 248 249 250 251 252

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

F
Frank Blaschka 已提交
253 254
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
255 256
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;
F
Frank Blaschka 已提交
257

C
Carsten Otte 已提交
258
	QETH_CARD_TEXT(card, 5, "alocpool");
F
Frank Blaschka 已提交
259
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
260 261 262 263
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
F
Frank Blaschka 已提交
264 265 266
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
267

268
		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
F
Frank Blaschka 已提交
269 270 271 272
	}
	return 0;
}

273
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
F
Frank Blaschka 已提交
274
{
275 276 277 278 279 280
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

C
Carsten Otte 已提交
281
	QETH_CARD_TEXT(card, 2, "realcbp");
F
Frank Blaschka 已提交
282

283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321
	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
F
Frank Blaschka 已提交
322
}
323
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
F
Frank Blaschka 已提交
324

S
Sebastian Ott 已提交
325 326
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
327 328 329 330
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
S
Sebastian Ott 已提交
331 332 333 334 335 336 337 338 339 340 341
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

342 343 344 345 346
	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

S
Sebastian Ott 已提交
347
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
348
		q->bufs[i].buffer = q->qdio_bufs[i];
S
Sebastian Ott 已提交
349 350 351 352 353

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

J
Julian Wiedmann 已提交
354
static int qeth_cq_init(struct qeth_card *card)
355 356 357 358
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
359
		QETH_CARD_TEXT(card, 2, "cqinit");
360 361
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
362 363
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
364
			     card->qdio.no_in_queues - 1, 0, 127, NULL);
365
		if (rc) {
366
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
367 368 369 370 371 372 373 374
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

J
Julian Wiedmann 已提交
375
static int qeth_alloc_cq(struct qeth_card *card)
376 377
{
	if (card->options.cq == QETH_CQ_ENABLED) {
378
		QETH_CARD_TEXT(card, 2, "cqon");
S
Sebastian Ott 已提交
379
		card->qdio.c_q = qeth_alloc_qdio_queue();
380
		if (!card->qdio.c_q) {
381 382
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
383
		}
384

385 386
		card->qdio.no_in_queues = 2;
	} else {
387
		QETH_CARD_TEXT(card, 2, "nocq");
388 389 390
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
391
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
392
	return 0;
393 394
}

J
Julian Wiedmann 已提交
395
static void qeth_free_cq(struct qeth_card *card)
396 397 398
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
S
Sebastian Ott 已提交
399
		qeth_free_qdio_queue(card->qdio.c_q);
400 401 402 403
		card->qdio.c_q = NULL;
	}
}

J
Julian Wiedmann 已提交
404 405 406
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

J
Julian Wiedmann 已提交
429 430 431 432 433 434 435
static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
436 437
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
438 439
{
	ccw->cmd_code = cmd_code;
440
	ccw->flags = flags | CCW_FLAG_SLI;
441 442 443 444
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

445
static int __qeth_issue_next_read(struct qeth_card *card)
F
Frank Blaschka 已提交
446
{
447 448 449
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
450
	int rc;
F
Frank Blaschka 已提交
451

C
Carsten Otte 已提交
452
	QETH_CARD_TEXT(card, 5, "issnxrd");
453
	if (channel->state != CH_STATE_UP)
F
Frank Blaschka 已提交
454
		return -EIO;
455

456 457
	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
458
	iob->callback = qeth_issue_next_read_cb;
459 460 461
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

C
Carsten Otte 已提交
462
	QETH_CARD_TEXT(card, 6, "noirqpnd");
463
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
464 465 466
	if (!rc) {
		channel->active_cmd = iob;
	} else {
467 468
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
469
		qeth_unlock_channel(card, channel);
470
		qeth_put_cmd(iob);
471
		card->read_or_write_problem = 1;
F
Frank Blaschka 已提交
472 473 474 475 476
		qeth_schedule_recovery(card);
	}
	return rc;
}

477 478 479 480 481 482 483 484 485 486 487
static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

488 489
static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
490 491
{
	spin_lock_irq(&card->lock);
492
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
493 494 495
	spin_unlock_irq(&card->lock);
}

496 497
static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
498 499
{
	spin_lock_irq(&card->lock);
500
	list_del(&iob->list_entry);
501 502 503
	spin_unlock_irq(&card->lock);
}

J
Julian Wiedmann 已提交
504
static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
505
{
506 507
	iob->rc = reason;
	complete(&iob->done);
508 509
}

510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

538
static void qeth_flush_local_addrs(struct qeth_card *card)
539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

690 691 692 693 694 695 696 697 698 699 700 701
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
702 703
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
729 730
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
731 732 733 734 735 736 737 738 739 740 741 742 743
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761
static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

762
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
F
Frank Blaschka 已提交
763 764
		struct qeth_card *card)
{
765
	const char *ipa_name;
766
	int com = cmd->hdr.command;
767

F
Frank Blaschka 已提交
768
	ipa_name = qeth_get_ipa_cmd_name(com);
769

770
	if (rc)
771 772 773
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
774
	else
775 776
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
F
Frank Blaschka 已提交
777 778 779
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
780
						struct qeth_ipa_cmd *cmd)
F
Frank Blaschka 已提交
781
{
C
Carsten Otte 已提交
782
	QETH_CARD_TEXT(card, 5, "chkipad");
783 784

	if (IS_IPA_REPLY(cmd)) {
J
Julian Wiedmann 已提交
785
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
786 787 788 789 790 791 792 793 794 795
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
J
Julian Wiedmann 已提交
796
				netdev_name(card->dev));
797
			schedule_work(&card->close_dev_work);
F
Frank Blaschka 已提交
798
		} else {
799 800
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
J
Julian Wiedmann 已提交
801
				 netdev_name(card->dev), card->info.chpid);
802
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
803
			netif_carrier_off(card->dev);
F
Frank Blaschka 已提交
804
		}
805 806 807 808
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
J
Julian Wiedmann 已提交
809
			 netdev_name(card->dev), card->info.chpid);
810 811 812 813 814 815 816 817 818 819 820
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
821 822 823 824 825
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

826 827 828
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
829 830 831 832 833
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

834 835 836 837 838
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
F
Frank Blaschka 已提交
839 840 841
	}
}

842
static void qeth_clear_ipacmd_list(struct qeth_card *card)
F
Frank Blaschka 已提交
843
{
844
	struct qeth_cmd_buffer *iob;
F
Frank Blaschka 已提交
845 846
	unsigned long flags;

C
Carsten Otte 已提交
847
	QETH_CARD_TEXT(card, 4, "clipalst");
F
Frank Blaschka 已提交
848 849

	spin_lock_irqsave(&card->lock, flags);
850
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
851
		qeth_notify_cmd(iob, -ECANCELED);
F
Frank Blaschka 已提交
852 853 854
	spin_unlock_irqrestore(&card->lock, flags);
}

855 856
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
F
Frank Blaschka 已提交
857
{
858
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
859
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
860
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
861
				 buffer[4]);
C
Carsten Otte 已提交
862 863
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
864 865 866
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
867
			dev_err(&card->gdev->dev,
868 869
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
870
		}
F
Frank Blaschka 已提交
871 872 873 874 875
		return -EIO;
	}
	return 0;
}

876
static void qeth_release_buffer_cb(struct qeth_card *card,
877 878
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
879
{
880
	qeth_put_cmd(iob);
881 882
}

883 884
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
885
	qeth_notify_cmd(iob, rc);
886
	qeth_put_cmd(iob);
F
Frank Blaschka 已提交
887 888
}

J
Julian Wiedmann 已提交
889 890 891
static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

909 910
	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
911
	refcount_set(&iob->ref_count, 1);
912 913 914 915 916 917
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

918
static void qeth_issue_next_read_cb(struct qeth_card *card,
919 920
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
F
Frank Blaschka 已提交
921
{
922
	struct qeth_cmd_buffer *request = NULL;
923
	struct qeth_ipa_cmd *cmd = NULL;
924
	struct qeth_reply *reply = NULL;
925
	struct qeth_cmd_buffer *tmp;
F
Frank Blaschka 已提交
926
	unsigned long flags;
927
	int rc = 0;
F
Frank Blaschka 已提交
928

C
Carsten Otte 已提交
929
	QETH_CARD_TEXT(card, 4, "sndctlcb");
930 931 932 933 934 935
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
936
		fallthrough;
937
	default:
938
		qeth_clear_ipacmd_list(card);
939
		goto err_idx;
F
Frank Blaschka 已提交
940 941
	}

942 943
	cmd = __ipa_reply(iob);
	if (cmd) {
944
		cmd = qeth_check_ipa_data(card, cmd);
945 946
		if (!cmd)
			goto out;
F
Frank Blaschka 已提交
947 948
	}

949
	/* match against pending cmd requests */
F
Frank Blaschka 已提交
950
	spin_lock_irqsave(&card->lock, flags);
951
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
952
		if (tmp->match && tmp->match(tmp, iob)) {
953
			request = tmp;
954
			/* take the object outside the lock */
955
			qeth_get_cmd(request);
956
			break;
F
Frank Blaschka 已提交
957 958 959
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
960

961
	if (!request)
962 963
		goto out;

964
	reply = &request->reply;
965 966
	if (!reply->callback) {
		rc = 0;
967 968 969
		goto no_callback;
	}

970 971
	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
972
		/* Bail out when the requestor has already left: */
973
		rc = request->rc;
974 975 976
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
977
	spin_unlock_irqrestore(&request->lock, flags);
978

979
no_callback:
980
	if (rc <= 0)
981 982
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
F
Frank Blaschka 已提交
983 984 985 986
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
987
	__qeth_issue_next_read(card);
988 989
err_idx:
	qeth_put_cmd(iob);
F
Frank Blaschka 已提交
990 991 992 993 994 995
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
996
	int rc = 0;
F
Frank Blaschka 已提交
997 998

	spin_lock_irqsave(&card->thread_mask_lock, flags);
999 1000 1001 1002 1003 1004
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
F
Frank Blaschka 已提交
1005
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1006 1007

	return rc;
F
Frank Blaschka 已提交
1008 1009
}

1010 1011
static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
F
Frank Blaschka 已提交
1012 1013 1014 1015 1016 1017 1018 1019 1020
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

1021 1022
static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
F
Frank Blaschka 已提交
1023 1024 1025 1026 1027 1028
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1029
	wake_up_all(&card->wait_q);
F
Frank Blaschka 已提交
1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

1051
static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
F
Frank Blaschka 已提交
1052 1053 1054 1055 1056 1057 1058 1059
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

1060
int qeth_schedule_recovery(struct qeth_card *card)
F
Frank Blaschka 已提交
1061
{
1062 1063
	int rc;

C
Carsten Otte 已提交
1064
	QETH_CARD_TEXT(card, 2, "startrec");
1065 1066 1067

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
F
Frank Blaschka 已提交
1068
		schedule_work(&card->kernel_thread_starter);
1069 1070

	return rc;
F
Frank Blaschka 已提交
1071 1072
}

1073 1074
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
F
Frank Blaschka 已提交
1075 1076 1077 1078 1079
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
1080 1081
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;
F
Frank Blaschka 已提交
1082 1083 1084 1085

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
C
Carsten Otte 已提交
1086
		QETH_CARD_TEXT(card, 2, "CGENCHK");
1087 1088
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
1089 1090
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
F
Frank Blaschka 已提交
1091 1092
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
1093
		return -EIO;
F
Frank Blaschka 已提交
1094 1095 1096 1097 1098
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
C
Carsten Otte 已提交
1099
			QETH_CARD_TEXT(card, 2, "REVIND");
1100
			return -EIO;
F
Frank Blaschka 已提交
1101 1102 1103
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
C
Carsten Otte 已提交
1104
			QETH_CARD_TEXT(card, 2, "CMDREJi");
1105
			return -EIO;
F
Frank Blaschka 已提交
1106 1107
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
C
Carsten Otte 已提交
1108
			QETH_CARD_TEXT(card, 2, "AFFE");
1109
			return -EIO;
F
Frank Blaschka 已提交
1110 1111
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
C
Carsten Otte 已提交
1112
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
F
Frank Blaschka 已提交
1113 1114
			return 0;
		}
C
Carsten Otte 已提交
1115
		QETH_CARD_TEXT(card, 2, "DGENCHK");
1116
		return -EIO;
F
Frank Blaschka 已提交
1117 1118 1119 1120
	}
	return 0;
}

1121
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
1122
				struct irb *irb)
F
Frank Blaschka 已提交
1123
{
1124
	if (!IS_ERR(irb))
F
Frank Blaschka 已提交
1125 1126 1127 1128
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
1129 1130
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
C
Carsten Otte 已提交
1131 1132
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
1133
		return -EIO;
F
Frank Blaschka 已提交
1134
	case -ETIMEDOUT:
1135 1136
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
C
Carsten Otte 已提交
1137 1138
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
1139
		return -ETIMEDOUT;
F
Frank Blaschka 已提交
1140
	default:
1141 1142
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
C
Carsten Otte 已提交
1143 1144
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
1145
		return PTR_ERR(irb);
F
Frank Blaschka 已提交
1146 1147 1148 1149 1150 1151 1152 1153
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
1154
	struct qeth_cmd_buffer *iob = NULL;
1155
	struct ccwgroup_device *gdev;
F
Frank Blaschka 已提交
1156 1157 1158
	struct qeth_channel *channel;
	struct qeth_card *card;

1159 1160 1161
	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);
F
Frank Blaschka 已提交
1162

C
Carsten Otte 已提交
1163 1164
	QETH_CARD_TEXT(card, 5, "irq");

F
Frank Blaschka 已提交
1165 1166
	if (card->read.ccwdev == cdev) {
		channel = &card->read;
C
Carsten Otte 已提交
1167
		QETH_CARD_TEXT(card, 5, "read");
F
Frank Blaschka 已提交
1168 1169
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
C
Carsten Otte 已提交
1170
		QETH_CARD_TEXT(card, 5, "write");
F
Frank Blaschka 已提交
1171 1172
	} else {
		channel = &card->data;
C
Carsten Otte 已提交
1173
		QETH_CARD_TEXT(card, 5, "data");
F
Frank Blaschka 已提交
1174
	}
1175

1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189
	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

1190
	qeth_unlock_channel(card, channel);
1191

1192
	rc = qeth_check_irb_error(card, cdev, irb);
1193
	if (rc) {
1194 1195
		/* IO was terminated, free its resources. */
		if (iob)
1196
			qeth_cancel_cmd(iob, rc);
1197 1198 1199
		return;
	}

1200
	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
F
Frank Blaschka 已提交
1201
		channel->state = CH_STATE_STOPPED;
1202 1203
		wake_up(&card->wait_q);
	}
F
Frank Blaschka 已提交
1204

1205
	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
F
Frank Blaschka 已提交
1206
		channel->state = CH_STATE_HALTED;
1207 1208
		wake_up(&card->wait_q);
	}
F
Frank Blaschka 已提交
1209

1210 1211 1212 1213
	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
F
Frank Blaschka 已提交
1214
	}
1215 1216 1217 1218

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

F
Frank Blaschka 已提交
1219 1220 1221 1222
	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
1223 1224 1225
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
1226 1227 1228
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
F
Frank Blaschka 已提交
1229 1230 1231 1232 1233
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
1234

1235
		rc = qeth_get_problem(card, cdev, irb);
F
Frank Blaschka 已提交
1236
		if (rc) {
1237
			card->read_or_write_problem = 1;
1238
			if (iob)
1239
				qeth_cancel_cmd(iob, rc);
1240
			qeth_clear_ipacmd_list(card);
F
Frank Blaschka 已提交
1241
			qeth_schedule_recovery(card);
1242
			return;
F
Frank Blaschka 已提交
1243 1244 1245
		}
	}

1246 1247 1248 1249
	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
1250
			return;
1251 1252 1253 1254 1255
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
F
Frank Blaschka 已提交
1256 1257
}

1258
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1259
		struct qeth_qdio_out_buffer *buf,
1260
		enum iucv_tx_notify notification)
F
Frank Blaschka 已提交
1261 1262 1263
{
	struct sk_buff *skb;

1264
	skb_queue_walk(&buf->skb_list, skb) {
1265 1266
		struct sock *sk = skb->sk;

1267 1268
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1269 1270
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
1271 1272 1273
	}
}

1274 1275
static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
1276
				 int budget)
1277
{
1278 1279
	struct sk_buff *skb;

1280 1281 1282 1283 1284 1285
	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
1286 1287 1288 1289 1290 1291 1292
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

1293 1294 1295 1296 1297 1298
	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
1299
		if (!error) {
1300 1301 1302 1303 1304 1305 1306 1307 1308
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}
1309

1310
		napi_consume_skb(skb, budget);
1311
	}
1312 1313 1314
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1315
				     struct qeth_qdio_out_buffer *buf,
1316
				     bool error, int budget)
1317 1318 1319 1320
{
	int i;

	/* is PCI flag set on buffer? */
1321
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
1322
		atomic_dec(&queue->set_pci_flags_count);
1323 1324
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}
1325

1326
	qeth_tx_complete_buf(queue, buf, error, budget);
1327

1328
	for (i = 0; i < queue->max_elements; ++i) {
1329 1330
		void *data = phys_to_virt(buf->buffer->element[i].addr);

1331
		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
1332
			kmem_cache_free(qeth_core_header_cache, data);
F
Frank Blaschka 已提交
1333
	}
1334

1335
	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
F
Frank Blaschka 已提交
1336
	buf->next_element_to_fill = 0;
1337
	buf->frames = 0;
1338
	buf->bytes = 0;
1339
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1340 1341
}

1342 1343 1344 1345 1346 1347 1348
static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

1349 1350
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
1351
					  bool drain, int budget)
1352 1353 1354 1355
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
1356
		struct qeth_qaob_priv1 *priv;
1357 1358 1359 1360
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

1361 1362
		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
1363 1364 1365
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

1366 1367 1368
			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
1369
			qeth_tx_complete_buf(queue, buf, drain, budget);
1370

1371 1372 1373 1374 1375
			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

1376
				if (test_bit(i, buf->from_kmem_cache) && data)
1377 1378 1379 1380
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

1381
			list_del(&buf->list_entry);
1382
			qeth_free_out_buf(buf);
1383 1384 1385 1386
		}
	}
}

1387
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
1388 1389 1390
{
	int j;

1391
	qeth_tx_complete_pending_bufs(q->card, q, true, 0);
1392

1393 1394 1395
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
1396

1397
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
1398
		if (free) {
1399
			qeth_free_out_buf(q->bufs[j]);
1400 1401 1402
			q->bufs[j] = NULL;
		}
	}
F
Frank Blaschka 已提交
1403 1404
}

1405
static void qeth_drain_output_queues(struct qeth_card *card)
F
Frank Blaschka 已提交
1406
{
1407
	int i;
F
Frank Blaschka 已提交
1408

C
Carsten Otte 已提交
1409
	QETH_CARD_TEXT(card, 2, "clearqdbf");
F
Frank Blaschka 已提交
1410
	/* clear outbound buffers to free skbs */
1411
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
1412 1413
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
1414
	}
F
Frank Blaschka 已提交
1415 1416
}

1417
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1418
{
1419
	unsigned int max = single ? 1 : card->dev->num_tx_queues;
1420

1421
	if (card->qdio.no_out_queues == max)
1422
		return;
1423

1424
	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1425
		qeth_free_qdio_queues(card);
1426

1427
	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
1428 1429
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

1430
	card->qdio.no_out_queues = max;
1431 1432
}

1433
static int qeth_update_from_chp_desc(struct qeth_card *card)
F
Frank Blaschka 已提交
1434 1435
{
	struct ccw_device *ccwdev;
1436
	struct channel_path_desc_fmt0 *chp_dsc;
F
Frank Blaschka 已提交
1437

1438
	QETH_CARD_TEXT(card, 2, "chp_desc");
F
Frank Blaschka 已提交
1439 1440

	ccwdev = card->data.ccwdev;
1441 1442
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
1443
		return -ENOMEM;
1444 1445 1446

	card->info.func_level = 0x4100 + chp_dsc->desc;

1447 1448
	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
1449
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1450

1451
	kfree(chp_dsc);
1452 1453
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1454
	return 0;
F
Frank Blaschka 已提交
1455 1456 1457 1458
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
1459
	QETH_CARD_TEXT(card, 4, "intqdinf");
F
Frank Blaschka 已提交
1460
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1461 1462 1463
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

F
Frank Blaschka 已提交
1464
	/* inbound */
1465
	card->qdio.no_in_queues = 1;
F
Frank Blaschka 已提交
1466
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1467
	if (IS_IQD(card))
1468 1469 1470
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
F
Frank Blaschka 已提交
1471 1472 1473 1474 1475
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

1476
static void qeth_set_initial_options(struct qeth_card *card)
F
Frank Blaschka 已提交
1477 1478 1479
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
E
Einar Lueck 已提交
1480
	card->options.isolation = ISOLATION_MODE_NONE;
1481
	card->options.cq = QETH_CQ_DISABLED;
1482
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
F
Frank Blaschka 已提交
1483 1484 1485 1486 1487 1488 1489 1490
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
C
Carsten Otte 已提交
1491
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
F
Frank Blaschka 已提交
1492 1493 1494 1495 1496 1497 1498 1499
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

1500
static int qeth_do_reset(void *data);
F
Frank Blaschka 已提交
1501 1502
static void qeth_start_kernel_thread(struct work_struct *work)
{
1503
	struct task_struct *ts;
F
Frank Blaschka 已提交
1504 1505
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
1506
	QETH_CARD_TEXT(card, 2, "strthrd");
F
Frank Blaschka 已提交
1507 1508 1509 1510

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
1511
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1512
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
1513 1514 1515 1516 1517 1518
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
F
Frank Blaschka 已提交
1519 1520
}

1521
static void qeth_buffer_reclaim_work(struct work_struct *);
1522
static void qeth_setup_card(struct qeth_card *card)
F
Frank Blaschka 已提交
1523
{
1524
	QETH_CARD_TEXT(card, 2, "setupcrd");
F
Frank Blaschka 已提交
1525

1526
	card->info.type = CARD_RDEV(card)->id.driver_info;
F
Frank Blaschka 已提交
1527 1528 1529
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
1530
	mutex_init(&card->conf_mutex);
1531
	mutex_init(&card->discipline_mutex);
F
Frank Blaschka 已提交
1532 1533 1534
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
1535
	qeth_set_initial_options(card);
F
Frank Blaschka 已提交
1536 1537 1538
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
1539
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
S
Stefan Raspl 已提交
1540
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1541
	hash_init(card->rx_mode_addrs);
1542 1543 1544 1545
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
F
Frank Blaschka 已提交
1546 1547
}

1548 1549 1550 1551
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
1552 1553 1554
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
1555 1556
}

1557
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
F
Frank Blaschka 已提交
1558 1559 1560
{
	struct qeth_card *card;

1561
	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1562
	card = kzalloc(sizeof(*card), GFP_KERNEL);
F
Frank Blaschka 已提交
1563
	if (!card)
1564
		goto out;
1565
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1566 1567

	card->gdev = gdev;
1568
	dev_set_drvdata(&gdev->dev, card);
1569 1570 1571
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];
1572

1573 1574
	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
1575 1576
	if (!card->event_wq)
		goto out_wq;
1577 1578 1579 1580

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;
1581

1582 1583 1584 1585 1586
	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

1587 1588
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
F
Frank Blaschka 已提交
1589
	return card;
1590

1591
out_read_cmd:
1592 1593
	destroy_workqueue(card->event_wq);
out_wq:
1594
	dev_set_drvdata(&gdev->dev, NULL);
1595 1596 1597
	kfree(card);
out:
	return NULL;
F
Frank Blaschka 已提交
1598 1599
}

1600 1601
static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
F
Frank Blaschka 已提交
1602 1603 1604
{
	int rc;

C
Carsten Otte 已提交
1605
	QETH_CARD_TEXT(card, 3, "clearch");
J
Julian Wiedmann 已提交
1606
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1607
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
J
Julian Wiedmann 已提交
1608
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
F
Frank Blaschka 已提交
1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

1622 1623
static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
F
Frank Blaschka 已提交
1624 1625 1626
{
	int rc;

C
Carsten Otte 已提交
1627
	QETH_CARD_TEXT(card, 3, "haltch");
J
Julian Wiedmann 已提交
1628
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1629
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
J
Julian Wiedmann 已提交
1630
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
F
Frank Blaschka 已提交
1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

1643
static int qeth_stop_channel(struct qeth_channel *channel)
1644 1645 1646 1647 1648 1649 1650
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
1651
	if (channel->active_cmd)
1652 1653
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
1654

1655
	cdev->handler = NULL;
1656 1657 1658 1659 1660
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

1661 1662 1663 1664 1665 1666
static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
1667
	xchg(&channel->active_cmd, NULL);
1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

F
Frank Blaschka 已提交
1686 1687 1688 1689
static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

C
Carsten Otte 已提交
1690
	QETH_CARD_TEXT(card, 3, "haltchs");
1691 1692 1693
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
F
Frank Blaschka 已提交
1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

C
Carsten Otte 已提交
1705
	QETH_CARD_TEXT(card, 3, "clearchs");
1706 1707 1708
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
F
Frank Blaschka 已提交
1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

C
Carsten Otte 已提交
1720
	QETH_CARD_TEXT(card, 3, "clhacrd");
F
Frank Blaschka 已提交
1721 1722 1723 1724 1725 1726 1727 1728

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

1729
static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
F
Frank Blaschka 已提交
1730 1731 1732
{
	int rc = 0;

C
Carsten Otte 已提交
1733
	QETH_CARD_TEXT(card, 3, "qdioclr");
F
Frank Blaschka 已提交
1734 1735 1736
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
1737
		if (IS_IQD(card))
J
Jan Glauber 已提交
1738
			rc = qdio_shutdown(CARD_DDEV(card),
F
Frank Blaschka 已提交
1739 1740
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
J
Jan Glauber 已提交
1741
			rc = qdio_shutdown(CARD_DDEV(card),
F
Frank Blaschka 已提交
1742 1743
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
C
Carsten Otte 已提交
1744
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
F
Frank Blaschka 已提交
1745 1746 1747 1748 1749 1750 1751 1752 1753
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
C
Carsten Otte 已提交
1754
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
F
Frank Blaschka 已提交
1755 1756 1757
	return rc;
}

1758 1759 1760 1761 1762 1763 1764 1765 1766
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

1767
	QETH_CARD_TEXT(card, 2, "vmlayer");
1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
1810
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
1811 1812 1813
	return disc;
}

1814 1815 1816
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
1817 1818
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

J
Julian Wiedmann 已提交
1819
	if (IS_OSM(card))
1820
		disc = QETH_DISCIPLINE_LAYER2;
1821 1822 1823
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);
1824 1825 1826

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
1827
		QETH_CARD_TEXT(card, 3, "force l2");
1828 1829
		break;
	case QETH_DISCIPLINE_LAYER3:
1830
		QETH_CARD_TEXT(card, 3, "force l3");
1831 1832
		break;
	default:
1833
		QETH_CARD_TEXT(card, 3, "force no");
1834 1835
	}

1836
	return disc;
1837 1838
}

1839
static void qeth_set_blkt_defaults(struct qeth_card *card)
1840
{
1841
	QETH_CARD_TEXT(card, 2, "cfgblkt");
1842

1843
	if (card->info.use_v1_blkt) {
1844 1845 1846
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
1847 1848 1849 1850
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
1851
	}
F
Frank Blaschka 已提交
1852 1853
}

1854
static void qeth_idx_init(struct qeth_card *card)
F
Frank Blaschka 已提交
1855
{
1856 1857
	memset(&card->seqno, 0, sizeof(card->seqno));

F
Frank Blaschka 已提交
1858 1859 1860 1861 1862 1863
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

1864 1865
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
1866
		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
1867 1868 1869 1870 1871 1872
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
F
Frank Blaschka 已提交
1873 1874 1875
	}
}

1876
static void qeth_idx_finalize_cmd(struct qeth_card *card,
1877
				  struct qeth_cmd_buffer *iob)
1878 1879 1880 1881 1882 1883 1884
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

F
Frank Blaschka 已提交
1885 1886 1887 1888 1889 1890 1891 1892 1893
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

1894
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1895
				  struct qeth_cmd_buffer *iob)
F
Frank Blaschka 已提交
1896
{
1897
	qeth_idx_finalize_cmd(card, iob);
F
Frank Blaschka 已提交
1898 1899 1900 1901 1902 1903

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1904 1905

	iob->callback = qeth_release_buffer_cb;
F
Frank Blaschka 已提交
1906 1907
}

1908 1909 1910 1911 1912 1913 1914
static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

1915
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1916
						  const void *data,
1917
						  unsigned int data_length)
1918 1919 1920
{
	struct qeth_cmd_buffer *iob;

1921 1922 1923 1924 1925 1926 1927 1928
	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
1929
	iob->match = qeth_mpc_match_reply;
1930 1931 1932
	return iob;
}

E
Eugene Crosser 已提交
1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946
/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
1947 1948 1949 1950 1951
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
E
Eugene Crosser 已提交
1952 1953 1954
 * field 'param' of the structure qeth_reply.
 */

1955
static int qeth_send_control_data(struct qeth_card *card,
1956 1957 1958 1959 1960
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
F
Frank Blaschka 已提交
1961
{
1962
	struct qeth_channel *channel = iob->channel;
1963
	struct qeth_reply *reply = &iob->reply;
1964
	long timeout = iob->timeout;
F
Frank Blaschka 已提交
1965 1966
	int rc;

C
Carsten Otte 已提交
1967
	QETH_CARD_TEXT(card, 2, "sendctl");
F
Frank Blaschka 已提交
1968 1969 1970

	reply->callback = reply_cb;
	reply->param = reply_param;
1971

1972
	timeout = wait_event_interruptible_timeout(card->wait_q,
1973
						   qeth_trylock_channel(channel, iob),
1974 1975
						   timeout);
	if (timeout <= 0) {
1976
		qeth_put_cmd(iob);
1977 1978
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}
F
Frank Blaschka 已提交
1979

1980
	if (iob->finalize)
1981 1982
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
1983

1984
	qeth_enqueue_cmd(card, iob);
1985

1986 1987 1988
	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

C
Carsten Otte 已提交
1989
	QETH_CARD_TEXT(card, 6, "noirqpnd");
J
Julian Wiedmann 已提交
1990
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1991
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
1992
				      (addr_t) iob, 0, 0, timeout);
J
Julian Wiedmann 已提交
1993
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
F
Frank Blaschka 已提交
1994
	if (rc) {
1995 1996
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
C
Carsten Otte 已提交
1997
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
1998
		qeth_dequeue_cmd(card, iob);
1999
		qeth_put_cmd(iob);
2000
		qeth_unlock_channel(card, channel);
2001
		goto out;
F
Frank Blaschka 已提交
2002
	}
2003

2004
	timeout = wait_for_completion_interruptible_timeout(&iob->done,
2005 2006 2007
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2008

2009
	qeth_dequeue_cmd(card, iob);
2010 2011 2012

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
2013
		spin_lock_irq(&iob->lock);
2014 2015
		if (rc)
			/* Zap any callback that's still pending: */
2016 2017
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
2018 2019
	}

2020
	if (!rc)
2021
		rc = iob->rc;
2022 2023 2024

out:
	qeth_put_cmd(iob);
2025
	return rc;
F
Frank Blaschka 已提交
2026 2027
}

2028 2029 2030 2031 2032 2033
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

2034
static void qeth_read_conf_data_cb(struct qeth_card *card,
2035 2036
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
2037
{
2038
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2039
	int rc = 0;
2040
	u8 *tag;
2041 2042

	QETH_CARD_TEXT(card, 2, "cfgunit");
2043 2044 2045 2046 2047 2048

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061
	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;
2062

2063
out:
2064
	qeth_notify_cmd(iob, rc);
2065
	qeth_put_cmd(iob);
2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
2078 2079
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;
2080 2081 2082 2083 2084 2085 2086 2087 2088

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

2089
	return qeth_send_control_data(card, iob, NULL, NULL);
2090 2091
}

2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
2106 2107
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

2126
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2127 2128
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
2129
{
2130
	struct qeth_channel *channel = iob->channel;
2131 2132 2133
	u16 peer_level;
	int rc;

2134
	QETH_CARD_TEXT(card, 2, "idxrdcb");
2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
2156
	qeth_notify_cmd(iob, rc);
2157
	qeth_put_cmd(iob);
2158 2159
}

2160
static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2161 2162
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
2163
{
2164
	struct qeth_channel *channel = iob->channel;
2165 2166 2167
	u16 peer_level;
	int rc;

2168
	QETH_CARD_TEXT(card, 2, "idxwrcb");
2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
2184
	qeth_notify_cmd(iob, rc);
2185
	qeth_put_cmd(iob);
2186 2187 2188 2189 2190 2191 2192
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
2193
	struct ccw1 *ccw = __ccw_from_cmd(iob);
2194

2195 2196 2197
	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2198 2199
	iob->finalize = qeth_idx_finalize_cmd;

2200
	port |= QETH_IDX_ACT_INVAL_FRAME;
2201 2202 2203 2204 2205
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
2206
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2207 2208 2209 2210 2211 2212 2213 2214 2215
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

2216
	QETH_CARD_TEXT(card, 2, "idxread");
2217

2218
	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2219 2220 2221 2222 2223
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
2224
	iob->callback = qeth_idx_activate_read_channel_cb;
2225

2226
	rc = qeth_send_control_data(card, iob, NULL, NULL);
2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

2240
	QETH_CARD_TEXT(card, 2, "idxwrite");
2241

2242
	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2243 2244 2245 2246 2247
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
2248
	iob->callback = qeth_idx_activate_write_channel_cb;
2249

2250
	rc = qeth_send_control_data(card, iob, NULL, NULL);
2251 2252 2253 2254 2255 2256 2257
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

F
Frank Blaschka 已提交
2258 2259 2260 2261 2262
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2263
	QETH_CARD_TEXT(card, 2, "cmenblcb");
F
Frank Blaschka 已提交
2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2276
	QETH_CARD_TEXT(card, 2, "cmenable");
F
Frank Blaschka 已提交
2277

2278
	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2279 2280
	if (!iob)
		return -ENOMEM;
2281

F
Frank Blaschka 已提交
2282 2283 2284 2285 2286
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

2287
	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
F
Frank Blaschka 已提交
2288 2289 2290 2291 2292 2293 2294
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2295
	QETH_CARD_TEXT(card, 2, "cmsetpcb");
F
Frank Blaschka 已提交
2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2308
	QETH_CARD_TEXT(card, 2, "cmsetup");
F
Frank Blaschka 已提交
2309

2310
	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2311 2312
	if (!iob)
		return -ENOMEM;
2313

F
Frank Blaschka 已提交
2314 2315 2316 2317 2318 2319
	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2320
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
F
Frank Blaschka 已提交
2321 2322
}

2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333
static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

2334
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
F
Frank Blaschka 已提交
2335
{
2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
2355
			qeth_free_qdio_queues(card);
2356 2357 2358 2359
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
2360
		else if (IS_LAYER2(card))
2361 2362 2363
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
F
Frank Blaschka 已提交
2364
	}
2365 2366 2367 2368 2369

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
F
Frank Blaschka 已提交
2370 2371
}

J
Julian Wiedmann 已提交
2372
static int qeth_get_mtu_outof_framesize(int framesize)
F
Frank Blaschka 已提交
2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
2394
	u8 link_type = 0;
F
Frank Blaschka 已提交
2395

2396
	QETH_CARD_TEXT(card, 2, "ulpenacb");
F
Frank Blaschka 已提交
2397 2398 2399 2400 2401

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
2402
	if (IS_IQD(card)) {
F
Frank Blaschka 已提交
2403 2404 2405
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
2406
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
F
Frank Blaschka 已提交
2407
	}
2408
	*(u16 *)reply->param = mtu;
F
Frank Blaschka 已提交
2409 2410 2411 2412 2413

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2414 2415 2416 2417 2418
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
2419
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
F
Frank Blaschka 已提交
2420 2421 2422
	return 0;
}

2423 2424
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
2425
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
2426 2427
}

F
Frank Blaschka 已提交
2428 2429
static int qeth_ulp_enable(struct qeth_card *card)
{
2430
	u8 prot_type = qeth_mpc_select_prot_type(card);
F
Frank Blaschka 已提交
2431
	struct qeth_cmd_buffer *iob;
2432
	u16 max_mtu;
2433
	int rc;
F
Frank Blaschka 已提交
2434

2435
	QETH_CARD_TEXT(card, 2, "ulpenabl");
F
Frank Blaschka 已提交
2436

2437
	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2438 2439
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2440

2441
	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
F
Frank Blaschka 已提交
2442 2443 2444 2445 2446
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2447
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2448 2449 2450
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
F
Frank Blaschka 已提交
2451 2452 2453 2454 2455 2456 2457
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2458
	QETH_CARD_TEXT(card, 2, "ulpstpcb");
F
Frank Blaschka 已提交
2459 2460 2461 2462 2463

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
2464 2465
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
2466
		QETH_CARD_TEXT(card, 2, "olmlimit");
2467 2468
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
2469
		return -EMLINK;
2470
	}
S
Stefan Raspl 已提交
2471
	return 0;
F
Frank Blaschka 已提交
2472 2473 2474 2475 2476 2477 2478
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

2479
	QETH_CARD_TEXT(card, 2, "ulpsetup");
F
Frank Blaschka 已提交
2480

2481
	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2482 2483
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2484 2485 2486 2487 2488 2489 2490 2491

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

2492
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
F
Frank Blaschka 已提交
2493 2494
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2495
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
F
Frank Blaschka 已提交
2496 2497
}

2498 2499
static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
2500 2501 2502
{
	struct qeth_qdio_out_buffer *newbuf;

2503
	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2504 2505 2506
	if (!newbuf)
		return -ENOMEM;

2507
	newbuf->buffer = q->qdio_bufs[bidx];
2508 2509 2510 2511
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
2512
	return 0;
2513 2514
}

2515
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2516 2517 2518 2519
{
	if (!q)
		return;

2520
	qeth_drain_output_queue(q, true);
2521 2522 2523 2524
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

2525
static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2526 2527
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2528
	unsigned int i;
2529 2530 2531 2532

	if (!q)
		return NULL;

2533 2534 2535 2536
	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2537
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2538
			goto err_out_bufs;
2539
	}
2540

2541
	return q;
2542 2543 2544

err_out_bufs:
	while (i > 0)
2545
		qeth_free_out_buf(q->bufs[--i]);
2546 2547 2548 2549
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
2550
}
2551

2552 2553 2554 2555 2556 2557 2558 2559
static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

2560
static int qeth_alloc_qdio_queues(struct qeth_card *card)
F
Frank Blaschka 已提交
2561
{
2562
	unsigned int i;
F
Frank Blaschka 已提交
2563

2564
	QETH_CARD_TEXT(card, 2, "allcqdbf");
F
Frank Blaschka 已提交
2565 2566 2567 2568 2569

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

2570
	QETH_CARD_TEXT(card, 2, "inq");
S
Sebastian Ott 已提交
2571
	card->qdio.in_q = qeth_alloc_qdio_queue();
F
Frank Blaschka 已提交
2572 2573
	if (!card->qdio.in_q)
		goto out_nomem;
S
Sebastian Ott 已提交
2574

F
Frank Blaschka 已提交
2575 2576 2577
	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;
2578

F
Frank Blaschka 已提交
2579 2580
	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2581 2582 2583 2584
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
F
Frank Blaschka 已提交
2585
			goto out_freeoutq;
2586
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2587 2588 2589 2590
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
2591
		INIT_LIST_HEAD(&queue->pending_bufs);
2592
		spin_lock_init(&queue->lock);
2593
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2594 2595 2596 2597 2598 2599 2600 2601 2602
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
2603
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
F
Frank Blaschka 已提交
2604
	}
2605 2606 2607 2608 2609

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

F
Frank Blaschka 已提交
2610 2611 2612
	return 0;

out_freeoutq:
2613
	while (i > 0) {
2614
		qeth_free_output_queue(card->qdio.out_qs[--i]);
2615 2616
		card->qdio.out_qs[i] = NULL;
	}
F
Frank Blaschka 已提交
2617 2618
	qeth_free_buffer_pool(card);
out_freeinq:
S
Sebastian Ott 已提交
2619
	qeth_free_qdio_queue(card->qdio.in_q);
F
Frank Blaschka 已提交
2620 2621 2622 2623 2624 2625
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

2626
static void qeth_free_qdio_queues(struct qeth_card *card)
2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
2644 2645 2646
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
2647 2648 2649
	}
}

2650 2651 2652
static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
2653 2654 2655
	struct qeth_qdio_out_q *queue;
	unsigned int i;

2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672
	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
F
Frank Blaschka 已提交
2688 2689 2690 2691
}

static int qeth_qdio_activate(struct qeth_card *card)
{
2692
	QETH_CARD_TEXT(card, 3, "qdioact");
J
Jan Glauber 已提交
2693
	return qdio_activate(CARD_DDEV(card));
F
Frank Blaschka 已提交
2694 2695 2696 2697 2698 2699
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2700
	QETH_CARD_TEXT(card, 2, "dmact");
F
Frank Blaschka 已提交
2701

2702
	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2703 2704
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2705 2706 2707 2708 2709

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2710
	return qeth_send_control_data(card, iob, NULL, NULL);
F
Frank Blaschka 已提交
2711 2712 2713 2714 2715 2716
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

2717
	QETH_CARD_TEXT(card, 2, "mpcinit");
F
Frank Blaschka 已提交
2718 2719 2720

	rc = qeth_issue_next_read(card);
	if (rc) {
2721
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
F
Frank Blaschka 已提交
2722 2723 2724 2725
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
2726
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2727
		return rc;
F
Frank Blaschka 已提交
2728 2729 2730
	}
	rc = qeth_cm_setup(card);
	if (rc) {
2731
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2732
		return rc;
F
Frank Blaschka 已提交
2733 2734 2735
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
2736
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2737
		return rc;
F
Frank Blaschka 已提交
2738 2739 2740
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
2741
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2742
		return rc;
F
Frank Blaschka 已提交
2743
	}
2744
	rc = qeth_alloc_qdio_queues(card);
F
Frank Blaschka 已提交
2745
	if (rc) {
2746
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2747
		return rc;
F
Frank Blaschka 已提交
2748 2749 2750
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
2751
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2752
		qeth_free_qdio_queues(card);
2753
		return rc;
F
Frank Blaschka 已提交
2754 2755 2756
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
2757
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2758
		return rc;
F
Frank Blaschka 已提交
2759 2760 2761
	}
	rc = qeth_dm_act(card);
	if (rc) {
2762
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2763
		return rc;
F
Frank Blaschka 已提交
2764 2765 2766 2767 2768
	}

	return 0;
}

2769
static void qeth_print_status_message(struct qeth_card *card)
F
Frank Blaschka 已提交
2770 2771
{
	switch (card->info.type) {
2772 2773 2774
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
F
Frank Blaschka 已提交
2775 2776 2777 2778 2779 2780 2781 2782 2783 2784
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
2785
		fallthrough;
F
Frank Blaschka 已提交
2786
	case QETH_CARD_TYPE_IQD:
2787
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
F
Frank Blaschka 已提交
2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
2802 2803 2804 2805 2806 2807 2808
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
F
Frank Blaschka 已提交
2809 2810 2811 2812 2813 2814
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

C
Carsten Otte 已提交
2815
	QETH_CARD_TEXT(card, 5, "inwrklst");
F
Frank Blaschka 已提交
2816 2817 2818 2819 2820 2821 2822

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

J
Julian Wiedmann 已提交
2823 2824
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
F
Frank Blaschka 已提交
2825 2826 2827 2828 2829 2830 2831
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

2832
	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
F
Frank Blaschka 已提交
2833 2834
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2835
			if (page_count(entry->elements[i]) > 1) {
F
Frank Blaschka 已提交
2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
2847 2848
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
F
Frank Blaschka 已提交
2849
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2850
		if (page_count(entry->elements[i]) > 1) {
2851
			struct page *page = dev_alloc_page();
2852 2853

			if (!page)
F
Frank Blaschka 已提交
2854
				return NULL;
2855 2856 2857 2858

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
F
Frank Blaschka 已提交
2859 2860 2861 2862 2863 2864 2865 2866 2867
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
2868
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
F
Frank Blaschka 已提交
2869 2870
	int i;

2871
	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2872
		buf->rx_skb = netdev_alloc_skb(card->dev,
2873 2874
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
2875
		if (!buf->rx_skb)
2876
			return -ENOMEM;
2877 2878
	}

2879 2880 2881 2882 2883 2884 2885
	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}
F
Frank Blaschka 已提交
2886 2887 2888 2889 2890 2891 2892 2893 2894

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
2895
		buf->buffer->element[i].addr =
2896
			page_to_phys(pool_entry->elements[i]);
F
Frank Blaschka 已提交
2897
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2898
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
F
Frank Blaschka 已提交
2899
		else
2900 2901
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
F
Frank Blaschka 已提交
2902 2903 2904 2905
	}
	return 0;
}

J
Julian Wiedmann 已提交
2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

2918
static int qeth_init_qdio_queues(struct qeth_card *card)
F
Frank Blaschka 已提交
2919
{
2920
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2921
	unsigned int i;
F
Frank Blaschka 已提交
2922 2923
	int rc;

2924
	QETH_CARD_TEXT(card, 2, "initqdqs");
F
Frank Blaschka 已提交
2925 2926

	/* inbound queue */
2927 2928
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));
2929

F
Frank Blaschka 已提交
2930 2931
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
2932
	for (i = 0; i < rx_bufs; i++) {
2933 2934 2935 2936 2937
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

2938
	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2939 2940
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
		     NULL);
F
Frank Blaschka 已提交
2941
	if (rc) {
2942
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
F
Frank Blaschka 已提交
2943 2944
		return rc;
	}
2945 2946 2947 2948 2949 2950 2951

	/* completion */
	rc = qeth_cq_init(card);
	if (rc) {
		return rc;
	}

F
Frank Blaschka 已提交
2952 2953
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2954 2955 2956 2957 2958 2959
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
2960
		queue->prev_hdr = NULL;
2961
		queue->coalesced_frames = 0;
2962
		queue->bulk_start = 0;
J
Julian Wiedmann 已提交
2963 2964
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2965 2966
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
2967
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
F
Frank Blaschka 已提交
2968 2969 2970 2971
	}
	return 0;
}

2972
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2973
				  struct qeth_cmd_buffer *iob)
2974
{
2975
	qeth_mpc_finalize_cmd(card, iob);
2976 2977

	/* override with IPA-specific values: */
2978
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2979 2980
}

J
Julian Wiedmann 已提交
2981 2982
static void qeth_prepare_ipa_cmd(struct qeth_card *card,
				 struct qeth_cmd_buffer *iob, u16 cmd_length)
2983 2984
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
2985
	u16 total_length = iob->length;
2986

2987 2988
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
2989
	iob->finalize = qeth_ipa_finalize_cmd;
2990

2991
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2992
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2993
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2994 2995
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2996 2997
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2998
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2999 3000
}

3001 3002 3003 3004 3005 3006 3007 3008
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}

3009 3010 3011 3012 3013 3014
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
3015
	struct qeth_ipacmd_hdr *hdr;
3016 3017 3018 3019 3020 3021 3022

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

J
Julian Wiedmann 已提交
3023 3024
	qeth_prepare_ipa_cmd(card, iob, data_length);
	iob->match = qeth_ipa_match_reply;
3025 3026 3027 3028 3029

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
3030
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3031 3032 3033 3034
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
3035 3036 3037 3038
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

3039 3040 3041 3042 3043 3044 3045 3046
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

E
Eugene Crosser 已提交
3047 3048 3049 3050 3051 3052
/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */

F
Frank Blaschka 已提交
3053 3054 3055 3056 3057 3058 3059
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

C
Carsten Otte 已提交
3060
	QETH_CARD_TEXT(card, 4, "sendipa");
3061

3062
	if (card->read_or_write_problem) {
3063
		qeth_put_cmd(iob);
3064 3065 3066
		return -EIO;
	}

3067 3068
	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
3069
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3070 3071 3072 3073
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
F
Frank Blaschka 已提交
3074 3075 3076 3077
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088
static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

3089
static int qeth_send_startlan(struct qeth_card *card)
F
Frank Blaschka 已提交
3090
{
3091
	struct qeth_cmd_buffer *iob;
F
Frank Blaschka 已提交
3092

3093
	QETH_CARD_TEXT(card, 2, "strtlan");
F
Frank Blaschka 已提交
3094

3095
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3096 3097
	if (!iob)
		return -ENOMEM;
3098
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
F
Frank Blaschka 已提交
3099 3100
}

3101
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
F
Frank Blaschka 已提交
3102
{
3103
	if (!cmd->hdr.return_code)
F
Frank Blaschka 已提交
3104 3105
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
3106
	return cmd->hdr.return_code;
F
Frank Blaschka 已提交
3107 3108 3109 3110 3111
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3112
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3113
	struct qeth_query_cmds_supp *query_cmd;
F
Frank Blaschka 已提交
3114

C
Carsten Otte 已提交
3115
	QETH_CARD_TEXT(card, 3, "quyadpcb");
3116
	if (qeth_setadpparms_inspect_rc(cmd))
3117
		return -EIO;
F
Frank Blaschka 已提交
3118

3119 3120 3121 3122 3123 3124
	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
3125
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3126
	}
3127 3128

	card->options.adp.supported = query_cmd->supported_cmds;
3129
	return 0;
F
Frank Blaschka 已提交
3130 3131
}

S
Stefan Raspl 已提交
3132
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3133 3134
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
F
Frank Blaschka 已提交
3135
{
3136
	struct qeth_ipacmd_setadpparms_hdr *hdr;
F
Frank Blaschka 已提交
3137 3138
	struct qeth_cmd_buffer *iob;

3139 3140 3141 3142 3143 3144
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;
F
Frank Blaschka 已提交
3145

3146 3147 3148 3149 3150
	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
F
Frank Blaschka 已提交
3151 3152 3153
	return iob;
}

3154
static int qeth_query_setadapterparms(struct qeth_card *card)
F
Frank Blaschka 已提交
3155 3156 3157 3158
{
	int rc;
	struct qeth_cmd_buffer *iob;

C
Carsten Otte 已提交
3159
	QETH_CARD_TEXT(card, 3, "queryadp");
F
Frank Blaschka 已提交
3160
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3161
				   SETADP_DATA_SIZEOF(query_cmds_supp));
3162 3163
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
3164 3165 3166 3167
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

3168 3169 3170 3171 3172
static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

3173
	QETH_CARD_TEXT(card, 2, "qipasscb");
3174 3175

	cmd = (struct qeth_ipa_cmd *) data;
3176 3177

	switch (cmd->hdr.return_code) {
3178 3179
	case IPA_RC_SUCCESS:
		break;
3180 3181
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
3182
		QETH_CARD_TEXT(card, 2, "ipaunsup");
3183 3184
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3185
		return -EOPNOTSUPP;
3186
	default:
3187 3188 3189
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
3190 3191
	}

3192 3193 3194 3195 3196
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
3197 3198
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
3199 3200 3201
	return 0;
}

3202 3203
static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
3204 3205 3206 3207
{
	int rc;
	struct qeth_cmd_buffer *iob;

3208
	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3209
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3210 3211
	if (!iob)
		return -ENOMEM;
3212 3213 3214 3215
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

3216 3217 3218
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
3219
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3220
	struct qeth_query_switch_attributes *attrs;
3221
	struct qeth_switch_info *sw_info;
3222 3223

	QETH_CARD_TEXT(card, 2, "qswiatcb");
3224
	if (qeth_setadpparms_inspect_rc(cmd))
3225
		return -EIO;
3226

3227 3228 3229 3230 3231 3232
	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
3246
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3247 3248
	if (!iob)
		return -ENOMEM;
3249 3250 3251 3252
	return qeth_send_ipa_cmd(card, iob,
				qeth_query_switch_attributes_cb, sw_info);
}

3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

3272 3273 3274
static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3275 3276
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;
3277

3278
	if (rc) {
3279
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3280 3281 3282 3283
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
3284 3285 3286 3287 3288 3289 3290
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

3291
	QETH_CARD_TEXT(card, 2, "qdiagass");
3292
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3293 3294
	if (!iob)
		return -ENOMEM;
3295 3296 3297 3298 3299 3300 3301 3302 3303
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
3304
	int level;
3305 3306 3307 3308 3309 3310 3311

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
3312 3313
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3314
		tid->lparnr = info222->lpar_number;
3315
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3316 3317 3318 3319 3320 3321 3322 3323 3324
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3325 3326
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;
3327

3328
	if (rc) {
3329
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3330 3331
		return -EIO;
	}
3332 3333 3334 3335 3336 3337 3338 3339
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

3340
	QETH_CARD_TEXT(card, 2, "diagtrap");
3341
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3342 3343
	if (!iob)
		return -ENOMEM;
3344
	cmd = __ipa_cmd(iob);
3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

3364 3365 3366 3367
static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
F
Frank Blaschka 已提交
3368
{
J
Jan Glauber 已提交
3369
	if (qdio_error) {
C
Carsten Otte 已提交
3370
		QETH_CARD_TEXT(card, 2, dbftext);
C
Carsten Otte 已提交
3371
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3372
			       buf->element[15].sflags);
C
Carsten Otte 已提交
3373
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3374
			       buf->element[14].sflags);
C
Carsten Otte 已提交
3375
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3376
		if ((buf->element[15].sflags) == 0x12) {
3377
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3378 3379 3380
			return 0;
		} else
			return 1;
F
Frank Blaschka 已提交
3381 3382 3383 3384
	}
	return 0;
}

3385 3386
static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
F
Frank Blaschka 已提交
3387 3388
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
3389
	struct list_head *lh;
F
Frank Blaschka 已提交
3390 3391 3392 3393 3394 3395 3396 3397 3398
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
J
Julian Wiedmann 已提交
3399
				&queue->bufs[QDIO_BUFNR(i)])) {
F
Frank Blaschka 已提交
3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

3415 3416 3417 3418 3419 3420 3421 3422 3423 3424
		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
3425
			return 0;
3426 3427
		}

J
Jan Glauber 已提交
3428
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3429
			     queue->next_buf_to_init, count, NULL);
F
Frank Blaschka 已提交
3430
		if (rc) {
C
Carsten Otte 已提交
3431
			QETH_CARD_TEXT(card, 2, "qinberr");
F
Frank Blaschka 已提交
3432
		}
J
Julian Wiedmann 已提交
3433 3434
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
3435
		return count;
F
Frank Blaschka 已提交
3436
	}
3437 3438

	return 0;
F
Frank Blaschka 已提交
3439
}
3440 3441 3442

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
3443 3444 3445
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);
3446

3447 3448 3449 3450
	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
3451
}
F
Frank Blaschka 已提交
3452

3453
static void qeth_handle_send_error(struct qeth_card *card,
J
Jan Glauber 已提交
3454
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
F
Frank Blaschka 已提交
3455
{
3456
	int sbalf15 = buffer->buffer->element[15].sflags;
F
Frank Blaschka 已提交
3457

C
Carsten Otte 已提交
3458
	QETH_CARD_TEXT(card, 6, "hdsnderr");
3459
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3460 3461

	if (!qdio_err)
3462
		return;
3463 3464

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
3465
		return;
3466

C
Carsten Otte 已提交
3467 3468
	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
3469
		       (u16)qdio_err, (u8)sbalf15);
F
Frank Blaschka 已提交
3470 3471
}

3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487
/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
J
Julian Wiedmann 已提交
3488
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
3489 3490 3491 3492 3493
		return 1;
	}
	return 0;
}

F
Frank Blaschka 已提交
3494 3495 3496 3497 3498 3499 3500 3501 3502 3503
/*
 * Switched to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK){
			/* switch non-PACKING -> PACKING */
C
Carsten Otte 已提交
3504
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
3505
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
F
Frank Blaschka 已提交
3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
C
Carsten Otte 已提交
3523
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
3524
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
F
Frank Blaschka 已提交
3525
			queue->do_pack = 0;
3526
			return qeth_prep_flush_pack_buffer(queue);
F
Frank Blaschka 已提交
3527 3528 3529 3530 3531
		}
	}
	return 0;
}

J
Jan Glauber 已提交
3532 3533
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
F
Frank Blaschka 已提交
3534
{
3535
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3536
	struct qeth_card *card = queue->card;
3537
	unsigned int frames, usecs;
3538
	struct qaob *aob = NULL;
F
Frank Blaschka 已提交
3539 3540 3541 3542
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
J
Julian Wiedmann 已提交
3543
		unsigned int bidx = QDIO_BUFNR(i);
3544
		struct sk_buff *skb;
J
Julian Wiedmann 已提交
3545

3546
		buf = queue->bufs[bidx];
3547 3548
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
3549
		queue->coalesced_frames += buf->frames;
F
Frank Blaschka 已提交
3550

3551 3552 3553 3554
		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
3555
	}
F
Frank Blaschka 已提交
3556

3557 3558 3559 3560 3561 3562 3563
	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = qdio_allocate_aob();
			if (buf->aob) {
3564 3565
				struct qeth_qaob_priv1 *priv;

3566
				aob = buf->aob;
3567 3568 3569
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
3570 3571 3572
			}
		}
	} else {
F
Frank Blaschka 已提交
3573 3574 3575 3576 3577 3578 3579 3580
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
3581
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
F
Frank Blaschka 已提交
3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
3594
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
F
Frank Blaschka 已提交
3595 3596 3597 3598
			}
		}
	}

3599
	QETH_TXQ_STAT_INC(queue, doorbell);
3600 3601
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
		     index, count, aob);
3602

3603 3604 3605 3606 3607 3608
	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
3609 3610
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);
3611

3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622
		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
3623
		}
3624

3625 3626
		break;
	default:
C
Carsten Otte 已提交
3627
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3628 3629 3630
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
C
Carsten Otte 已提交
3631
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3632

F
Frank Blaschka 已提交
3633 3634 3635 3636 3637 3638
		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}

3639 3640
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
J
Julian Wiedmann 已提交
3641
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3642

J
Julian Wiedmann 已提交
3643
	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3644
	queue->prev_hdr = NULL;
J
Julian Wiedmann 已提交
3645
	queue->bulk_count = 0;
3646 3647
}

F
Frank Blaschka 已提交
3648 3649 3650 3651 3652 3653 3654 3655
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if weed have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
3670 3671
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
F
Frank Blaschka 已提交
3672
		}
3673 3674

		spin_unlock(&queue->lock);
F
Frank Blaschka 已提交
3675 3676 3677
	}
}

3678
static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3679 3680 3681
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

3682
	napi_schedule_irqoff(&card->napi);
3683 3684
}

3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

3698
		qeth_free_qdio_queues(card);
3699 3700 3701 3702 3703 3704 3705 3706 3707
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;

}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719
static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

3720 3721 3722 3723
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
3724 3725 3726 3727 3728 3729 3730 3731 3732
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
3733
		netif_tx_stop_all_queues(card->dev);
3734
		qeth_schedule_recovery(card);
3735
		return;
3736 3737 3738
	}

	for (i = first_element; i < first_element + count; ++i) {
J
Julian Wiedmann 已提交
3739
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3740
		int e = 0;
3741

3742 3743
		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
3744
			unsigned long phys_aob_addr = buffer->element[e].addr;
3745

3746
			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
3747 3748
			++e;
		}
3749
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3750 3751
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3752
		     cq->next_buf_to_init, count, NULL);
3753 3754 3755 3756 3757
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}
J
Julian Wiedmann 已提交
3758 3759

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3760 3761
}

3762 3763 3764 3765
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
3766 3767 3768
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

3769 3770 3771
	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

3772
	if (qdio_err)
3773 3774 3775
		qeth_schedule_recovery(card);
}

3776 3777 3778 3779
static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
F
Frank Blaschka 已提交
3780 3781 3782
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;

3783 3784 3785
	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
F
Frank Blaschka 已提交
3786 3787
}

3788 3789 3790
/**
 * Note: Function assumes that we have 4 outbound queues.
 */
3791
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
F
Frank Blaschka 已提交
3792
{
J
Julian Wiedmann 已提交
3793
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3794 3795 3796 3797 3798
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
3799 3800
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
3801 3802
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
3803
		case htons(ETH_P_IPV6):
3804 3805 3806 3807
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
F
Frank Blaschka 已提交
3808
		}
3809
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
J
Julian Wiedmann 已提交
3810
			return ~tos >> 6 & 3;
3811
		if (tos & IPTOS_MINCOST)
J
Julian Wiedmann 已提交
3812
			return 3;
3813 3814 3815 3816 3817 3818
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
3819 3820 3821 3822
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
J
Julian Wiedmann 已提交
3823
		return ~skb->priority >> 1 & 3;
3824
	case QETH_PRIO_Q_ING_VLAN:
J
Julian Wiedmann 已提交
3825 3826 3827
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
3828
		break;
3829 3830
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
F
Frank Blaschka 已提交
3831
	default:
3832
		break;
F
Frank Blaschka 已提交
3833
	}
3834
	return card->qdio.default_out_queue;
F
Frank Blaschka 已提交
3835 3836 3837
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

3838 3839 3840 3841 3842 3843 3844
/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
3845
static int qeth_get_elements_for_frags(struct sk_buff *skb)
3846
{
3847
	int cnt, elements = 0;
3848 3849

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3850
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3851 3852 3853 3854

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3855 3856 3857 3858
	}
	return elements;
}

3859 3860 3861 3862 3863 3864 3865 3866 3867
/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
J
Julian Wiedmann 已提交
3868 3869
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
3870 3871 3872 3873 3874 3875 3876 3877 3878
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
F
Frank Blaschka 已提交
3879

3880 3881
#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)
3882

3883
/**
3884 3885
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
3886 3887
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
3888 3889 3890
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
3891 3892 3893 3894
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
3895
 * The number of needed buffer elements is returned in @elements.
3896 3897
 * Error to create the hdr is indicated by returning with < 0.
 */
3898 3899 3900 3901
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
3902
{
3903
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3904
	const unsigned int contiguous = proto_len ? proto_len : 1;
3905
	const unsigned int max_elements = queue->max_elements;
3906 3907 3908 3909 3910 3911
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
3912
	start = (addr_t)skb->data - hdr_len;
3913 3914
	end = (addr_t)skb->data;

3915
	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3916 3917
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
3918 3919 3920 3921 3922
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
J
Julian Wiedmann 已提交
3923 3924
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
3925
		push_ok = true;
3926
		__elements = 1 + qeth_count_elements(skb, 0);
3927 3928 3929 3930
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
3943 3944
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3945
			return rc;
3946
		}
3947

3948
		QETH_TXQ_STAT_INC(queue, skbs_linearized);
3949 3950 3951 3952 3953 3954 3955
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
3956 3957
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
3958
	}
3959 3960

	/* Fall back to cache element with known-good alignment: */
3961 3962
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
3963
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
3964 3965
	if (!*hdr)
		return -ENOMEM;
3966 3967
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3968 3969 3970
	return 0;
}

3971 3972 3973 3974
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
J
Julian Wiedmann 已提交
3975
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005
/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
F
Frank Blaschka 已提交
4006
{
4007 4008
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
4009 4010
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
J
Julian Wiedmann 已提交
4011
	unsigned int elem_length, cnt;
4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4023 4024 4025

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
4026
			__set_bit(element, buf->from_kmem_cache);
4027 4028 4029 4030 4031 4032
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

4033 4034
		element++;
	}
F
Frank Blaschka 已提交
4035

4036
	/* map linear part into buffer element(s) */
F
Frank Blaschka 已提交
4037
	while (length > 0) {
J
Julian Wiedmann 已提交
4038 4039
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));
F
Frank Blaschka 已提交
4040

4041
		buffer->element[element].addr = virt_to_phys(data);
J
Julian Wiedmann 已提交
4042 4043
		buffer->element[element].length = elem_length;
		length -= elem_length;
4044 4045
		if (is_first_elem) {
			is_first_elem = false;
4046 4047
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
4048
				buffer->element[element].eflags =
4049
					SBAL_EFLAGS_FIRST_FRAG;
F
Frank Blaschka 已提交
4050
			else
4051 4052 4053 4054
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
F
Frank Blaschka 已提交
4055
		}
J
Julian Wiedmann 已提交
4056 4057

		data += elem_length;
F
Frank Blaschka 已提交
4058 4059
		element++;
	}
4060

4061
	/* map page frags into buffer element(s) */
4062
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4063 4064 4065 4066
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
4067
		while (length > 0) {
J
Julian Wiedmann 已提交
4068 4069
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));
4070

4071
			buffer->element[element].addr = virt_to_phys(data);
J
Julian Wiedmann 已提交
4072
			buffer->element[element].length = elem_length;
4073 4074
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
J
Julian Wiedmann 已提交
4075 4076 4077

			length -= elem_length;
			data += elem_length;
4078 4079
			element++;
		}
4080 4081
	}

4082 4083
	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4084
	buf->next_element_to_fill = element;
4085
	return element;
F
Frank Blaschka 已提交
4086 4087
}

4088 4089 4090 4091
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
F
Frank Blaschka 已提交
4092
{
4093
	unsigned int bytes = qdisc_pkt_len(skb);
J
Julian Wiedmann 已提交
4094
	struct qeth_qdio_out_buffer *buffer;
4095
	unsigned int next_element;
4096 4097
	struct netdev_queue *txq;
	bool stopped = false;
4098 4099
	bool flush;

J
Julian Wiedmann 已提交
4100
	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4101
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
F
Frank Blaschka 已提交
4102

4103 4104
	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
F
Frank Blaschka 已提交
4105 4106
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4107
		return -EBUSY;
4108

J
Julian Wiedmann 已提交
4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125
	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];
4126

4127 4128 4129 4130 4131 4132 4133
		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4134 4135 4136 4137 4138 4139 4140 4141
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

4142
	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4143
	buffer->bytes += bytes;
4144
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4145
	queue->prev_hdr = hdr;
4146

4147 4148 4149 4150 4151
	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
J
Julian Wiedmann 已提交
4152 4153 4154 4155 4156 4157 4158
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
4159
	}
4160 4161 4162

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
F
Frank Blaschka 已提交
4163 4164 4165
	return 0;
}

J
Julian Wiedmann 已提交
4166 4167 4168 4169 4170
static int qeth_do_send_packet(struct qeth_card *card,
			       struct qeth_qdio_out_q *queue,
			       struct sk_buff *skb, struct qeth_hdr *hdr,
			       unsigned int offset, unsigned int hd_len,
			       unsigned int elements_needed)
F
Frank Blaschka 已提交
4171
{
4172
	unsigned int start_index = queue->next_buf_to_fill;
F
Frank Blaschka 已提交
4173
	struct qeth_qdio_out_buffer *buffer;
4174
	unsigned int next_element;
4175 4176
	struct netdev_queue *txq;
	bool stopped = false;
F
Frank Blaschka 已提交
4177 4178 4179 4180
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

4181
	buffer = queue->bufs[queue->next_buf_to_fill];
4182 4183 4184

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
F
Frank Blaschka 已提交
4185
	 */
4186
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
F
Frank Blaschka 已提交
4187
		return -EBUSY;
4188 4189 4190

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

F
Frank Blaschka 已提交
4191 4192 4193 4194
	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
F
Frank Blaschka 已提交
4195
		/* does packet fit in current buffer? */
4196 4197
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
F
Frank Blaschka 已提交
4198 4199 4200 4201
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
J
Julian Wiedmann 已提交
4202
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4203
			buffer = queue->bufs[queue->next_buf_to_fill];
4204 4205

			/* We stepped forward, so sanity-check again: */
F
Frank Blaschka 已提交
4206 4207 4208
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
J
Jan Glauber 已提交
4209
							   flush_count);
4210 4211
				rc = -EBUSY;
				goto out;
F
Frank Blaschka 已提交
4212 4213 4214
			}
		}
	}
4215

4216 4217 4218 4219 4220 4221 4222 4223 4224 4225
	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

4226
	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4227 4228
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4229 4230 4231 4232 4233 4234

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
J
Julian Wiedmann 已提交
4235 4236
		queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4237 4238
	}

F
Frank Blaschka 已提交
4239
	if (flush_count)
J
Jan Glauber 已提交
4240
		qeth_flush_buffers(queue, start_index, flush_count);
4241

4242
out:
4243 4244
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
F
Frank Blaschka 已提交
4245

4246 4247
	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
F
Frank Blaschka 已提交
4248 4249 4250
	return rc;
}

J
Julian Wiedmann 已提交
4251 4252 4253
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

4267
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4268
	      struct qeth_qdio_out_q *queue, __be16 proto,
4269 4270
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
4271
				  __be16 proto, unsigned int data_len))
4272
{
4273
	unsigned int proto_len, hw_hdr_len;
4274
	unsigned int frame_len = skb->len;
4275
	bool is_tso = skb_is_gso(skb);
4276 4277 4278 4279 4280 4281
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

4282 4283 4284 4285 4286
	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
J
Julian Wiedmann 已提交
4287
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4288 4289
	}

4290 4291 4292 4293
	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

4294
	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4295 4296 4297
				      &elements);
	if (push_len < 0)
		return push_len;
4298
	if (is_tso || !push_len) {
4299 4300
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
4301
		data_offset = push_len + proto_len;
4302
	}
4303
	memset(hdr, 0, hw_hdr_len);
4304
	fill_header(queue, hdr, skb, proto, frame_len);
4305 4306 4307
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);
4308 4309

	if (IS_IQD(card)) {
4310 4311
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
4312 4313 4314
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
4315
		spin_lock(&queue->lock);
4316 4317
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
4318
		spin_unlock(&queue->lock);
4319 4320
	}

4321 4322 4323
	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

4324 4325 4326 4327
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

F
Frank Blaschka 已提交
4328 4329 4330
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4331
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
F
Frank Blaschka 已提交
4332 4333
	struct qeth_ipacmd_setadpparms *setparms;

C
Carsten Otte 已提交
4334
	QETH_CARD_TEXT(card, 4, "prmadpcb");
F
Frank Blaschka 已提交
4335 4336

	setparms = &(cmd->data.setadapterparms);
4337
	if (qeth_setadpparms_inspect_rc(cmd)) {
4338
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
F
Frank Blaschka 已提交
4339 4340 4341
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
4342
	return (cmd->hdr.return_code) ? -EIO : 0;
F
Frank Blaschka 已提交
4343 4344
}

4345
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
F
Frank Blaschka 已提交
4346
{
4347 4348
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
F
Frank Blaschka 已提交
4349 4350 4351
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

C
Carsten Otte 已提交
4352 4353
	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
F
Frank Blaschka 已提交
4354 4355

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4356
				   SETADP_DATA_SIZEOF(mode));
4357 4358
	if (!iob)
		return;
4359
	cmd = __ipa_cmd(iob);
F
Frank Blaschka 已提交
4360 4361 4362 4363 4364 4365 4366 4367
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4368
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4369
	struct qeth_ipacmd_setadpparms *adp_cmd;
F
Frank Blaschka 已提交
4370

C
Carsten Otte 已提交
4371
	QETH_CARD_TEXT(card, 4, "chgmaccb");
4372
	if (qeth_setadpparms_inspect_rc(cmd))
4373
		return -EIO;
F
Frank Blaschka 已提交
4374

4375
	adp_cmd = &cmd->data.setadapterparms;
4376 4377 4378
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

4379 4380
	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4381
		return -EADDRNOTAVAIL;
4382 4383

	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
F
Frank Blaschka 已提交
4384 4385 4386 4387 4388 4389 4390 4391 4392
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

C
Carsten Otte 已提交
4393
	QETH_CARD_TEXT(card, 4, "chgmac");
F
Frank Blaschka 已提交
4394 4395

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4396
				   SETADP_DATA_SIZEOF(change_addr));
4397 4398
	if (!iob)
		return -ENOMEM;
4399
	cmd = __ipa_cmd(iob);
F
Frank Blaschka 已提交
4400
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4401 4402 4403
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
F
Frank Blaschka 已提交
4404 4405 4406 4407 4408 4409
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

E
Einar Lueck 已提交
4410 4411 4412
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4413
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
E
Einar Lueck 已提交
4414 4415
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4416
	QETH_CARD_TEXT(card, 4, "setaccb");
E
Einar Lueck 已提交
4417 4418

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4419 4420
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
S
Stefan Raspl 已提交
4421 4422
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
4423 4424 4425
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
4426
	switch (qeth_setadpparms_inspect_rc(cmd)) {
E
Einar Lueck 已提交
4427
	case SET_ACCESS_CTRL_RC_SUCCESS:
4428
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
E
Einar Lueck 已提交
4429 4430
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
4431
		else
E
Einar Lueck 已提交
4432 4433
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
4434
		return 0;
S
Stefan Raspl 已提交
4435
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4436 4437
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
4438
		return 0;
S
Stefan Raspl 已提交
4439
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4440 4441
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
4442
		return 0;
E
Einar Lueck 已提交
4443 4444 4445
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
4446
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4447 4448 4449 4450
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
4451
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4452 4453 4454
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
4455
		return -EPERM;
S
Stefan Raspl 已提交
4456 4457 4458
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
4459
		return -EOPNOTSUPP;
S
Stefan Raspl 已提交
4460 4461 4462
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
					"enabled at the adjacent switch port");
4463
		return -EREMOTEIO;
S
Stefan Raspl 已提交
4464 4465 4466
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
					"at the adjacent switch failed\n");
4467 4468
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
E
Einar Lueck 已提交
4469
	default:
4470
		return -EIO;
E
Einar Lueck 已提交
4471 4472 4473
	}
}

4474 4475
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
E
Einar Lueck 已提交
4476 4477 4478 4479 4480 4481
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4482
	QETH_CARD_TEXT(card, 4, "setacctl");
E
Einar Lueck 已提交
4483

4484 4485 4486 4487 4488 4489
	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

E
Einar Lueck 已提交
4490
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4491
				   SETADP_DATA_SIZEOF(set_access_ctrl));
4492 4493
	if (!iob)
		return -ENOMEM;
4494
	cmd = __ipa_cmd(iob);
E
Einar Lueck 已提交
4495
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4496
	access_ctrl_req->subcmd_code = mode;
E
Einar Lueck 已提交
4497 4498

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4499
			       NULL);
4500
	if (rc) {
4501
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4502 4503
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
E
Einar Lueck 已提交
4504
	}
4505

E
Einar Lueck 已提交
4506 4507 4508
	return rc;
}

4509
void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
F
Frank Blaschka 已提交
4510 4511 4512
{
	struct qeth_card *card;

4513
	card = dev->ml_priv;
C
Carsten Otte 已提交
4514
	QETH_CARD_TEXT(card, 4, "txtimeo");
F
Frank Blaschka 已提交
4515 4516 4517 4518
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

4519
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
F
Frank Blaschka 已提交
4520
{
4521
	struct qeth_card *card = dev->ml_priv;
F
Frank Blaschka 已提交
4522 4523 4524 4525 4526 4527
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4528 4529
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
F
Frank Blaschka 已提交
4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
4561 4562 4563
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
F
Frank Blaschka 已提交
4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
4586
				struct qeth_reply *reply, unsigned long data)
F
Frank Blaschka 已提交
4587
{
4588 4589 4590 4591
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
4592
	void *snmp_data;
F
Frank Blaschka 已提交
4593

C
Carsten Otte 已提交
4594
	QETH_CARD_TEXT(card, 3, "snpcmdcb");
F
Frank Blaschka 已提交
4595 4596

	if (cmd->hdr.return_code) {
4597
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4598
		return -EIO;
F
Frank Blaschka 已提交
4599 4600 4601 4602
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
4603
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4604
		return -EIO;
F
Frank Blaschka 已提交
4605
	}
4606 4607 4608 4609 4610

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
4611
	} else {
4612 4613
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
4614
	}
F
Frank Blaschka 已提交
4615 4616 4617

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4618 4619
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
F
Frank Blaschka 已提交
4620
	}
C
Carsten Otte 已提交
4621
	QETH_CARD_TEXT_(card, 4, "snore%i",
4622
			cmd->data.setadapterparms.hdr.used_total);
C
Carsten Otte 已提交
4623
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4624
			cmd->data.setadapterparms.hdr.seq_no);
F
Frank Blaschka 已提交
4625
	/*copy entries to user buffer*/
4626
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
F
Frank Blaschka 已提交
4627
	qinfo->udata_offset += data_len;
4628

F
Frank Blaschka 已提交
4629 4630 4631 4632 4633 4634
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

4635
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
F
Frank Blaschka 已提交
4636
{
4637
	struct qeth_snmp_ureq __user *ureq;
F
Frank Blaschka 已提交
4638
	struct qeth_cmd_buffer *iob;
4639
	unsigned int req_len;
F
Frank Blaschka 已提交
4640 4641 4642
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

C
Carsten Otte 已提交
4643
	QETH_CARD_TEXT(card, 3, "snmpcmd");
F
Frank Blaschka 已提交
4644

4645
	if (IS_VM_NIC(card))
F
Frank Blaschka 已提交
4646 4647 4648
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4649
	    IS_LAYER3(card))
F
Frank Blaschka 已提交
4650
		return -EOPNOTSUPP;
4651

4652 4653 4654 4655 4656
	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

4657 4658 4659 4660
	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

4661 4662 4663 4664 4665 4666 4667
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4668
		return -EFAULT;
4669 4670
	}

F
Frank Blaschka 已提交
4671 4672
	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
4673
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4674 4675 4676 4677
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

4678
	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
F
Frank Blaschka 已提交
4679
	if (rc)
4680 4681
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
F
Frank Blaschka 已提交
4682 4683 4684 4685
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}
4686

F
Frank Blaschka 已提交
4687 4688 4689 4690
	kfree(qinfo.udata);
	return rc;
}

4691
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
J
Julian Wiedmann 已提交
4692 4693
					 struct qeth_reply *reply,
					 unsigned long data)
4694
{
4695
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
J
Julian Wiedmann 已提交
4696
	struct qeth_qoat_priv *priv = reply->param;
4697 4698 4699
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
4700
	if (qeth_setadpparms_inspect_rc(cmd))
4701
		return -EIO;
4702 4703 4704

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

4705 4706
	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;
4707

4708 4709
	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
4710 4711 4712 4713 4714 4715 4716 4717
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

4718
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

J
Julian Wiedmann 已提交
4730 4731
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;
4732

J
Julian Wiedmann 已提交
4733 4734
	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;
4735 4736 4737

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
4738
	priv.buffer = vzalloc(oat_data.buffer_len);
J
Julian Wiedmann 已提交
4739 4740
	if (!priv.buffer)
		return -ENOMEM;
4741 4742

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4743
				   SETADP_DATA_SIZEOF(query_oat));
4744 4745 4746 4747
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
4748
	cmd = __ipa_cmd(iob);
4749 4750 4751
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

J
Julian Wiedmann 已提交
4752
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4753
	if (!rc) {
4754 4755
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
4756 4757
		oat_data.response_len = priv.response_len;

J
Julian Wiedmann 已提交
4758 4759
		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4760
			rc = -EFAULT;
4761
	}
4762 4763

out_free:
4764
	vfree(priv.buffer);
4765 4766 4767
	return rc;
}

4768 4769
static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
E
Eugene Crosser 已提交
4770
{
4771
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4772
	struct qeth_link_info *link_info = reply->param;
E
Eugene Crosser 已提交
4773 4774 4775
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
4776
	if (qeth_setadpparms_inspect_rc(cmd))
4777
		return -EIO;
E
Eugene Crosser 已提交
4778

4779
	card_info = &cmd->data.setadapterparms.data.card_info;
4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835
	netdev_dbg(card->dev,
		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
		   card_info->card_type, card_info->port_mode,
		   card_info->port_speed);

	switch (card_info->port_mode) {
	case CARD_INFO_PORTM_FULLDUPLEX:
		link_info->duplex = DUPLEX_FULL;
		break;
	case CARD_INFO_PORTM_HALFDUPLEX:
		link_info->duplex = DUPLEX_HALF;
		break;
	default:
		link_info->duplex = DUPLEX_UNKNOWN;
	}

	switch (card_info->card_type) {
	case CARD_INFO_TYPE_1G_COPPER_A:
	case CARD_INFO_TYPE_1G_COPPER_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_TP;
		break;
	case CARD_INFO_TYPE_1G_FIBRE_A:
	case CARD_INFO_TYPE_1G_FIBRE_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_FIBRE;
		break;
	case CARD_INFO_TYPE_10G_FIBRE_A:
	case CARD_INFO_TYPE_10G_FIBRE_B:
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		break;
	default:
		switch (card_info->port_speed) {
		case CARD_INFO_PORTS_10M:
			link_info->speed = SPEED_10;
			break;
		case CARD_INFO_PORTS_100M:
			link_info->speed = SPEED_100;
			break;
		case CARD_INFO_PORTS_1G:
			link_info->speed = SPEED_1000;
			break;
		case CARD_INFO_PORTS_10G:
			link_info->speed = SPEED_10000;
			break;
		case CARD_INFO_PORTS_25G:
			link_info->speed = SPEED_25000;
			break;
		default:
			link_info->speed = SPEED_UNKNOWN;
		}

		link_info->port = PORT_OTHER;
	}

E
Eugene Crosser 已提交
4836 4837 4838
	return 0;
}

4839
int qeth_query_card_info(struct qeth_card *card,
4840
			 struct qeth_link_info *link_info)
E
Eugene Crosser 已提交
4841 4842 4843 4844 4845 4846
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
4847
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4848 4849
	if (!iob)
		return -ENOMEM;
4850 4851

	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
E
Eugene Crosser 已提交
4852 4853
}

4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
4921
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4922 4923
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4924 4925 4926
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
4927 4928
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
4929
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4930 4931 4932
		break;
	default:
		link_info->port = PORT_OTHER;
4933
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4934 4935 4936 4937 4938 4939
		break;
	}

	return 0;
}

4940 4941 4942 4943 4944 4945 4946
static void qeth_init_link_info(struct qeth_card *card)
{
	card->info.link_info.duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		card->info.link_info.speed = SPEED_10000;
		card->info.link_info.port = PORT_FIBRE;
4947
		card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			card->info.link_info.speed = SPEED_100;
			card->info.link_info.port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			card->info.link_info.speed = SPEED_1000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			card->info.link_info.speed = SPEED_10000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			card->info.link_info.speed = SPEED_25000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev, "Unknown link type %x\n",
				 card->info.link_type);
			card->info.link_info.speed = SPEED_UNKNOWN;
			card->info.link_info.port = PORT_OTHER;
		}
4974 4975

		card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
4976
	}
4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
5001 5002
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
5003 5004 5005
			}
		}
	}
5006 5007
}

5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

5022
	QETH_CARD_TEXT(card, 2, "vmreqmac");
5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
5034
	request->devno = card->info.ddev_devno;
5035

5036
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5037
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5038
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5039 5040
	if (rc)
		goto out;
5041
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5042 5043 5044 5045

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
5046 5047 5048
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
5049 5050
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
5051 5052
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063
	} else {
		ether_addr_copy(card->dev->dev_addr, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

5064 5065
static void qeth_determine_capabilities(struct qeth_card *card)
{
5066 5067
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
5068 5069 5070
	int rc;
	int ddev_offline = 0;

5071
	QETH_CARD_TEXT(card, 2, "detcapab");
5072 5073
	if (!ddev->online) {
		ddev_offline = 1;
5074
		rc = qeth_start_channel(channel);
5075
		if (rc) {
5076
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5077 5078 5079 5080
			goto out;
		}
	}

5081
	rc = qeth_read_conf_data(card);
5082
	if (rc) {
5083 5084
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
5085
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5086 5087 5088 5089 5090
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
5091
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5092

5093 5094 5095 5096 5097
	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5098 5099 5100 5101 5102 5103 5104 5105 5106
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

5107 5108
out_offline:
	if (ddev_offline == 1)
5109
		qeth_stop_channel(channel);
5110 5111 5112 5113
out:
	return;
}

5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140
static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

F
Frank Blaschka 已提交
5141 5142
static int qeth_qdio_establish(struct qeth_card *card)
{
5143 5144
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5145
	struct qeth_qib_parms *qib_parms = NULL;
F
Frank Blaschka 已提交
5146
	struct qdio_initialize init_data;
5147
	unsigned int i;
F
Frank Blaschka 已提交
5148 5149
	int rc = 0;

5150
	QETH_CARD_TEXT(card, 2, "qdioest");
F
Frank Blaschka 已提交
5151

5152 5153 5154 5155
	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;
F
Frank Blaschka 已提交
5156

5157 5158
		qeth_fill_qib_parms(card, qib_parms);
	}
F
Frank Blaschka 已提交
5159

5160 5161 5162
	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED)
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5163

5164 5165
	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
F
Frank Blaschka 已提交
5166 5167

	memset(&init_data, 0, sizeof(struct qdio_initialize));
5168 5169
	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
F
Frank Blaschka 已提交
5170
	init_data.qib_param_field_format = 0;
5171
	init_data.qib_param_field	 = (void *)qib_parms;
5172
	init_data.no_input_qs            = card->qdio.no_in_queues;
F
Frank Blaschka 已提交
5173
	init_data.no_output_qs           = card->qdio.no_out_queues;
5174 5175
	init_data.input_handler		 = qeth_qdio_input_handler;
	init_data.output_handler	 = qeth_qdio_output_handler;
5176
	init_data.irq_poll		 = qeth_qdio_poll;
F
Frank Blaschka 已提交
5177
	init_data.int_parm               = (unsigned long) card;
5178 5179
	init_data.input_sbal_addr_array  = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
F
Frank Blaschka 已提交
5180 5181 5182

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5183 5184
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
J
Jan Glauber 已提交
5185 5186 5187 5188
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
5189
		rc = qdio_establish(CARD_DDEV(card), &init_data);
J
Jan Glauber 已提交
5190
		if (rc) {
F
Frank Blaschka 已提交
5191
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
J
Jan Glauber 已提交
5192 5193
			qdio_free(CARD_DDEV(card));
		}
F
Frank Blaschka 已提交
5194
	}
5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}
5206

J
Jan Glauber 已提交
5207
out:
5208
	kfree(qib_parms);
F
Frank Blaschka 已提交
5209 5210 5211 5212 5213
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
5214
	QETH_CARD_TEXT(card, 2, "freecrd");
5215 5216 5217

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
5218
	qeth_put_cmd(card->read_cmd);
5219
	destroy_workqueue(card->event_wq);
5220
	dev_set_drvdata(&card->gdev->dev, NULL);
F
Frank Blaschka 已提交
5221 5222 5223
	kfree(card);
}

5224
static void qeth_trace_features(struct qeth_card *card)
5225 5226
{
	QETH_CARD_TEXT(card, 2, "features");
5227 5228 5229 5230 5231
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
5232 5233
}

F
Frank Blaschka 已提交
5234
static struct ccw_device_id qeth_ids[] = {
5235 5236 5237 5238 5239 5240
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
5241
#ifdef CONFIG_QETH_OSX
5242 5243
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
5244
#endif
F
Frank Blaschka 已提交
5245 5246 5247 5248 5249
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
5250
	.driver = {
S
Sebastian Ott 已提交
5251
		.owner = THIS_MODULE,
5252 5253
		.name = "qeth",
	},
F
Frank Blaschka 已提交
5254 5255 5256 5257 5258
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

5259
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
F
Frank Blaschka 已提交
5260
{
5261
	int retries = 3;
F
Frank Blaschka 已提交
5262 5263
	int rc;

5264
	QETH_CARD_TEXT(card, 2, "hrdsetup");
F
Frank Blaschka 已提交
5265
	atomic_set(&card->force_alloc_skb, 0);
5266 5267 5268
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
F
Frank Blaschka 已提交
5269
retry:
5270
	if (retries < 3)
5271 5272
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
5273
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5274 5275 5276
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
5277
	qdio_free(CARD_DDEV(card));
5278 5279

	rc = qeth_start_channel(&card->read);
5280 5281
	if (rc)
		goto retriable;
5282
	rc = qeth_start_channel(&card->write);
5283 5284
	if (rc)
		goto retriable;
5285
	rc = qeth_start_channel(&card->data);
5286 5287 5288
	if (rc)
		goto retriable;
retriable:
F
Frank Blaschka 已提交
5289
	if (rc == -ERESTARTSYS) {
5290
		QETH_CARD_TEXT(card, 2, "break1");
F
Frank Blaschka 已提交
5291 5292
		return rc;
	} else if (rc) {
5293
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5294
		if (--retries < 0)
F
Frank Blaschka 已提交
5295 5296 5297 5298
			goto out;
		else
			goto retry;
	}
5299

5300
	qeth_determine_capabilities(card);
5301
	qeth_read_ccw_conf_data(card);
5302
	qeth_idx_init(card);
5303 5304 5305

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
5306
		QETH_CARD_TEXT(card, 2, "break2");
F
Frank Blaschka 已提交
5307 5308
		return rc;
	} else if (rc) {
5309
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
F
Frank Blaschka 已提交
5310 5311 5312 5313 5314
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
5315 5316 5317

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
5318
		QETH_CARD_TEXT(card, 2, "break3");
F
Frank Blaschka 已提交
5319 5320
		return rc;
	} else if (rc) {
5321
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
F
Frank Blaschka 已提交
5322 5323 5324 5325 5326
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
5327
	card->read_or_write_problem = 0;
F
Frank Blaschka 已提交
5328 5329
	rc = qeth_mpc_initialize(card);
	if (rc) {
5330
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
F
Frank Blaschka 已提交
5331 5332
		goto out;
	}
5333

5334 5335
	rc = qeth_send_startlan(card);
	if (rc) {
5336
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5337 5338
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
J
Julian Wiedmann 已提交
5339
			*carrier_ok = false;
5340 5341 5342
		} else {
			goto out;
		}
5343
	} else {
J
Julian Wiedmann 已提交
5344 5345 5346
		*carrier_ok = true;
	}

5347 5348 5349
	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
5350
	card->options.sbp.supported_funcs = 0;
5351
	card->info.diagass_support = 0;
5352 5353 5354
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
5355 5356 5357 5358 5359
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
5360 5361 5362
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
5363
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5364 5365 5366 5367 5368
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
5369
		if (rc)
5370
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5371
	}
5372

5373 5374
	qeth_trace_features(card);

5375 5376 5377 5378
	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

5379
	if (card->options.isolation != ISOLATION_MODE_NONE) {
5380 5381
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
5382 5383 5384
		if (rc)
			goto out;
	}
5385

5386 5387
	qeth_init_link_info(card);

5388 5389 5390 5391 5392 5393
	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

F
Frank Blaschka 已提交
5394 5395
	return 0;
out:
5396 5397
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
5398 5399
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
F
Frank Blaschka 已提交
5400 5401 5402
	return rc;
}

5403 5404
static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
5405
{
5406
	bool carrier_ok;
5407 5408 5409 5410 5411
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

5412 5413 5414 5415 5416 5417 5418 5419 5420
	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

5421
	if (card->dev->reg_state != NETREG_REGISTERED)
5422 5423 5424
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

5425
	rc = disc->set_online(card, carrier_ok);
5426 5427 5428 5429 5430
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5431 5432

	mutex_unlock(&card->conf_mutex);
5433
	return 0;
5434

5435 5436
err_online:
err_hardsetup:
5437 5438 5439 5440
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

5441 5442 5443 5444 5445 5446
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
5447 5448 5449
	return rc;
}

5450 5451
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

5463 5464 5465
	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

5466 5467 5468 5469 5470 5471 5472
	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

5473 5474
	cancel_work_sync(&card->rx_mode_work);

5475
	disc->set_offline(card);
5476

5477 5478 5479 5480 5481 5482
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501
	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

static int qeth_do_reset(void *data)
{
5502
	const struct qeth_discipline *disc;
5503 5504 5505
	struct qeth_card *card = data;
	int rc;

5506 5507 5508
	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

5509 5510 5511 5512 5513 5514 5515
	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

5516 5517
	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

J
Julian Wiedmann 已提交
5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif

static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5591
			     struct qeth_hdr *hdr, bool uses_frags)
J
Julian Wiedmann 已提交
5592
{
5593
	struct napi_struct *napi = &card->napi;
J
Julian Wiedmann 已提交
5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607
	bool is_cso;

	switch (hdr->hdr.l2.id) {
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
5608 5609 5610 5611
		if (uses_frags)
			napi_free_frags(napi);
		else
			dev_kfree_skb_any(skb);
J
Julian Wiedmann 已提交
5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

5630 5631 5632 5633 5634 5635
	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
J
Julian Wiedmann 已提交
5636 5637
}

5638
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
F
Frank Blaschka 已提交
5639
{
5640
	struct page *page = virt_to_page(data);
5641
	unsigned int next_frag;
5642

5643
	next_frag = skb_shinfo(skb)->nr_frags;
5644
	get_page(page);
5645 5646
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
F
Frank Blaschka 已提交
5647 5648
}

5649 5650 5651 5652 5653
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}

J
Julian Wiedmann 已提交
5654
static int qeth_extract_skb(struct qeth_card *card,
5655
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
J
Julian Wiedmann 已提交
5656
			    int *__offset)
F
Frank Blaschka 已提交
5657
{
5658
	struct qeth_priv *priv = netdev_priv(card->dev);
5659
	struct qdio_buffer *buffer = qethbuffer->buffer;
5660
	struct napi_struct *napi = &card->napi;
5661
	struct qdio_buffer_element *element;
5662
	unsigned int linear_len = 0;
5663
	bool uses_frags = false;
F
Frank Blaschka 已提交
5664
	int offset = *__offset;
5665
	bool use_rx_sg = false;
5666
	unsigned int headroom;
J
Julian Wiedmann 已提交
5667
	struct qeth_hdr *hdr;
5668
	struct sk_buff *skb;
5669
	int skb_len = 0;
F
Frank Blaschka 已提交
5670

5671 5672
	element = &buffer->element[*element_no];

5673
next_packet:
F
Frank Blaschka 已提交
5674
	/* qeth_hdr must not cross element boundaries */
5675
	while (element->length < offset + sizeof(struct qeth_hdr)) {
F
Frank Blaschka 已提交
5676
		if (qeth_is_last_sbale(element))
J
Julian Wiedmann 已提交
5677
			return -ENODATA;
F
Frank Blaschka 已提交
5678 5679 5680 5681
		element++;
		offset = 0;
	}

5682
	hdr = phys_to_virt(element->addr) + offset;
J
Julian Wiedmann 已提交
5683
	offset += sizeof(*hdr);
5684 5685
	skb = NULL;

J
Julian Wiedmann 已提交
5686
	switch (hdr->hdr.l2.id) {
5687
	case QETH_HEADER_TYPE_LAYER2:
J
Julian Wiedmann 已提交
5688
		skb_len = hdr->hdr.l2.pkt_length;
5689
		linear_len = ETH_HLEN;
5690
		headroom = 0;
5691 5692
		break;
	case QETH_HEADER_TYPE_LAYER3:
J
Julian Wiedmann 已提交
5693
		skb_len = hdr->hdr.l3.length;
5694 5695 5696 5697 5698
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

J
Julian Wiedmann 已提交
5699
		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5700 5701 5702 5703 5704
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

J
Julian Wiedmann 已提交
5705
		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5706 5707 5708
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
5709
		headroom = ETH_HLEN;
5710 5711
		break;
	default:
J
Julian Wiedmann 已提交
5712
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5713 5714 5715 5716
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

5717
		/* Can't determine packet length, drop the whole buffer. */
J
Julian Wiedmann 已提交
5718
		return -EPROTONOSUPPORT;
F
Frank Blaschka 已提交
5719 5720
	}

5721 5722 5723 5724
	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5725

5726
	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5727
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
J
Julian Wiedmann 已提交
5728
		     !atomic_read(&card->force_alloc_skb));
5729

5730
	if (use_rx_sg) {
5731
		/* QETH_CQ_ENABLED only: */
5732 5733
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
F
Frank Blaschka 已提交
5755
	}
5756

5757 5758 5759
	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
5760
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5761 5762
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5763

5764 5765 5766
use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
5767
walk_packet:
F
Frank Blaschka 已提交
5768
	while (skb_len) {
5769
		int data_len = min(skb_len, (int)(element->length - offset));
5770
		char *data = phys_to_virt(element->addr) + offset;
5771 5772 5773

		skb_len -= data_len;
		offset += data_len;
5774

5775
		/* Extract data from current element: */
5776
		if (skb && data_len) {
5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
F
Frank Blaschka 已提交
5791
		}
5792 5793

		/* Step forward to next element: */
F
Frank Blaschka 已提交
5794 5795
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
C
Carsten Otte 已提交
5796
				QETH_CARD_TEXT(card, 4, "unexeob");
C
Carsten Otte 已提交
5797
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5798
				if (skb) {
5799 5800 5801 5802
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
5803 5804 5805
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
J
Julian Wiedmann 已提交
5806
				return -EMSGSIZE;
F
Frank Blaschka 已提交
5807 5808 5809 5810 5811
			}
			element++;
			offset = 0;
		}
	}
5812 5813 5814 5815 5816

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

5817
	*element_no = element - &buffer->element[0];
F
Frank Blaschka 已提交
5818
	*__offset = offset;
J
Julian Wiedmann 已提交
5819

5820
	qeth_receive_skb(card, skb, hdr, uses_frags);
J
Julian Wiedmann 已提交
5821 5822 5823
	return 0;
}

5824 5825
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
J
Julian Wiedmann 已提交
5826
{
5827
	unsigned int work_done = 0;
J
Julian Wiedmann 已提交
5828 5829

	while (budget) {
5830
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
J
Julian Wiedmann 已提交
5831 5832 5833 5834 5835 5836 5837
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
F
Frank Blaschka 已提交
5838
	}
J
Julian Wiedmann 已提交
5839 5840

	return work_done;
F
Frank Blaschka 已提交
5841 5842
}

5843
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5844
{
5845
	struct qeth_rx *ctx = &card->rx;
5846
	unsigned int work_done = 0;
5847

5848
	while (budget > 0) {
5849 5850 5851 5852
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

5853
		/* Fetch completed RX buffers: */
5854 5855
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
5856 5857 5858 5859
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
5860 5861 5862 5863 5864 5865
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

5866
		/* Process one completed RX buffer: */
5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5882
			buffer->pool_entry = NULL;
5883
			card->rx.b_count--;
5884 5885 5886
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);
5887 5888 5889 5890 5891

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
5892 5893 5894
		}
	}

5895 5896 5897
	return work_done;
}

5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

5916 5917 5918 5919 5920 5921 5922
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

5923 5924 5925 5926 5927 5928 5929 5930 5931 5932
	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

5933 5934 5935
	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

5936 5937 5938 5939 5940 5941 5942 5943 5944 5945
	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}
5946

5947
	if (napi_complete_done(napi, work_done) &&
5948
	    qdio_start_irq(CARD_DDEV(card)))
5949
		napi_schedule(napi);
5950

5951 5952 5953 5954
	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

5955
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5956 5957
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
5958 5959 5960 5961
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
5962
	bool error = !!qdio_error;
5963

5964
	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5965
		struct qaob *aob = buffer->aob;
5966
		struct qeth_qaob_priv1 *priv;
5967
		enum iucv_tx_notify notify;
5968 5969 5970 5971 5972 5973 5974 5975

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}
5976

5977 5978
		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

5979 5980 5981
		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5982 5983
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

5984 5985 5986 5987 5988
			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
5989
			}
5990

5991 5992 5993
			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
5994
		}
5995

5996 5997 5998 5999
		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
6000
		memset(aob, 0, sizeof(*aob));
6001
	} else if (card->options.cq == QETH_CQ_ENABLED) {
6002 6003
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
6004 6005
	}

6006
	qeth_clear_output_buffer(queue, buffer, error, budget);
6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

6018 6019 6020 6021
	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);
6022 6023 6024

	while (1) {
		unsigned int start, error, i;
6025 6026
		unsigned int packets = 0;
		unsigned int bytes = 0;
6027 6028
		int completed;

6029
		qeth_tx_complete_pending_bufs(card, queue, false, budget);
6030

6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047
		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
6048 6049 6050
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
6051 6052 6053 6054
			return 0;
		}

		for (i = start; i < start + completed; i++) {
6055
			struct qeth_qdio_out_buffer *buffer;
6056 6057
			unsigned int bidx = QDIO_BUFNR(i);

6058
			buffer = queue->bufs[bidx];
6059
			packets += buffer->frames;
6060 6061 6062
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
6063 6064 6065 6066 6067
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
6068 6069 6070 6071
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
6072 6073 6074 6075
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);
6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

6088 6089 6090 6091 6092 6093 6094
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

6095 6096 6097 6098 6099 6100 6101 6102
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6103
		return -EIO;
6104 6105 6106 6107 6108 6109

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

6110 6111
int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
6112
{
6113
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6114 6115 6116

	QETH_CARD_TEXT(card, 4, "defadpcb");

6117 6118 6119 6120 6121
	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6122
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6123
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6124
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6125 6126
	return 0;
}
6127
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6128

6129 6130
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
6131 6132
						 u16 cmd_code,
						 unsigned int data_length,
6133
						 enum qeth_prot_versions prot)
6134
{
6135 6136
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
6137 6138 6139
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
6140 6141 6142 6143 6144 6145
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;
6146

6147 6148
	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;
6149

6150 6151 6152
	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
6153 6154
	return iob;
}
6155
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6156

6157 6158
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
6159
				      u16 cmd_code, u32 *data,
6160
				      enum qeth_prot_versions prot)
6161
{
6162
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6163 6164
	struct qeth_cmd_buffer *iob;

6165 6166
	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6167 6168
	if (!iob)
		return -ENOMEM;
6169

6170 6171
	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6172
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6173
}
6174
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6175

F
Frank Blaschka 已提交
6176 6177
static void qeth_unregister_dbf_views(void)
{
6178
	int x;
6179

6180 6181 6182 6183
	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
F
Frank Blaschka 已提交
6184 6185
}

C
Carsten Otte 已提交
6186
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
P
Peter Tiedemann 已提交
6187 6188
{
	char dbf_txt_buf[32];
6189
	va_list args;
P
Peter Tiedemann 已提交
6190

6191
	if (!debug_level_enabled(id, level))
P
Peter Tiedemann 已提交
6192
		return;
6193 6194 6195
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
C
Carsten Otte 已提交
6196
	debug_text_event(id, level, dbf_txt_buf);
P
Peter Tiedemann 已提交
6197 6198 6199
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

F
Frank Blaschka 已提交
6200 6201
static int qeth_register_dbf_views(void)
{
6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}
F
Frank Blaschka 已提交
6215

6216 6217 6218 6219 6220 6221
		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}
F
Frank Blaschka 已提交
6222

6223 6224 6225
		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}
F
Frank Blaschka 已提交
6226 6227 6228 6229

	return 0;
}

6230 6231
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

6232 6233
int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
F
Frank Blaschka 已提交
6234
{
6235 6236
	int rc;

6237
	mutex_lock(&qeth_mod_mutex);
F
Frank Blaschka 已提交
6238 6239
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
6240 6241
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
F
Frank Blaschka 已提交
6242 6243
		break;
	case QETH_DISCIPLINE_LAYER2:
6244 6245
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
F
Frank Blaschka 已提交
6246
		break;
6247 6248
	default:
		break;
F
Frank Blaschka 已提交
6249
	}
6250
	mutex_unlock(&qeth_mod_mutex);
6251

6252
	if (!card->discipline) {
6253 6254
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
6255
		return -EINVAL;
F
Frank Blaschka 已提交
6256
	}
6257

6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268
	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

6269
	card->options.layer = discipline;
6270
	return 0;
F
Frank Blaschka 已提交
6271 6272
}

6273
void qeth_remove_discipline(struct qeth_card *card)
F
Frank Blaschka 已提交
6274
{
6275 6276
	card->discipline->remove(card->gdev);

6277
	if (IS_LAYER2(card))
6278
		symbol_put(qeth_l2_discipline);
F
Frank Blaschka 已提交
6279
	else
6280
		symbol_put(qeth_l3_discipline);
6281
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6282
	card->discipline = NULL;
F
Frank Blaschka 已提交
6283 6284
}

6285
static const struct device_type qeth_generic_devtype = {
6286 6287 6288
	.name = "qeth_generic",
};

6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356
#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

6357 6358 6359
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
6360
	struct qeth_priv *priv;
6361 6362 6363

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
6364
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6365
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6366
		break;
6367
	case QETH_CARD_TYPE_OSM:
6368
		dev = alloc_etherdev(sizeof(*priv));
6369
		break;
6370
	default:
6371
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6372 6373 6374 6375 6376
	}

	if (!dev)
		return NULL;

6377 6378
	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
6379
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6380

6381 6382
	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
J
Julian Wiedmann 已提交
6383
	dev->min_mtu = 576;
6384 6385 6386
	 /* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
6387 6388
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);
6389

J
Julian Wiedmann 已提交
6390 6391 6392 6393 6394 6395
	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;
6396

6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410
	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

F
Frank Blaschka 已提交
6411 6412 6413 6414 6415
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
6416
	enum qeth_discipline_id enforced_disc;
6417
	char dbf_name[DBF_NAME_LEN];
F
Frank Blaschka 已提交
6418

6419
	QETH_DBF_TEXT(SETUP, 2, "probedev");
F
Frank Blaschka 已提交
6420 6421 6422 6423 6424

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

6425
	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
F
Frank Blaschka 已提交
6426

6427
	card = qeth_alloc_card(gdev);
F
Frank Blaschka 已提交
6428
	if (!card) {
6429
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
F
Frank Blaschka 已提交
6430 6431 6432
		rc = -ENOMEM;
		goto err_dev;
	}
6433 6434 6435

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
6436
	card->debug = qeth_get_dbf_entry(dbf_name);
6437
	if (!card->debug) {
6438 6439 6440
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
6441 6442
	}

6443
	qeth_setup_card(card);
6444
	card->dev = qeth_alloc_netdev(card);
6445 6446
	if (!card->dev) {
		rc = -ENOMEM;
6447
		goto err_card;
6448
	}
6449

6450 6451 6452
	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

6453 6454 6455 6456
	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
6457

J
Julian Wiedmann 已提交
6458
	gdev->dev.groups = qeth_dev_groups;
6459

6460 6461 6462 6463 6464 6465 6466
	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
6467
		/* It's so early that we don't need the discipline_mutex yet. */
6468
		rc = qeth_setup_discipline(card, enforced_disc);
6469
		if (rc)
6470
			goto err_setup_disc;
6471 6472

		break;
F
Frank Blaschka 已提交
6473 6474 6475 6476
	}

	return 0;

6477
err_setup_disc:
6478
err_chp_desc:
6479
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

6491
	QETH_CARD_TEXT(card, 2, "removedv");
F
Frank Blaschka 已提交
6492

6493
	mutex_lock(&card->discipline_mutex);
6494 6495
	if (card->discipline)
		qeth_remove_discipline(card);
6496
	mutex_unlock(&card->discipline_mutex);
6497

6498 6499
	qeth_free_qdio_queues(card);

6500
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6501 6502 6503 6504 6505 6506 6507 6508
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
6509
	enum qeth_discipline_id def_discipline;
F
Frank Blaschka 已提交
6510

6511
	mutex_lock(&card->discipline_mutex);
6512
	if (!card->discipline) {
6513 6514
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
6515
		rc = qeth_setup_discipline(card, def_discipline);
F
Frank Blaschka 已提交
6516 6517 6518
		if (rc)
			goto err;
	}
6519

6520 6521
	rc = qeth_set_online(card, card->discipline);

F
Frank Blaschka 已提交
6522
err:
6523
	mutex_unlock(&card->discipline_mutex);
F
Frank Blaschka 已提交
6524 6525 6526 6527 6528 6529
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6530
	int rc;
6531

6532 6533 6534 6535 6536
	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
F
Frank Blaschka 已提交
6537 6538 6539 6540 6541
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6542

6543 6544 6545 6546
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
6547
	qeth_drain_output_queues(card);
6548
	qdio_free(CARD_DDEV(card));
F
Frank Blaschka 已提交
6549 6550
}

6551 6552
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
F
Frank Blaschka 已提交
6553 6554 6555
{
	int err;

6556 6557
	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);
6558 6559 6560

	return err ? err : count;
}
6561
static DRIVER_ATTR_WO(group);
F
Frank Blaschka 已提交
6562

6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574
static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

A
Arnd Bergmann 已提交
6589
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6590 6591 6592 6593 6594 6595
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
A
Arnd Bergmann 已提交
6596
		rc = qeth_snmp_command(card, data);
6597 6598
		break;
	case SIOC_QETH_GET_CARD_TYPE:
6599 6600
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
6601
			return 1;
6602
		return 0;
A
Arnd Bergmann 已提交
6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, data, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
A
Arnd Bergmann 已提交
6638
		return -EOPNOTSUPP;
6639 6640 6641 6642 6643 6644 6645
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

6646 6647
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
6648 6649
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6650
	u32 *features = reply->param;
6651

6652
	if (qeth_setassparms_inspect_rc(cmd))
6653
		return -EIO;
6654

6655
	*features = cmd->data.setassparms.data.flags_32bit;
6656 6657 6658
	return 0;
}

6659 6660
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
6661
{
6662 6663
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
6664 6665
}

6666
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6667
			    enum qeth_prot_versions prot, u8 *lp2lp)
6668
{
6669
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6670 6671 6672
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
6673 6674
	int rc;

6675 6676 6677
	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
6678
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6679

6680 6681 6682 6683 6684 6685 6686
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
6687
		return rc;
6688

6689 6690 6691 6692
	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}
6693

6694 6695
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
6696
				       prot);
6697 6698 6699
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
6700
	}
6701 6702 6703 6704 6705

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6706
	if (rc) {
6707
		qeth_set_csum_off(card, cstype, prot);
6708 6709
		return rc;
	}
6710

6711 6712 6713 6714 6715 6716
	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

6717 6718
	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6719 6720 6721 6722

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

6723 6724 6725
	return 0;
}

6726
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6727
			     enum qeth_prot_versions prot, u8 *lp2lp)
6728
{
6729
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6730
		    qeth_set_csum_off(card, cstype, prot);
6731 6732
}

6733 6734 6735 6736 6737 6738 6739
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6740
		return -EIO;
6741 6742 6743 6744 6745 6746

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

6747 6748
static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
6749
{
6750
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6751
						 IPA_CMD_ASS_STOP, NULL, prot);
6752
}
6753

6754 6755 6756
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
6757 6758 6759 6760 6761 6762 6763 6764 6765 6766
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

6767
	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6768 6769 6770 6771 6772 6773 6774 6775 6776
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6777 6778
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
6779 6780 6781 6782 6783 6784
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
6785 6786 6787
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
6802
}
6803

6804 6805 6806
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
6807
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6808
}
6809

6810 6811 6812 6813 6814 6815 6816
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6817
					    QETH_PROT_IPV4, NULL);
6818 6819 6820
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;
6821

6822
	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6823
				    QETH_PROT_IPV6, NULL);
6824 6825 6826 6827 6828 6829 6830 6831 6832

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

6833
/**
6834 6835
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
6836
 */
6837
void qeth_enable_hw_features(struct net_device *dev)
6838 6839
{
	struct qeth_card *card = dev->ml_priv;
6840
	netdev_features_t features;
6841

6842
	features = dev->features;
6843
	/* force-off any feature that might need an IPA sequence.
6844 6845
	 * netdev_update_features() will restart them.
	 */
6846 6847 6848 6849 6850 6851
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
6852
	netdev_update_features(dev);
6853 6854 6855
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
6856
}
6857
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6858

6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

6877 6878 6879
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
6880
	netdev_features_t changed = dev->features ^ features;
6881 6882
	int rc = 0;

6883 6884
	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6885

6886
	if ((changed & NETIF_F_IP_CSUM)) {
6887
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6888 6889
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
6890 6891 6892
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
6893 6894
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6895 6896
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
6897 6898 6899
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
6900 6901
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6902 6903 6904
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
6905 6906 6907
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
6908 6909 6910
		if (rc)
			changed ^= NETIF_F_TSO;
	}
6911 6912 6913 6914 6915 6916
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}
6917

6918 6919 6920
	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

6921 6922 6923 6924 6925 6926
	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
6927 6928 6929 6930 6931 6932 6933 6934
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

6935
	QETH_CARD_TEXT(card, 2, "fixfeat");
6936 6937
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
6938 6939
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
6940 6941
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6942
		features &= ~NETIF_F_RXCSUM;
6943
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6944
		features &= ~NETIF_F_TSO;
6945 6946
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;
6947

6948
	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6949 6950 6951
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
6952

6953 6954 6955 6956
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
6957 6958
	struct qeth_card *card = dev->ml_priv;

6959
	/* Traffic with local next-hop is not eligible for some offloads: */
6960
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
6961
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008
	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

7009 7010 7011 7012 7013 7014 7015 7016 7017 7018
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
7019
	stats->rx_errors = card->stats.rx_length_errors +
7020
			   card->stats.rx_frame_errors +
7021 7022
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
7023 7024
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
7025
	stats->multicast = card->stats.rx_multicast;
7026
	stats->rx_length_errors = card->stats.rx_length_errors;
7027
	stats->rx_frame_errors = card->stats.rx_frame_errors;
7028
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;
7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080
#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
7081
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
7082

7083 7084 7085
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
7086 7087
	u16 txq;

7088 7089
	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
7090 7091
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;
7092 7093 7094

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
7095 7096 7097
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

7098
int qeth_open(struct net_device *dev)
7099 7100
{
	struct qeth_card *card = dev->ml_priv;
7101 7102
	struct qeth_qdio_out_q *queue;
	unsigned int i;
7103 7104 7105 7106

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
7107
	netif_tx_start_all_queues(dev);
7108 7109

	local_bh_disable();
7110 7111 7112 7113 7114
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
7115
	}
7116 7117 7118

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
7119 7120
	/* kick-start the NAPI softirq: */
	local_bh_enable();
7121

7122 7123 7124 7125 7126 7127 7128
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
7129 7130
	struct qeth_qdio_out_q *queue;
	unsigned int i;
7131 7132

	QETH_CARD_TEXT(card, 4, "qethstop");
7133 7134 7135 7136 7137

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

7138 7139 7140
	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);
7141

7142 7143
	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);
7144

7145 7146 7147 7148
	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
7149 7150
	}

7151 7152 7153 7154
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

F
Frank Blaschka 已提交
7155 7156 7157 7158
static int __init qeth_core_init(void)
{
	int rc;

7159
	pr_info("loading core functions\n");
F
Frank Blaschka 已提交
7160

7161 7162
	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

F
Frank Blaschka 已提交
7163 7164
	rc = qeth_register_dbf_views();
	if (rc)
7165
		goto dbf_err;
M
Mark McLoughlin 已提交
7166
	qeth_core_root_dev = root_device_register("qeth");
7167
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
F
Frank Blaschka 已提交
7168 7169
	if (rc)
		goto register_err;
7170 7171 7172 7173
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
7174 7175 7176 7177
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
7178 7179 7180 7181 7182 7183
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
7184 7185 7186 7187 7188 7189
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;
7190

7191
	return 0;
7192 7193 7194 7195 7196

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7197 7198
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
7199
slab_err:
M
Mark McLoughlin 已提交
7200
	root_device_unregister(qeth_core_root_dev);
F
Frank Blaschka 已提交
7201 7202
register_err:
	qeth_unregister_dbf_views();
7203
dbf_err:
7204
	debugfs_remove_recursive(qeth_debugfs_root);
7205
	pr_err("Initializing the qeth device driver failed\n");
F
Frank Blaschka 已提交
7206 7207 7208 7209 7210
	return rc;
}

static void __exit qeth_core_exit(void)
{
7211
	qeth_clear_dbf_list();
F
Frank Blaschka 已提交
7212 7213
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
7214
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7215
	kmem_cache_destroy(qeth_core_header_cache);
7216
	root_device_unregister(qeth_core_root_dev);
F
Frank Blaschka 已提交
7217
	qeth_unregister_dbf_views();
7218
	debugfs_remove_recursive(qeth_debugfs_root);
7219
	pr_info("core functions removed\n");
F
Frank Blaschka 已提交
7220 7221 7222 7223 7224 7225 7226
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");