// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

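/* Drop all entries from the working buffer-pool list and detach any pool
 * entries still referenced by the inbound queue's buffers.
 */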
static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	if (!queue)
		return;

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

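/* Grow or shrink the pre-allocated RX buffer pool to @count entries; while
 * the inbound queue is not yet allocated, only the configured count is
 * updated.
 */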
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127, NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}

		card->qdio.no_in_queues = 2;
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

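/* Map a QDIO SBALF 15 completion code to the AF_IUCV TX notification that
 * should be raised for the affected skbs.
 */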
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

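/* Track commands that are waiting for a reply on card->cmd_waiter_list. */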
static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

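/* Sort an inbound IPA PDU: hand replies back to the caller, and consume
 * unsolicited events (STOPLAN/STARTLAN, bridgeport and local-address
 * notifications) right here.
 */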
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

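/* Completion handler for the long-running READ CCW: validate the IDX
 * response, match the buffer against a pending command, run its callback
 * and then re-issue the next read.
 */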
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

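/* Interrupt handler for all three CCW channels of a card. */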
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

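/* Account a completed TX buffer and release its skbs. */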
static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

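/* Complete or discard everything that is still queued on a TX queue;
 * with @free the buffer descriptors themselves are released as well.
 */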
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

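/* Under z/VM, query the hypervisor via DIAG 0x26C to learn whether the
 * VNIC is coupled in layer-2 or layer-3 mode.
 */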
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

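/* Derive the function level we expect the peer to report in its IDX ACTIVATE
 * reply, given the level that we announced ourselves.
 */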
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

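/* Build the common parts of an IDX ACTIVATE request: a chained write+read
 * CCW program, plus the port number, issuer token, function level and the
 * addressing information of the data device.
 */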
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

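/* Translate the HiperSockets frame-size code from the ULP ENABLE response
 * into an MTU; unknown codes map to 0.
 */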
static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type = 0;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	unsigned int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
			goto err_out_bufs;
	}

	return q;

err_out_bufs:
	while (i > 0)
		qeth_free_out_buf(q->bufs[--i]);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	unsigned int i;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		INIT_LIST_HEAD(&queue->pending_bufs);
		spin_lock_init(&queue->lock);
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

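/* Fill the QIB parameter area that is handed to the device at qdio_establish
 * time: PCI thresholds (PCIT), blocking thresholds (BLKT) and, unless
 * prio-queueing is in use, the per-queue priorities (PQUE). The magic
 * eyecatchers are converted to EBCDIC.
 */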
static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM uses a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

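/* Pick a buffer pool entry whose pages are no longer referenced elsewhere
 * (page_count() == 1). If none is free, reuse the first entry and replace any
 * still-referenced pages with freshly allocated ones.
 */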
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =
			page_to_phys(pool_entry->elements[i]);
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

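/* Upper bound for TX bulking: IQD unicast queues without CQ may batch up to
 * the mmwc value reported in the SSQD; everyone else flushes each buffer
 * individually.
 */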
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/* give only as many buffers to hardware as we have buffer pool entries */
	for (i = 0; i < rx_bufs; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
		     NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc) {
		return rc;
	}

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->coalesced_frames = 0;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}

static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}

static void qeth_prepare_ipa_cmd(struct qeth_card *card,
				 struct qeth_cmd_buffer *iob, u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}

static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}

struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length);
	iob->match = qeth_ipa_match_reply;

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */

int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->read_or_write_problem) {
		qeth_put_cmd(iob);
		return -EIO;
	}

	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}

static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_cmds_supp *query_cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}

	card->options.adp.supported = query_cmd->supported_cmds;
	return 0;
}

static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}

static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}

static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				qeth_query_switch_attributes_cb, sw_info);
}

struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

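/* Log SBALF 14/15 for a QDIO error. A SBALF15 value of 0x12 is counted as an
 * rx_fifo error and treated as non-fatal (returns 0); anything else returns 1.
 */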
static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
			       buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
			       buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}

static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in a memory shortage, so switch back to
			   traditional skb allocation and drop packets */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count, NULL);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;
3458 3459

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
3460
		return;
3461

C
Carsten Otte 已提交
3462 3463
	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
3464
		       (u16)qdio_err, (u8)sbalf15);
F
Frank Blaschka 已提交
3465 3466
}

3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482
/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
J
Julian Wiedmann 已提交
3483
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
3484 3485 3486 3487 3488
		return 1;
	}
	return 0;
}

F
Frank Blaschka 已提交
3489 3490 3491 3492 3493 3494 3495 3496 3497 3498
/*
 * Switched to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK){
			/* switch non-PACKING -> PACKING */
C
Carsten Otte 已提交
3499
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
3500
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
F
Frank Blaschka 已提交
3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
C
Carsten Otte 已提交
3518
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
3519
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
F
Frank Blaschka 已提交
3520
			queue->do_pack = 0;
3521
			return qeth_prep_flush_pack_buffer(queue);
F
Frank Blaschka 已提交
3522 3523 3524 3525 3526
		}
	}
	return 0;
}

J
Jan Glauber 已提交
3527 3528
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
F
Frank Blaschka 已提交
3529
{
3530
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3531
	struct qeth_card *card = queue->card;
3532
	unsigned int frames, usecs;
3533
	struct qaob *aob = NULL;
F
Frank Blaschka 已提交
3534 3535 3536 3537
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
J
Julian Wiedmann 已提交
3538
		unsigned int bidx = QDIO_BUFNR(i);
3539
		struct sk_buff *skb;
J
Julian Wiedmann 已提交
3540

3541
		buf = queue->bufs[bidx];
3542 3543
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
3544
		queue->coalesced_frames += buf->frames;
F
Frank Blaschka 已提交
3545

3546 3547 3548 3549
		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
3550
	}
F
Frank Blaschka 已提交
3551

3552 3553 3554 3555 3556 3557 3558
	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = qdio_allocate_aob();
			if (buf->aob) {
3559 3560
				struct qeth_qaob_priv1 *priv;

3561
				aob = buf->aob;
3562 3563 3564
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
3565 3566 3567
			}
		}
	} else {
F
Frank Blaschka 已提交
3568 3569 3570 3571 3572 3573 3574 3575
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
3576
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
F
Frank Blaschka 已提交
3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
3589
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
F
Frank Blaschka 已提交
3590 3591 3592 3593
			}
		}
	}

3594
	QETH_TXQ_STAT_INC(queue, doorbell);
3595 3596
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
		     index, count, aob);
3597

3598 3599 3600 3601 3602 3603
	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
3604 3605
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);
3606

3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617
		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
3618
		}
3619

3620 3621
		break;
	default:
C
Carsten Otte 已提交
3622
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3623 3624 3625
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
C
Carsten Otte 已提交
3626
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3627

F
Frank Blaschka 已提交
3628 3629 3630 3631 3632 3633
		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}

3634 3635
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
J
Julian Wiedmann 已提交
3636
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3637

J
Julian Wiedmann 已提交
3638
	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3639
	queue->prev_hdr = NULL;
J
Julian Wiedmann 已提交
3640
	queue->bulk_count = 0;
3641 3642
}

F
Frank Blaschka 已提交
3643 3644 3645 3646 3647 3648 3649 3650
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if weed have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
3665 3666
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
F
Frank Blaschka 已提交
3667
		}
3668 3669

		spin_unlock(&queue->lock);
F
Frank Blaschka 已提交
3670 3671 3672
	}
}

3673
static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3674 3675 3676
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

3677
	napi_schedule_irqoff(&card->napi);
3678 3679
}

3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

3693
		qeth_free_qdio_queues(card);
3694 3695 3696 3697 3698 3699 3700 3701 3702
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;

}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714
static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

3715 3716 3717 3718
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
3719 3720 3721 3722 3723 3724 3725 3726 3727
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
3728
		netif_tx_stop_all_queues(card->dev);
3729
		qeth_schedule_recovery(card);
3730
		return;
3731 3732 3733
	}

	for (i = first_element; i < first_element + count; ++i) {
J
Julian Wiedmann 已提交
3734
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3735
		int e = 0;
3736

3737 3738
		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
3739
			unsigned long phys_aob_addr = buffer->element[e].addr;
3740

3741
			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
3742 3743
			++e;
		}
3744
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3745 3746
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3747
		     cq->next_buf_to_init, count, NULL);
3748 3749 3750 3751 3752
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}
J
Julian Wiedmann 已提交
3753 3754

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3755 3756
}

3757 3758 3759 3760
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
3761 3762 3763
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

3764 3765 3766
	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

3767
	if (qdio_err)
3768 3769 3770
		qeth_schedule_recovery(card);
}

3771 3772 3773 3774
static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
F
Frank Blaschka 已提交
3775 3776 3777
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;

3778 3779 3780
	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
F
Frank Blaschka 已提交
3781 3782
}

3783 3784 3785
/**
 * Note: Function assumes that we have 4 outbound queues.
 */
3786
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
F
Frank Blaschka 已提交
3787
{
J
Julian Wiedmann 已提交
3788
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3789 3790 3791 3792 3793
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
3794 3795
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
3796 3797
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
3798
		case htons(ETH_P_IPV6):
3799 3800 3801 3802
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
F
Frank Blaschka 已提交
3803
		}
3804
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
J
Julian Wiedmann 已提交
3805
			return ~tos >> 6 & 3;
3806
		if (tos & IPTOS_MINCOST)
J
Julian Wiedmann 已提交
3807
			return 3;
3808 3809 3810 3811 3812 3813
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
3814 3815 3816 3817
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
J
Julian Wiedmann 已提交
3818
		return ~skb->priority >> 1 & 3;
3819
	case QETH_PRIO_Q_ING_VLAN:
J
Julian Wiedmann 已提交
3820 3821 3822
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
3823
		break;
3824 3825
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
F
Frank Blaschka 已提交
3826
	default:
3827
		break;
F
Frank Blaschka 已提交
3828
	}
3829
	return card->qdio.default_out_queue;
F
Frank Blaschka 已提交
3830 3831 3832
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

3833 3834 3835 3836 3837 3838 3839
/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
3840
static int qeth_get_elements_for_frags(struct sk_buff *skb)
3841
{
3842
	int cnt, elements = 0;
3843 3844

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3845
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3846 3847 3848 3849

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3850 3851 3852 3853
	}
	return elements;
}

3854 3855 3856 3857 3858 3859 3860 3861 3862
/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
J
Julian Wiedmann 已提交
3863 3864
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
3865 3866 3867 3868 3869 3870 3871 3872 3873
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
F
Frank Blaschka 已提交
3874

3875 3876
#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)
3877

3878
/**
3879 3880
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
3881 3882
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
3883 3884 3885
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
3886 3887 3888 3889
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
3890
 * The number of needed buffer elements is returned in @elements.
3891 3892
 * Error to create the hdr is indicated by returning with < 0.
 */
3893 3894 3895 3896
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
3897
{
3898
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3899
	const unsigned int contiguous = proto_len ? proto_len : 1;
3900
	const unsigned int max_elements = queue->max_elements;
3901 3902 3903 3904 3905 3906
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
3907
	start = (addr_t)skb->data - hdr_len;
3908 3909
	end = (addr_t)skb->data;

3910
	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3911 3912
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
3913 3914 3915 3916 3917
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
J
Julian Wiedmann 已提交
3918 3919
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
3920
		push_ok = true;
3921
		__elements = 1 + qeth_count_elements(skb, 0);
3922 3923 3924 3925
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
3938 3939
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3940
			return rc;
3941
		}
3942

3943
		QETH_TXQ_STAT_INC(queue, skbs_linearized);
3944 3945 3946 3947 3948 3949 3950
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
3951 3952
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
3953
	}
3954 3955

	/* Fall back to cache element with known-good alignment: */
3956 3957
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
3958
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
3959 3960
	if (!*hdr)
		return -ENOMEM;
3961 3962
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3963 3964 3965
	return 0;
}

3966 3967 3968 3969
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
J
Julian Wiedmann 已提交
3970
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000
/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
F
Frank Blaschka 已提交
4001
{
4002 4003
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
4004 4005
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
J
Julian Wiedmann 已提交
4006
	unsigned int elem_length, cnt;
4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4018 4019 4020

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
4021
			__set_bit(element, buf->from_kmem_cache);
4022 4023 4024 4025 4026 4027
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

4028 4029
		element++;
	}
F
Frank Blaschka 已提交
4030

4031
	/* map linear part into buffer element(s) */
F
Frank Blaschka 已提交
4032
	while (length > 0) {
J
Julian Wiedmann 已提交
4033 4034
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));
F
Frank Blaschka 已提交
4035

4036
		buffer->element[element].addr = virt_to_phys(data);
J
Julian Wiedmann 已提交
4037 4038
		buffer->element[element].length = elem_length;
		length -= elem_length;
4039 4040
		if (is_first_elem) {
			is_first_elem = false;
4041 4042
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
4043
				buffer->element[element].eflags =
4044
					SBAL_EFLAGS_FIRST_FRAG;
F
Frank Blaschka 已提交
4045
			else
4046 4047 4048 4049
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
F
Frank Blaschka 已提交
4050
		}
J
Julian Wiedmann 已提交
4051 4052

		data += elem_length;
F
Frank Blaschka 已提交
4053 4054
		element++;
	}
4055

4056
	/* map page frags into buffer element(s) */
4057
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4058 4059 4060 4061
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
4062
		while (length > 0) {
J
Julian Wiedmann 已提交
4063 4064
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));
4065

4066
			buffer->element[element].addr = virt_to_phys(data);
J
Julian Wiedmann 已提交
4067
			buffer->element[element].length = elem_length;
4068 4069
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
J
Julian Wiedmann 已提交
4070 4071 4072

			length -= elem_length;
			data += elem_length;
4073 4074
			element++;
		}
4075 4076
	}

4077 4078
	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4079
	buf->next_element_to_fill = element;
4080
	return element;
F
Frank Blaschka 已提交
4081 4082
}

4083 4084 4085 4086
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
F
Frank Blaschka 已提交
4087
{
4088
	unsigned int bytes = qdisc_pkt_len(skb);
J
Julian Wiedmann 已提交
4089
	struct qeth_qdio_out_buffer *buffer;
4090
	unsigned int next_element;
4091 4092
	struct netdev_queue *txq;
	bool stopped = false;
4093 4094
	bool flush;

J
Julian Wiedmann 已提交
4095
	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4096
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
F
Frank Blaschka 已提交
4097

4098 4099
	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
F
Frank Blaschka 已提交
4100 4101
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4102
		return -EBUSY;
4103

J
Julian Wiedmann 已提交
4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120
	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];
4121

4122 4123 4124 4125 4126 4127 4128
		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4129 4130 4131 4132 4133 4134 4135 4136
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

4137
	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4138
	buffer->bytes += bytes;
4139
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4140
	queue->prev_hdr = hdr;
4141

4142 4143 4144 4145 4146
	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
J
Julian Wiedmann 已提交
4147 4148 4149 4150 4151 4152 4153
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
4154
	}
4155 4156 4157

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
F
Frank Blaschka 已提交
4158 4159 4160
	return 0;
}

J
Julian Wiedmann 已提交
4161 4162 4163 4164 4165
static int qeth_do_send_packet(struct qeth_card *card,
			       struct qeth_qdio_out_q *queue,
			       struct sk_buff *skb, struct qeth_hdr *hdr,
			       unsigned int offset, unsigned int hd_len,
			       unsigned int elements_needed)
F
Frank Blaschka 已提交
4166
{
4167
	unsigned int start_index = queue->next_buf_to_fill;
F
Frank Blaschka 已提交
4168
	struct qeth_qdio_out_buffer *buffer;
4169
	unsigned int next_element;
4170 4171
	struct netdev_queue *txq;
	bool stopped = false;
F
Frank Blaschka 已提交
4172 4173 4174 4175
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

4176
	buffer = queue->bufs[queue->next_buf_to_fill];
4177 4178 4179

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
F
Frank Blaschka 已提交
4180
	 */
4181
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
F
Frank Blaschka 已提交
4182
		return -EBUSY;
4183 4184 4185

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

F
Frank Blaschka 已提交
4186 4187 4188 4189
	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
F
Frank Blaschka 已提交
4190
		/* does packet fit in current buffer? */
4191 4192
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
F
Frank Blaschka 已提交
4193 4194 4195 4196
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
J
Julian Wiedmann 已提交
4197
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4198
			buffer = queue->bufs[queue->next_buf_to_fill];
4199 4200

			/* We stepped forward, so sanity-check again: */
F
Frank Blaschka 已提交
4201 4202 4203
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
J
Jan Glauber 已提交
4204
							   flush_count);
4205 4206
				rc = -EBUSY;
				goto out;
F
Frank Blaschka 已提交
4207 4208 4209
			}
		}
	}
4210

4211 4212 4213 4214 4215 4216 4217 4218 4219 4220
	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

4221
	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4222 4223
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4224 4225 4226 4227 4228 4229

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
J
Julian Wiedmann 已提交
4230 4231
		queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4232 4233
	}

F
Frank Blaschka 已提交
4234
	if (flush_count)
J
Jan Glauber 已提交
4235
		qeth_flush_buffers(queue, start_index, flush_count);
4236

4237
out:
4238 4239
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
F
Frank Blaschka 已提交
4240

4241 4242
	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
F
Frank Blaschka 已提交
4243 4244 4245
	return rc;
}

J
Julian Wiedmann 已提交
4246 4247 4248
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

4262
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4263
	      struct qeth_qdio_out_q *queue, __be16 proto,
4264 4265
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
4266
				  __be16 proto, unsigned int data_len))
4267
{
4268
	unsigned int proto_len, hw_hdr_len;
4269
	unsigned int frame_len = skb->len;
4270
	bool is_tso = skb_is_gso(skb);
4271 4272 4273 4274 4275 4276
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

4277 4278 4279 4280 4281
	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
J
Julian Wiedmann 已提交
4282
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4283 4284
	}

4285 4286 4287 4288
	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

4289
	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4290 4291 4292
				      &elements);
	if (push_len < 0)
		return push_len;
4293
	if (is_tso || !push_len) {
4294 4295
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
4296
		data_offset = push_len + proto_len;
4297
	}
4298
	memset(hdr, 0, hw_hdr_len);
4299
	fill_header(queue, hdr, skb, proto, frame_len);
4300 4301 4302
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);
4303 4304

	if (IS_IQD(card)) {
4305 4306
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
4307 4308 4309
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
4310
		spin_lock(&queue->lock);
4311 4312
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
4313
		spin_unlock(&queue->lock);
4314 4315
	}

4316 4317 4318
	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

4319 4320 4321 4322
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

F
Frank Blaschka 已提交
4323 4324 4325
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4326
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
F
Frank Blaschka 已提交
4327 4328
	struct qeth_ipacmd_setadpparms *setparms;

C
Carsten Otte 已提交
4329
	QETH_CARD_TEXT(card, 4, "prmadpcb");
F
Frank Blaschka 已提交
4330 4331

	setparms = &(cmd->data.setadapterparms);
4332
	if (qeth_setadpparms_inspect_rc(cmd)) {
4333
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
F
Frank Blaschka 已提交
4334 4335 4336
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
4337
	return (cmd->hdr.return_code) ? -EIO : 0;
F
Frank Blaschka 已提交
4338 4339
}

4340
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
F
Frank Blaschka 已提交
4341
{
4342 4343
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
F
Frank Blaschka 已提交
4344 4345 4346
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

C
Carsten Otte 已提交
4347 4348
	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
F
Frank Blaschka 已提交
4349 4350

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4351
				   SETADP_DATA_SIZEOF(mode));
4352 4353
	if (!iob)
		return;
4354
	cmd = __ipa_cmd(iob);
F
Frank Blaschka 已提交
4355 4356 4357 4358 4359 4360 4361 4362
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4363
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4364
	struct qeth_ipacmd_setadpparms *adp_cmd;
F
Frank Blaschka 已提交
4365

C
Carsten Otte 已提交
4366
	QETH_CARD_TEXT(card, 4, "chgmaccb");
4367
	if (qeth_setadpparms_inspect_rc(cmd))
4368
		return -EIO;
F
Frank Blaschka 已提交
4369

4370
	adp_cmd = &cmd->data.setadapterparms;
4371 4372 4373
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

4374 4375
	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4376
		return -EADDRNOTAVAIL;
4377

4378
	eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
F
Frank Blaschka 已提交
4379 4380 4381 4382 4383 4384 4385 4386 4387
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

C
Carsten Otte 已提交
4388
	QETH_CARD_TEXT(card, 4, "chgmac");
F
Frank Blaschka 已提交
4389 4390

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4391
				   SETADP_DATA_SIZEOF(change_addr));
4392 4393
	if (!iob)
		return -ENOMEM;
4394
	cmd = __ipa_cmd(iob);
F
Frank Blaschka 已提交
4395
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4396 4397 4398
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
F
Frank Blaschka 已提交
4399 4400 4401 4402 4403 4404
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

E
Einar Lueck 已提交
4405 4406 4407
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4408
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
E
Einar Lueck 已提交
4409 4410
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4411
	QETH_CARD_TEXT(card, 4, "setaccb");
E
Einar Lueck 已提交
4412 4413

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4414 4415
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
S
Stefan Raspl 已提交
4416 4417
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
4418 4419 4420
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
4421
	switch (qeth_setadpparms_inspect_rc(cmd)) {
E
Einar Lueck 已提交
4422
	case SET_ACCESS_CTRL_RC_SUCCESS:
4423
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
E
Einar Lueck 已提交
4424 4425
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
4426
		else
E
Einar Lueck 已提交
4427 4428
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
4429
		return 0;
S
Stefan Raspl 已提交
4430
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4431 4432
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
4433
		return 0;
S
Stefan Raspl 已提交
4434
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4435 4436
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
4437
		return 0;
E
Einar Lueck 已提交
4438 4439 4440
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
4441
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4442 4443 4444 4445
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
4446
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4447 4448 4449
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
4450
		return -EPERM;
S
Stefan Raspl 已提交
4451 4452 4453
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
4454
		return -EOPNOTSUPP;
S
Stefan Raspl 已提交
4455 4456 4457
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
					"enabled at the adjacent switch port");
4458
		return -EREMOTEIO;
S
Stefan Raspl 已提交
4459 4460 4461
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
					"at the adjacent switch failed\n");
4462 4463
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
E
Einar Lueck 已提交
4464
	default:
4465
		return -EIO;
E
Einar Lueck 已提交
4466 4467 4468
	}
}

4469 4470
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
E
Einar Lueck 已提交
4471 4472 4473 4474 4475 4476
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4477
	QETH_CARD_TEXT(card, 4, "setacctl");
E
Einar Lueck 已提交
4478

4479 4480 4481 4482 4483 4484
	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

E
Einar Lueck 已提交
4485
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4486
				   SETADP_DATA_SIZEOF(set_access_ctrl));
4487 4488
	if (!iob)
		return -ENOMEM;
4489
	cmd = __ipa_cmd(iob);
E
Einar Lueck 已提交
4490
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4491
	access_ctrl_req->subcmd_code = mode;
E
Einar Lueck 已提交
4492 4493

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4494
			       NULL);
4495
	if (rc) {
4496
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4497 4498
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
E
Einar Lueck 已提交
4499
	}
4500

E
Einar Lueck 已提交
4501 4502 4503
	return rc;
}

4504
void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
F
Frank Blaschka 已提交
4505 4506 4507
{
	struct qeth_card *card;

4508
	card = dev->ml_priv;
C
Carsten Otte 已提交
4509
	QETH_CARD_TEXT(card, 4, "txtimeo");
F
Frank Blaschka 已提交
4510 4511 4512 4513
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

4514
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
F
Frank Blaschka 已提交
4515
{
4516
	struct qeth_card *card = dev->ml_priv;
F
Frank Blaschka 已提交
4517 4518 4519 4520 4521 4522
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4523 4524
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
F
Frank Blaschka 已提交
4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
4556 4557 4558
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
F
Frank Blaschka 已提交
4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
4581
				struct qeth_reply *reply, unsigned long data)
F
Frank Blaschka 已提交
4582
{
4583 4584 4585 4586
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
4587
	void *snmp_data;
F
Frank Blaschka 已提交
4588

C
Carsten Otte 已提交
4589
	QETH_CARD_TEXT(card, 3, "snpcmdcb");
F
Frank Blaschka 已提交
4590 4591

	if (cmd->hdr.return_code) {
4592
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4593
		return -EIO;
F
Frank Blaschka 已提交
4594 4595 4596 4597
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
4598
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4599
		return -EIO;
F
Frank Blaschka 已提交
4600
	}
4601 4602 4603 4604 4605

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
4606
	} else {
4607 4608
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
4609
	}
F
Frank Blaschka 已提交
4610 4611 4612

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4613 4614
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
F
Frank Blaschka 已提交
4615
	}
C
Carsten Otte 已提交
4616
	QETH_CARD_TEXT_(card, 4, "snore%i",
4617
			cmd->data.setadapterparms.hdr.used_total);
C
Carsten Otte 已提交
4618
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4619
			cmd->data.setadapterparms.hdr.seq_no);
F
Frank Blaschka 已提交
4620
	/*copy entries to user buffer*/
4621
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
F
Frank Blaschka 已提交
4622
	qinfo->udata_offset += data_len;
4623

F
Frank Blaschka 已提交
4624 4625 4626 4627 4628 4629
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

4630
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
F
Frank Blaschka 已提交
4631
{
4632
	struct qeth_snmp_ureq __user *ureq;
F
Frank Blaschka 已提交
4633
	struct qeth_cmd_buffer *iob;
4634
	unsigned int req_len;
F
Frank Blaschka 已提交
4635 4636 4637
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

C
Carsten Otte 已提交
4638
	QETH_CARD_TEXT(card, 3, "snmpcmd");
F
Frank Blaschka 已提交
4639

4640
	if (IS_VM_NIC(card))
F
Frank Blaschka 已提交
4641 4642 4643
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4644
	    IS_LAYER3(card))
F
Frank Blaschka 已提交
4645
		return -EOPNOTSUPP;
4646

4647 4648 4649 4650 4651
	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

4652 4653 4654 4655
	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

4656 4657 4658 4659 4660 4661 4662
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4663
		return -EFAULT;
4664 4665
	}

F
Frank Blaschka 已提交
4666 4667
	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
4668
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4669 4670 4671 4672
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

4673
	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
F
Frank Blaschka 已提交
4674
	if (rc)
4675 4676
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
F
Frank Blaschka 已提交
4677 4678 4679 4680
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}
4681

F
Frank Blaschka 已提交
4682 4683 4684 4685
	kfree(qinfo.udata);
	return rc;
}

4686
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
J
Julian Wiedmann 已提交
4687 4688
					 struct qeth_reply *reply,
					 unsigned long data)
4689
{
4690
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
J
Julian Wiedmann 已提交
4691
	struct qeth_qoat_priv *priv = reply->param;
4692 4693 4694
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
4695
	if (qeth_setadpparms_inspect_rc(cmd))
4696
		return -EIO;
4697 4698 4699

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

4700 4701
	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;
4702

4703 4704
	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
4705 4706 4707 4708 4709 4710 4711 4712
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

4713
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

J
Julian Wiedmann 已提交
4725 4726
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;
4727

J
Julian Wiedmann 已提交
4728 4729
	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;
4730 4731 4732

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
4733
	priv.buffer = vzalloc(oat_data.buffer_len);
J
Julian Wiedmann 已提交
4734 4735
	if (!priv.buffer)
		return -ENOMEM;
4736 4737

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4738
				   SETADP_DATA_SIZEOF(query_oat));
4739 4740 4741 4742
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
4743
	cmd = __ipa_cmd(iob);
4744 4745 4746
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

J
Julian Wiedmann 已提交
4747
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4748
	if (!rc) {
4749 4750
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
4751 4752
		oat_data.response_len = priv.response_len;

J
Julian Wiedmann 已提交
4753 4754
		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4755
			rc = -EFAULT;
4756
	}
4757 4758

out_free:
4759
	vfree(priv.buffer);
4760 4761 4762
	return rc;
}

4763 4764
static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
E
Eugene Crosser 已提交
4765
{
4766
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4767
	struct qeth_link_info *link_info = reply->param;
E
Eugene Crosser 已提交
4768 4769 4770
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
4771
	if (qeth_setadpparms_inspect_rc(cmd))
4772
		return -EIO;
E
Eugene Crosser 已提交
4773

4774
	card_info = &cmd->data.setadapterparms.data.card_info;
4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830
	netdev_dbg(card->dev,
		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
		   card_info->card_type, card_info->port_mode,
		   card_info->port_speed);

	switch (card_info->port_mode) {
	case CARD_INFO_PORTM_FULLDUPLEX:
		link_info->duplex = DUPLEX_FULL;
		break;
	case CARD_INFO_PORTM_HALFDUPLEX:
		link_info->duplex = DUPLEX_HALF;
		break;
	default:
		link_info->duplex = DUPLEX_UNKNOWN;
	}

	switch (card_info->card_type) {
	case CARD_INFO_TYPE_1G_COPPER_A:
	case CARD_INFO_TYPE_1G_COPPER_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_TP;
		break;
	case CARD_INFO_TYPE_1G_FIBRE_A:
	case CARD_INFO_TYPE_1G_FIBRE_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_FIBRE;
		break;
	case CARD_INFO_TYPE_10G_FIBRE_A:
	case CARD_INFO_TYPE_10G_FIBRE_B:
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		break;
	default:
		switch (card_info->port_speed) {
		case CARD_INFO_PORTS_10M:
			link_info->speed = SPEED_10;
			break;
		case CARD_INFO_PORTS_100M:
			link_info->speed = SPEED_100;
			break;
		case CARD_INFO_PORTS_1G:
			link_info->speed = SPEED_1000;
			break;
		case CARD_INFO_PORTS_10G:
			link_info->speed = SPEED_10000;
			break;
		case CARD_INFO_PORTS_25G:
			link_info->speed = SPEED_25000;
			break;
		default:
			link_info->speed = SPEED_UNKNOWN;
		}

		link_info->port = PORT_OTHER;
	}

E
Eugene Crosser 已提交
4831 4832 4833
	return 0;
}

4834
int qeth_query_card_info(struct qeth_card *card,
4835
			 struct qeth_link_info *link_info)
E
Eugene Crosser 已提交
4836 4837 4838 4839 4840 4841
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
4842
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4843 4844
	if (!iob)
		return -ENOMEM;
4845 4846

	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
E
Eugene Crosser 已提交
4847 4848
}

4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
4916
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4917 4918
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4919 4920 4921
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
4922 4923
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
4924
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4925 4926 4927
		break;
	default:
		link_info->port = PORT_OTHER;
4928
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4929 4930 4931 4932 4933 4934
		break;
	}

	return 0;
}

4935 4936 4937 4938 4939 4940 4941
static void qeth_init_link_info(struct qeth_card *card)
{
	card->info.link_info.duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		card->info.link_info.speed = SPEED_10000;
		card->info.link_info.port = PORT_FIBRE;
4942
		card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			card->info.link_info.speed = SPEED_100;
			card->info.link_info.port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			card->info.link_info.speed = SPEED_1000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			card->info.link_info.speed = SPEED_10000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			card->info.link_info.speed = SPEED_25000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev, "Unknown link type %x\n",
				 card->info.link_type);
			card->info.link_info.speed = SPEED_UNKNOWN;
			card->info.link_info.port = PORT_OTHER;
		}
4969 4970

		card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
4971
	}
4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
4996 4997
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
4998 4999 5000
			}
		}
	}
5001 5002
}

5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

5017
	QETH_CARD_TEXT(card, 2, "vmreqmac");
5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
5029
	request->devno = card->info.ddev_devno;
5030

5031
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5032
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5033
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5034 5035
	if (rc)
		goto out;
5036
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5037 5038 5039 5040

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
5041 5042 5043
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
5044 5045
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
5046 5047
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5048
	} else {
5049
		eth_hw_addr_set(card->dev, response->mac);
5050 5051 5052 5053 5054 5055 5056 5057 5058
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

5059 5060
static void qeth_determine_capabilities(struct qeth_card *card)
{
5061 5062
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
5063 5064 5065
	int rc;
	int ddev_offline = 0;

5066
	QETH_CARD_TEXT(card, 2, "detcapab");
5067 5068
	if (!ddev->online) {
		ddev_offline = 1;
5069
		rc = qeth_start_channel(channel);
5070
		if (rc) {
5071
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5072 5073 5074 5075
			goto out;
		}
	}

5076
	rc = qeth_read_conf_data(card);
5077
	if (rc) {
5078 5079
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
5080
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5081 5082 5083 5084 5085
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
5086
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5087

5088 5089 5090 5091 5092
	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5093 5094 5095 5096 5097 5098 5099 5100 5101
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

5102 5103
out_offline:
	if (ddev_offline == 1)
5104
		qeth_stop_channel(channel);
5105 5106 5107 5108
out:
	return;
}

5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135
static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

F
Frank Blaschka 已提交
5136 5137
static int qeth_qdio_establish(struct qeth_card *card)
{
5138 5139
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5140
	struct qeth_qib_parms *qib_parms = NULL;
F
Frank Blaschka 已提交
5141
	struct qdio_initialize init_data;
5142
	unsigned int i;
F
Frank Blaschka 已提交
5143 5144
	int rc = 0;

5145
	QETH_CARD_TEXT(card, 2, "qdioest");
F
Frank Blaschka 已提交
5146

5147 5148 5149 5150
	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;
F
Frank Blaschka 已提交
5151

5152 5153
		qeth_fill_qib_parms(card, qib_parms);
	}
F
Frank Blaschka 已提交
5154

5155 5156 5157
	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED)
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5158

5159 5160
	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
F
Frank Blaschka 已提交
5161 5162

	memset(&init_data, 0, sizeof(struct qdio_initialize));
5163 5164
	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
F
Frank Blaschka 已提交
5165
	init_data.qib_param_field_format = 0;
5166
	init_data.qib_param_field	 = (void *)qib_parms;
5167
	init_data.no_input_qs            = card->qdio.no_in_queues;
F
Frank Blaschka 已提交
5168
	init_data.no_output_qs           = card->qdio.no_out_queues;
5169 5170
	init_data.input_handler		 = qeth_qdio_input_handler;
	init_data.output_handler	 = qeth_qdio_output_handler;
5171
	init_data.irq_poll		 = qeth_qdio_poll;
F
Frank Blaschka 已提交
5172
	init_data.int_parm               = (unsigned long) card;
5173 5174
	init_data.input_sbal_addr_array  = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
F
Frank Blaschka 已提交
5175 5176 5177

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5178 5179
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
J
Jan Glauber 已提交
5180 5181 5182 5183
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
5184
		rc = qdio_establish(CARD_DDEV(card), &init_data);
J
Jan Glauber 已提交
5185
		if (rc) {
F
Frank Blaschka 已提交
5186
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
J
Jan Glauber 已提交
5187 5188
			qdio_free(CARD_DDEV(card));
		}
F
Frank Blaschka 已提交
5189
	}
5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}
5201

J
Jan Glauber 已提交
5202
out:
5203
	kfree(qib_parms);
F
Frank Blaschka 已提交
5204 5205 5206 5207 5208
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
5209
	QETH_CARD_TEXT(card, 2, "freecrd");
5210 5211 5212

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
5213
	qeth_put_cmd(card->read_cmd);
5214
	destroy_workqueue(card->event_wq);
5215
	dev_set_drvdata(&card->gdev->dev, NULL);
F
Frank Blaschka 已提交
5216 5217 5218
	kfree(card);
}

5219
static void qeth_trace_features(struct qeth_card *card)
5220 5221
{
	QETH_CARD_TEXT(card, 2, "features");
5222 5223 5224 5225 5226
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
5227 5228
}

F
Frank Blaschka 已提交
5229
static struct ccw_device_id qeth_ids[] = {
5230 5231 5232 5233 5234 5235
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
5236
#ifdef CONFIG_QETH_OSX
5237 5238
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
5239
#endif
F
Frank Blaschka 已提交
5240 5241 5242 5243 5244
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
5245
	.driver = {
S
Sebastian Ott 已提交
5246
		.owner = THIS_MODULE,
5247 5248
		.name = "qeth",
	},
F
Frank Blaschka 已提交
5249 5250 5251 5252 5253
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

5254
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
F
Frank Blaschka 已提交
5255
{
5256
	int retries = 3;
F
Frank Blaschka 已提交
5257 5258
	int rc;

5259
	QETH_CARD_TEXT(card, 2, "hrdsetup");
F
Frank Blaschka 已提交
5260
	atomic_set(&card->force_alloc_skb, 0);
5261 5262 5263
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
F
Frank Blaschka 已提交
5264
retry:
5265
	if (retries < 3)
5266 5267
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
5268
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5269 5270 5271
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
5272
	qdio_free(CARD_DDEV(card));
5273 5274

	rc = qeth_start_channel(&card->read);
5275 5276
	if (rc)
		goto retriable;
5277
	rc = qeth_start_channel(&card->write);
5278 5279
	if (rc)
		goto retriable;
5280
	rc = qeth_start_channel(&card->data);
5281 5282 5283
	if (rc)
		goto retriable;
retriable:
F
Frank Blaschka 已提交
5284
	if (rc == -ERESTARTSYS) {
5285
		QETH_CARD_TEXT(card, 2, "break1");
F
Frank Blaschka 已提交
5286 5287
		return rc;
	} else if (rc) {
5288
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5289
		if (--retries < 0)
F
Frank Blaschka 已提交
5290 5291 5292 5293
			goto out;
		else
			goto retry;
	}
5294

5295
	qeth_determine_capabilities(card);
5296
	qeth_read_ccw_conf_data(card);
5297
	qeth_idx_init(card);
5298 5299 5300

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
5301
		QETH_CARD_TEXT(card, 2, "break2");
F
Frank Blaschka 已提交
5302 5303
		return rc;
	} else if (rc) {
5304
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
F
Frank Blaschka 已提交
5305 5306 5307 5308 5309
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
5310 5311 5312

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
5313
		QETH_CARD_TEXT(card, 2, "break3");
F
Frank Blaschka 已提交
5314 5315
		return rc;
	} else if (rc) {
5316
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
F
Frank Blaschka 已提交
5317 5318 5319 5320 5321
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
5322
	card->read_or_write_problem = 0;
F
Frank Blaschka 已提交
5323 5324
	rc = qeth_mpc_initialize(card);
	if (rc) {
5325
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
F
Frank Blaschka 已提交
5326 5327
		goto out;
	}
5328

5329 5330
	rc = qeth_send_startlan(card);
	if (rc) {
5331
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5332 5333
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
J
Julian Wiedmann 已提交
5334
			*carrier_ok = false;
5335 5336 5337
		} else {
			goto out;
		}
5338
	} else {
J
Julian Wiedmann 已提交
5339 5340 5341
		*carrier_ok = true;
	}

5342 5343 5344
	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
5345
	card->options.sbp.supported_funcs = 0;
5346
	card->info.diagass_support = 0;
5347 5348 5349
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
5350 5351 5352 5353 5354
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
5355 5356 5357
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
5358
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5359 5360 5361 5362 5363
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
5364
		if (rc)
5365
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5366
	}
5367

5368 5369
	qeth_trace_features(card);

5370 5371 5372 5373
	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

5374
	if (card->options.isolation != ISOLATION_MODE_NONE) {
5375 5376
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
5377 5378 5379
		if (rc)
			goto out;
	}
5380

5381 5382
	qeth_init_link_info(card);

5383 5384 5385 5386 5387 5388
	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

F
Frank Blaschka 已提交
5389 5390
	return 0;
out:
5391 5392
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
5393 5394
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
F
Frank Blaschka 已提交
5395 5396 5397
	return rc;
}

5398 5399
static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
5400
{
5401
	bool carrier_ok;
5402 5403 5404 5405 5406
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

5407 5408 5409 5410 5411 5412 5413 5414 5415
	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

5416
	if (card->dev->reg_state != NETREG_REGISTERED)
5417 5418 5419
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

5420
	rc = disc->set_online(card, carrier_ok);
5421 5422 5423 5424 5425
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5426 5427

	mutex_unlock(&card->conf_mutex);
5428
	return 0;
5429

5430 5431
err_online:
err_hardsetup:
5432 5433 5434 5435
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

5436 5437 5438 5439 5440 5441
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
5442 5443 5444
	return rc;
}

5445 5446
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

5458 5459 5460
	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

5461 5462 5463 5464 5465 5466 5467
	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

5468 5469
	cancel_work_sync(&card->rx_mode_work);

5470
	disc->set_offline(card);
5471

5472 5473 5474 5475 5476 5477
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496
	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

static int qeth_do_reset(void *data)
{
5497
	const struct qeth_discipline *disc;
5498 5499 5500
	struct qeth_card *card = data;
	int rc;

5501 5502 5503
	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

5504 5505 5506 5507 5508 5509 5510
	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

5511 5512
	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
5513 5514 5515 5516
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
5517 5518
		qeth_set_offline(card, disc, true);
		ccwgroup_set_offline(card->gdev, false);
5519 5520 5521 5522 5523 5524 5525 5526
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

J
Julian Wiedmann 已提交
5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif

static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5587
			     struct qeth_hdr *hdr, bool uses_frags)
J
Julian Wiedmann 已提交
5588
{
5589
	struct napi_struct *napi = &card->napi;
J
Julian Wiedmann 已提交
5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603
	bool is_cso;

	switch (hdr->hdr.l2.id) {
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
5604 5605 5606 5607
		if (uses_frags)
			napi_free_frags(napi);
		else
			dev_kfree_skb_any(skb);
J
Julian Wiedmann 已提交
5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

5626 5627 5628 5629 5630 5631
	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
J
Julian Wiedmann 已提交
5632 5633
}

5634
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
F
Frank Blaschka 已提交
5635
{
5636
	struct page *page = virt_to_page(data);
5637
	unsigned int next_frag;
5638

5639
	next_frag = skb_shinfo(skb)->nr_frags;
5640
	get_page(page);
5641 5642
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
F
Frank Blaschka 已提交
5643 5644
}

5645 5646 5647 5648 5649
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}

J
Julian Wiedmann 已提交
5650
static int qeth_extract_skb(struct qeth_card *card,
5651
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
J
Julian Wiedmann 已提交
5652
			    int *__offset)
F
Frank Blaschka 已提交
5653
{
5654
	struct qeth_priv *priv = netdev_priv(card->dev);
5655
	struct qdio_buffer *buffer = qethbuffer->buffer;
5656
	struct napi_struct *napi = &card->napi;
5657
	struct qdio_buffer_element *element;
5658
	unsigned int linear_len = 0;
5659
	bool uses_frags = false;
F
Frank Blaschka 已提交
5660
	int offset = *__offset;
5661
	bool use_rx_sg = false;
5662
	unsigned int headroom;
J
Julian Wiedmann 已提交
5663
	struct qeth_hdr *hdr;
5664
	struct sk_buff *skb;
5665
	int skb_len = 0;
F
Frank Blaschka 已提交
5666

5667 5668
	element = &buffer->element[*element_no];

5669
next_packet:
F
Frank Blaschka 已提交
5670
	/* qeth_hdr must not cross element boundaries */
5671
	while (element->length < offset + sizeof(struct qeth_hdr)) {
F
Frank Blaschka 已提交
5672
		if (qeth_is_last_sbale(element))
J
Julian Wiedmann 已提交
5673
			return -ENODATA;
F
Frank Blaschka 已提交
5674 5675 5676 5677
		element++;
		offset = 0;
	}

5678
	hdr = phys_to_virt(element->addr) + offset;
J
Julian Wiedmann 已提交
5679
	offset += sizeof(*hdr);
5680 5681
	skb = NULL;

J
Julian Wiedmann 已提交
5682
	switch (hdr->hdr.l2.id) {
5683
	case QETH_HEADER_TYPE_LAYER2:
J
Julian Wiedmann 已提交
5684
		skb_len = hdr->hdr.l2.pkt_length;
5685
		linear_len = ETH_HLEN;
5686
		headroom = 0;
5687 5688
		break;
	case QETH_HEADER_TYPE_LAYER3:
J
Julian Wiedmann 已提交
5689
		skb_len = hdr->hdr.l3.length;
5690 5691 5692 5693 5694
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

J
Julian Wiedmann 已提交
5695
		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5696 5697 5698 5699 5700
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

J
Julian Wiedmann 已提交
5701
		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5702 5703 5704
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
5705
		headroom = ETH_HLEN;
5706 5707
		break;
	default:
J
Julian Wiedmann 已提交
5708
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5709 5710 5711 5712
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

5713
		/* Can't determine packet length, drop the whole buffer. */
J
Julian Wiedmann 已提交
5714
		return -EPROTONOSUPPORT;
F
Frank Blaschka 已提交
5715 5716
	}

5717 5718 5719 5720
	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5721

5722
	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5723
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
J
Julian Wiedmann 已提交
5724
		     !atomic_read(&card->force_alloc_skb));
5725

5726
	if (use_rx_sg) {
5727
		/* QETH_CQ_ENABLED only: */
5728 5729
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
F
Frank Blaschka 已提交
5751
	}
5752

5753 5754 5755
	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
5756
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5757 5758
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5759

5760 5761 5762
use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
5763
walk_packet:
F
Frank Blaschka 已提交
5764
	while (skb_len) {
5765
		int data_len = min(skb_len, (int)(element->length - offset));
5766
		char *data = phys_to_virt(element->addr) + offset;
5767 5768 5769

		skb_len -= data_len;
		offset += data_len;
5770

5771
		/* Extract data from current element: */
5772
		if (skb && data_len) {
5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
F
Frank Blaschka 已提交
5787
		}
5788 5789

		/* Step forward to next element: */
F
Frank Blaschka 已提交
5790 5791
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
C
Carsten Otte 已提交
5792
				QETH_CARD_TEXT(card, 4, "unexeob");
C
Carsten Otte 已提交
5793
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5794
				if (skb) {
5795 5796 5797 5798
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
5799 5800 5801
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
J
Julian Wiedmann 已提交
5802
				return -EMSGSIZE;
F
Frank Blaschka 已提交
5803 5804 5805 5806 5807
			}
			element++;
			offset = 0;
		}
	}
5808 5809 5810 5811 5812

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

5813
	*element_no = element - &buffer->element[0];
F
Frank Blaschka 已提交
5814
	*__offset = offset;
J
Julian Wiedmann 已提交
5815

5816
	qeth_receive_skb(card, skb, hdr, uses_frags);
J
Julian Wiedmann 已提交
5817 5818 5819
	return 0;
}

5820 5821
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
J
Julian Wiedmann 已提交
5822
{
5823
	unsigned int work_done = 0;
J
Julian Wiedmann 已提交
5824 5825

	while (budget) {
5826
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
J
Julian Wiedmann 已提交
5827 5828 5829 5830 5831 5832 5833
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
F
Frank Blaschka 已提交
5834
	}
J
Julian Wiedmann 已提交
5835 5836

	return work_done;
F
Frank Blaschka 已提交
5837 5838
}

5839
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5840
{
5841
	struct qeth_rx *ctx = &card->rx;
5842
	unsigned int work_done = 0;
5843

5844
	while (budget > 0) {
5845 5846 5847 5848
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

5849
		/* Fetch completed RX buffers: */
5850 5851
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
5852 5853 5854 5855
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
5856 5857 5858 5859 5860 5861
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

5862
		/* Process one completed RX buffer: */
5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5878
			buffer->pool_entry = NULL;
5879
			card->rx.b_count--;
5880 5881 5882
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);
5883 5884 5885 5886 5887

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
5888 5889 5890
		}
	}

5891 5892 5893
	return work_done;
}

5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

5912 5913 5914 5915 5916 5917 5918
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

5919 5920 5921 5922 5923 5924 5925 5926 5927 5928
	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

5929 5930 5931
	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

5932 5933 5934 5935 5936 5937 5938 5939 5940 5941
	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}
5942

5943
	if (napi_complete_done(napi, work_done) &&
5944
	    qdio_start_irq(CARD_DDEV(card)))
5945
		napi_schedule(napi);
5946

5947 5948 5949 5950
	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

5951
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5952 5953
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
5954 5955 5956 5957
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
5958
	bool error = !!qdio_error;
5959

5960
	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5961
		struct qaob *aob = buffer->aob;
5962
		struct qeth_qaob_priv1 *priv;
5963
		enum iucv_tx_notify notify;
5964 5965 5966 5967 5968 5969 5970 5971

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}
5972

5973 5974
		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

5975 5976 5977
		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5978 5979
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

5980 5981 5982 5983 5984
			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
5985
			}
5986

5987 5988 5989
			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
5990
		}
5991

5992 5993 5994 5995
		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
5996
		memset(aob, 0, sizeof(*aob));
5997
	} else if (card->options.cq == QETH_CQ_ENABLED) {
5998 5999
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
6000 6001
	}

6002
	qeth_clear_output_buffer(queue, buffer, error, budget);
6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

6014 6015 6016 6017
	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);
6018 6019 6020

	while (1) {
		unsigned int start, error, i;
6021 6022
		unsigned int packets = 0;
		unsigned int bytes = 0;
6023 6024
		int completed;

6025
		qeth_tx_complete_pending_bufs(card, queue, false, budget);
6026

6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043
		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
6044 6045 6046
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
6047 6048 6049 6050
			return 0;
		}

		for (i = start; i < start + completed; i++) {
6051
			struct qeth_qdio_out_buffer *buffer;
6052 6053
			unsigned int bidx = QDIO_BUFNR(i);

6054
			buffer = queue->bufs[bidx];
6055
			packets += buffer->frames;
6056 6057 6058
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
6059 6060 6061 6062 6063
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
6064 6065 6066 6067
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
6068 6069 6070 6071
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);
6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

6084 6085 6086 6087 6088 6089 6090
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

6091 6092 6093 6094 6095 6096 6097 6098
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6099
		return -EIO;
6100 6101 6102 6103 6104 6105

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

6106 6107
int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
6108
{
6109
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6110 6111 6112

	QETH_CARD_TEXT(card, 4, "defadpcb");

6113 6114 6115 6116 6117
	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6118
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6119
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6120
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6121 6122
	return 0;
}
6123
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6124

6125 6126
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
6127 6128
						 u16 cmd_code,
						 unsigned int data_length,
6129
						 enum qeth_prot_versions prot)
6130
{
6131 6132
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
6133 6134 6135
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
6136 6137 6138 6139 6140 6141
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;
6142

6143 6144
	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;
6145

6146 6147 6148
	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
6149 6150
	return iob;
}
6151
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6152

6153 6154
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
6155
				      u16 cmd_code, u32 *data,
6156
				      enum qeth_prot_versions prot)
6157
{
6158
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6159 6160
	struct qeth_cmd_buffer *iob;

6161 6162
	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6163 6164
	if (!iob)
		return -ENOMEM;
6165

6166 6167
	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6168
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6169
}
6170
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6171

F
Frank Blaschka 已提交
6172 6173
static void qeth_unregister_dbf_views(void)
{
6174
	int x;
6175

6176 6177 6178 6179
	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
F
Frank Blaschka 已提交
6180 6181
}

C
Carsten Otte 已提交
6182
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
P
Peter Tiedemann 已提交
6183 6184
{
	char dbf_txt_buf[32];
6185
	va_list args;
P
Peter Tiedemann 已提交
6186

6187
	if (!debug_level_enabled(id, level))
P
Peter Tiedemann 已提交
6188
		return;
6189 6190 6191
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
C
Carsten Otte 已提交
6192
	debug_text_event(id, level, dbf_txt_buf);
P
Peter Tiedemann 已提交
6193 6194 6195
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

F
Frank Blaschka 已提交
6196 6197
static int qeth_register_dbf_views(void)
{
6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}
F
Frank Blaschka 已提交
6211

6212 6213 6214 6215 6216 6217
		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}
F
Frank Blaschka 已提交
6218

6219 6220 6221
		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}
F
Frank Blaschka 已提交
6222 6223 6224 6225

	return 0;
}

6226 6227
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

6228 6229
int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
F
Frank Blaschka 已提交
6230
{
6231 6232
	int rc;

6233
	mutex_lock(&qeth_mod_mutex);
F
Frank Blaschka 已提交
6234 6235
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
6236 6237
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
F
Frank Blaschka 已提交
6238 6239
		break;
	case QETH_DISCIPLINE_LAYER2:
6240 6241
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
F
Frank Blaschka 已提交
6242
		break;
6243 6244
	default:
		break;
F
Frank Blaschka 已提交
6245
	}
6246
	mutex_unlock(&qeth_mod_mutex);
6247

6248
	if (!card->discipline) {
6249 6250
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
6251
		return -EINVAL;
F
Frank Blaschka 已提交
6252
	}
6253

6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264
	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

6265
	card->options.layer = discipline;
6266
	return 0;
F
Frank Blaschka 已提交
6267 6268
}

6269
void qeth_remove_discipline(struct qeth_card *card)
F
Frank Blaschka 已提交
6270
{
6271 6272
	card->discipline->remove(card->gdev);

6273
	if (IS_LAYER2(card))
6274
		symbol_put(qeth_l2_discipline);
F
Frank Blaschka 已提交
6275
	else
6276
		symbol_put(qeth_l3_discipline);
6277
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6278
	card->discipline = NULL;
F
Frank Blaschka 已提交
6279 6280
}

6281
static const struct device_type qeth_generic_devtype = {
6282 6283 6284
	.name = "qeth_generic",
};

6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352
#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

6353 6354 6355
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
6356
	struct qeth_priv *priv;
6357 6358 6359

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
6360
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6361
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6362
		break;
6363
	case QETH_CARD_TYPE_OSM:
6364
		dev = alloc_etherdev(sizeof(*priv));
6365
		break;
6366
	default:
6367
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6368 6369 6370 6371 6372
	}

	if (!dev)
		return NULL;

6373 6374
	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
6375
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6376

6377 6378
	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
J
Julian Wiedmann 已提交
6379
	dev->min_mtu = 576;
6380 6381 6382
	 /* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
6383 6384
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);
6385

J
Julian Wiedmann 已提交
6386 6387 6388 6389 6390 6391
	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;
6392

6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406
	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

F
Frank Blaschka 已提交
6407 6408 6409 6410 6411
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
6412
	enum qeth_discipline_id enforced_disc;
6413
	char dbf_name[DBF_NAME_LEN];
F
Frank Blaschka 已提交
6414

6415
	QETH_DBF_TEXT(SETUP, 2, "probedev");
F
Frank Blaschka 已提交
6416 6417 6418 6419 6420

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

6421
	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
F
Frank Blaschka 已提交
6422

6423
	card = qeth_alloc_card(gdev);
F
Frank Blaschka 已提交
6424
	if (!card) {
6425
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
F
Frank Blaschka 已提交
6426 6427 6428
		rc = -ENOMEM;
		goto err_dev;
	}
6429 6430 6431

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
6432
	card->debug = qeth_get_dbf_entry(dbf_name);
6433
	if (!card->debug) {
6434 6435 6436
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
6437 6438
	}

6439
	qeth_setup_card(card);
6440
	card->dev = qeth_alloc_netdev(card);
6441 6442
	if (!card->dev) {
		rc = -ENOMEM;
6443
		goto err_card;
6444
	}
6445

6446 6447 6448
	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

6449 6450 6451 6452
	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
6453

J
Julian Wiedmann 已提交
6454
	gdev->dev.groups = qeth_dev_groups;
6455

6456 6457 6458 6459 6460 6461 6462
	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
6463
		/* It's so early that we don't need the discipline_mutex yet. */
6464
		rc = qeth_setup_discipline(card, enforced_disc);
6465
		if (rc)
6466
			goto err_setup_disc;
6467 6468

		break;
F
Frank Blaschka 已提交
6469 6470 6471 6472
	}

	return 0;

6473
err_setup_disc:
6474
err_chp_desc:
6475
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

6487
	QETH_CARD_TEXT(card, 2, "removedv");
F
Frank Blaschka 已提交
6488

6489
	mutex_lock(&card->discipline_mutex);
6490 6491
	if (card->discipline)
		qeth_remove_discipline(card);
6492
	mutex_unlock(&card->discipline_mutex);
6493

6494 6495
	qeth_free_qdio_queues(card);

6496
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6497 6498 6499 6500 6501 6502 6503 6504
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
6505
	enum qeth_discipline_id def_discipline;
F
Frank Blaschka 已提交
6506

6507
	mutex_lock(&card->discipline_mutex);
6508
	if (!card->discipline) {
6509 6510
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
6511
		rc = qeth_setup_discipline(card, def_discipline);
F
Frank Blaschka 已提交
6512 6513 6514
		if (rc)
			goto err;
	}
6515

6516 6517
	rc = qeth_set_online(card, card->discipline);

F
Frank Blaschka 已提交
6518
err:
6519
	mutex_unlock(&card->discipline_mutex);
F
Frank Blaschka 已提交
6520 6521 6522 6523 6524 6525
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6526
	int rc;
6527

6528 6529 6530 6531 6532
	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
F
Frank Blaschka 已提交
6533 6534 6535 6536 6537
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6538

6539 6540 6541 6542
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
6543
	qeth_drain_output_queues(card);
6544
	qdio_free(CARD_DDEV(card));
F
Frank Blaschka 已提交
6545 6546
}

6547 6548
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
F
Frank Blaschka 已提交
6549 6550 6551
{
	int err;

6552 6553
	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);
6554 6555 6556

	return err ? err : count;
}
6557
static DRIVER_ATTR_WO(group);
F
Frank Blaschka 已提交
6558

6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570
static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

A
Arnd Bergmann 已提交
6585
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6586 6587 6588 6589 6590 6591
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
A
Arnd Bergmann 已提交
6592
		rc = qeth_snmp_command(card, data);
6593 6594
		break;
	case SIOC_QETH_GET_CARD_TYPE:
6595 6596
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
6597
			return 1;
6598
		return 0;
A
Arnd Bergmann 已提交
6599 6600 6601 6602
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
6603
		rc = -EOPNOTSUPP;
A
Arnd Bergmann 已提交
6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
A
Arnd Bergmann 已提交
6631
		return -EOPNOTSUPP;
6632 6633 6634 6635 6636 6637 6638
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

6639 6640
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
6641 6642
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6643
	u32 *features = reply->param;
6644

6645
	if (qeth_setassparms_inspect_rc(cmd))
6646
		return -EIO;
6647

6648
	*features = cmd->data.setassparms.data.flags_32bit;
6649 6650 6651
	return 0;
}

6652 6653
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
6654
{
6655 6656
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
6657 6658
}

6659
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6660
			    enum qeth_prot_versions prot, u8 *lp2lp)
6661
{
6662
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6663 6664 6665
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
6666 6667
	int rc;

6668 6669 6670
	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
6671
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6672

6673 6674 6675 6676 6677 6678 6679
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
6680
		return rc;
6681

6682 6683 6684 6685
	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}
6686

6687 6688
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
6689
				       prot);
6690 6691 6692
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
6693
	}
6694 6695 6696 6697 6698

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6699
	if (rc) {
6700
		qeth_set_csum_off(card, cstype, prot);
6701 6702
		return rc;
	}
6703

6704 6705 6706 6707 6708 6709
	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

6710 6711
	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6712 6713 6714 6715

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

6716 6717 6718
	return 0;
}

6719
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6720
			     enum qeth_prot_versions prot, u8 *lp2lp)
6721
{
6722
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6723
		    qeth_set_csum_off(card, cstype, prot);
6724 6725
}

6726 6727 6728 6729 6730 6731 6732
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6733
		return -EIO;
6734 6735 6736 6737 6738 6739

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

6740 6741
static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
6742
{
6743
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6744
						 IPA_CMD_ASS_STOP, NULL, prot);
6745
}
6746

6747 6748 6749
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
6750 6751 6752 6753 6754 6755 6756 6757 6758 6759
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

6760
	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6761 6762 6763 6764 6765 6766 6767 6768 6769
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6770 6771
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
6772 6773 6774 6775 6776 6777
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
6778 6779 6780
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
6795
}
6796

6797 6798 6799
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
6800
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6801
}
6802

6803 6804 6805 6806 6807 6808 6809
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

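/* When an offload that is restricted for local next-hop traffic ends up
 * disabled, the cached local addresses for that IP version are flushed.
 */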
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

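/* Apply the requested feature changes via IPA commands. A bit is cleared
 * from 'changed' when its IPA command fails, so only the successfully
 * toggled features are committed to dev->features at the end.
 */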
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

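/* RX counters are tracked per card, TX counters per output queue and are
 * summed up here.
 */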
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

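/* IQD devices use a dedicated TX queue for multicast; unicast traffic is
 * spread over the remaining queues and never mapped onto the mcast queue.
 */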
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

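/* Bring the interface up: start the TX queues, then activate one TX NAPI
 * instance per output queue plus the RX NAPI instance.
 */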
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");