// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	if (!queue)
		return;

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127,
			     NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
	}
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

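/* Map the SBALF 15 completion code of a CQ buffer to the af_iucv TX
 * notification to raise (optionally as a "delayed" variant).
 */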
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

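/* Abort all IPA commands that are still waiting for a response and notify
 * their issuers with -ECANCELED.
 */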
static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

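/* Check a buffer received on the read channel for an IDX TERMINATE
 * indication and translate its cause code into an errno.
 */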
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

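/* Completion callback for the read channel: validate the incoming IDX/IPA
 * data and dispatch it to the matching pending command, if any.
 */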
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

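/* Complete TX buffers whose asynchronous QAOB completion has finished, or all
 * pending buffers when draining the queue.
 */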
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

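/* Read the channel-path descriptor to derive the function level and, for OSA
 * devices, whether the CHPID restricts the device to a single TX queue.
 */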
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

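/* On z/VM, query the hypervisor via DIAG 0x26C to learn whether the virtual
 * NIC operates in layer 2 or layer 3 mode.
 */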
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 *  cb_card:			pointer to the qeth_card structure
 *  cb_reply:			pointer to the qeth_reply structure
 *  cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}
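/* Illustration only (not part of the driver): a minimal reply_cb that
 * follows the contract documented above would map any non-zero hardware
 * return code to -EIO and signal that no further reply blocks are
 * expected:
 *
 *	static int example_reply_cb(struct qeth_card *card,
 *				    struct qeth_reply *reply,
 *				    unsigned long data)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 *
 *		return cmd->hdr.return_code ? -EIO : 0;
 *	}
 */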

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

2024
static void qeth_read_conf_data_cb(struct qeth_card *card,
2025 2026
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
2027
{
2028
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2029
	int rc = 0;
2030
	u8 *tag;
2031 2032

	QETH_CARD_TEXT(card, 2, "cfgunit");
2033 2034 2035 2036 2037 2038

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051
	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;
2052

2053
out:
2054
	qeth_notify_cmd(iob, rc);
2055
	qeth_put_cmd(iob);
2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
2068 2069
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;
2070 2071 2072 2073 2074 2075 2076 2077 2078

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

2079
	return qeth_send_control_data(card, iob, NULL, NULL);
2080 2081
}

2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
2096 2097
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

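/* Both CCW channels perform an IDX ACTIVATE exchange. The callbacks below
 * validate the reply and compare the peer's function level with our own;
 * the read channel reply additionally carries the issuer RM token and the
 * microcode level (MCL) of the adapter.
 */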
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
2146
	qeth_notify_cmd(iob, rc);
2147
	qeth_put_cmd(iob);
2148 2149
}

2150
static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2151 2152
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
2153
{
2154
	struct qeth_channel *channel = iob->channel;
2155 2156 2157
	u16 peer_level;
	int rc;

2158
	QETH_CARD_TEXT(card, 2, "idxwrcb");
2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
2174
	qeth_notify_cmd(iob, rc);
2175
	qeth_put_cmd(iob);
2176 2177 2178 2179 2180 2181 2182
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
2183
	struct ccw1 *ccw = __ccw_from_cmd(iob);
2184

2185 2186 2187
	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2188 2189
	iob->finalize = qeth_idx_finalize_cmd;

2190
	port |= QETH_IDX_ACT_INVAL_FRAME;
2191 2192 2193 2194 2195
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
2196
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2197 2198 2199 2200 2201 2202 2203 2204 2205
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

2206
	QETH_CARD_TEXT(card, 2, "idxread");
2207

2208
	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2209 2210 2211 2212 2213
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
2214
	iob->callback = qeth_idx_activate_read_channel_cb;
2215

2216
	rc = qeth_send_control_data(card, iob, NULL, NULL);
2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

2230
	QETH_CARD_TEXT(card, 2, "idxwrite");
2231

2232
	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2233 2234 2235 2236 2237
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
2238
	iob->callback = qeth_idx_activate_write_channel_cb;
2239

2240
	rc = qeth_send_control_data(card, iob, NULL, NULL);
2241 2242 2243 2244 2245 2246 2247
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

F
Frank Blaschka 已提交
2248 2249 2250 2251 2252
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2253
	QETH_CARD_TEXT(card, 2, "cmenblcb");
F
Frank Blaschka 已提交
2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2266
	QETH_CARD_TEXT(card, 2, "cmenable");
F
Frank Blaschka 已提交
2267

2268
	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2269 2270
	if (!iob)
		return -ENOMEM;
2271

F
Frank Blaschka 已提交
2272 2273 2274 2275 2276
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

2277
	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
F
Frank Blaschka 已提交
2278 2279 2280 2281 2282 2283 2284
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2285
	QETH_CARD_TEXT(card, 2, "cmsetpcb");
F
Frank Blaschka 已提交
2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2298
	QETH_CARD_TEXT(card, 2, "cmsetup");
F
Frank Blaschka 已提交
2299

2300
	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2301 2302
	if (!iob)
		return -ENOMEM;
2303

F
Frank Blaschka 已提交
2304 2305 2306 2307 2308 2309
	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2310
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
F
Frank Blaschka 已提交
2311 2312
}

2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323
static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

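/* Clamp the netdev's MTU range to what the adapter reported. For IQD the
 * RX buffer size is derived from the maximum MTU, so changing it forces
 * the QDIO queues to be re-allocated.
 */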
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
2345
			qeth_free_qdio_queues(card);
2346 2347 2348 2349
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
2350
		else if (IS_LAYER2(card))
2351 2352 2353
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
F
Frank Blaschka 已提交
2354
	}
2355 2356 2357 2358 2359

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
F
Frank Blaschka 已提交
2360 2361
}

J
Julian Wiedmann 已提交
2362
static int qeth_get_mtu_outof_framesize(int framesize)
F
Frank Blaschka 已提交
2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
2384
	u8 link_type = 0;
F
Frank Blaschka 已提交
2385

2386
	QETH_CARD_TEXT(card, 2, "ulpenacb");
F
Frank Blaschka 已提交
2387 2388 2389 2390 2391

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
2392
	if (IS_IQD(card)) {
F
Frank Blaschka 已提交
2393 2394 2395
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
2396
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
F
Frank Blaschka 已提交
2397
	}
2398
	*(u16 *)reply->param = mtu;
F
Frank Blaschka 已提交
2399 2400 2401 2402 2403

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2404 2405 2406 2407 2408
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
2409
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
F
Frank Blaschka 已提交
2410 2411 2412
	return 0;
}

2413 2414
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
2415
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
2416 2417
}

F
Frank Blaschka 已提交
2418 2419
static int qeth_ulp_enable(struct qeth_card *card)
{
2420
	u8 prot_type = qeth_mpc_select_prot_type(card);
F
Frank Blaschka 已提交
2421
	struct qeth_cmd_buffer *iob;
2422
	u16 max_mtu;
2423
	int rc;
F
Frank Blaschka 已提交
2424

2425
	QETH_CARD_TEXT(card, 2, "ulpenabl");
F
Frank Blaschka 已提交
2426

2427
	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2428 2429
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2430

2431
	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
F
Frank Blaschka 已提交
2432 2433 2434 2435 2436
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2437
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2438 2439 2440
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
F
Frank Blaschka 已提交
2441 2442 2443 2444 2445 2446 2447
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2448
	QETH_CARD_TEXT(card, 2, "ulpstpcb");
F
Frank Blaschka 已提交
2449 2450 2451 2452 2453

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
2454 2455
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
2456
		QETH_CARD_TEXT(card, 2, "olmlimit");
2457 2458
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
2459
		return -EMLINK;
2460
	}
S
Stefan Raspl 已提交
2461
	return 0;
F
Frank Blaschka 已提交
2462 2463 2464 2465 2466 2467 2468
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

2469
	QETH_CARD_TEXT(card, 2, "ulpsetup");
F
Frank Blaschka 已提交
2470

2471
	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2472 2473
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2474 2475 2476 2477 2478 2479 2480 2481

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

2482
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
F
Frank Blaschka 已提交
2483 2484
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2485
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
F
Frank Blaschka 已提交
2486 2487
}

2488 2489
static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
2490 2491 2492
{
	struct qeth_qdio_out_buffer *newbuf;

2493
	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2494 2495 2496
	if (!newbuf)
		return -ENOMEM;

2497
	newbuf->buffer = q->qdio_bufs[bidx];
2498 2499 2500 2501
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
2502
	return 0;
2503 2504
}

2505
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2506 2507 2508 2509
{
	if (!q)
		return;

2510
	qeth_drain_output_queue(q, true);
2511 2512 2513 2514
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

2515
static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2516 2517
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2518
	unsigned int i;
2519 2520 2521 2522

	if (!q)
		return NULL;

2523 2524 2525 2526
	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2527
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2528
			goto err_out_bufs;
2529
	}
2530

2531
	return q;
2532 2533 2534

err_out_bufs:
	while (i > 0)
2535
		qeth_free_out_buf(q->bufs[--i]);
2536 2537 2538 2539
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
2540
}
2541

2542 2543 2544 2545 2546 2547 2548 2549
static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

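/* Allocate the inbound queue, the inbound buffer pool, the outbound
 * queues (including their TX completion timers and coalescing defaults)
 * and the completion queue. Moves qdio.state from UNINITIALIZED to
 * ALLOCATED.
 */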
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	unsigned int i;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

2560
	QETH_CARD_TEXT(card, 2, "inq");
S
Sebastian Ott 已提交
2561
	card->qdio.in_q = qeth_alloc_qdio_queue();
F
Frank Blaschka 已提交
2562 2563
	if (!card->qdio.in_q)
		goto out_nomem;
S
Sebastian Ott 已提交
2564

F
Frank Blaschka 已提交
2565 2566 2567
	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;
2568

F
Frank Blaschka 已提交
2569 2570
	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2571 2572 2573 2574
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
F
Frank Blaschka 已提交
2575
			goto out_freeoutq;
2576
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2577 2578 2579 2580
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
2581
		INIT_LIST_HEAD(&queue->pending_bufs);
2582
		spin_lock_init(&queue->lock);
2583
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2584 2585 2586 2587 2588 2589 2590 2591 2592
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
2593
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
F
Frank Blaschka 已提交
2594
	}
2595 2596 2597 2598 2599

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

F
Frank Blaschka 已提交
2600 2601 2602
	return 0;

out_freeoutq:
2603
	while (i > 0) {
2604
		qeth_free_output_queue(card->qdio.out_qs[--i]);
2605 2606
		card->qdio.out_qs[i] = NULL;
	}
F
Frank Blaschka 已提交
2607 2608
	qeth_free_buffer_pool(card);
out_freeinq:
S
Sebastian Ott 已提交
2609
	qeth_free_qdio_queue(card->qdio.in_q);
F
Frank Blaschka 已提交
2610 2611 2612 2613 2614 2615
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

2616
static void qeth_free_qdio_queues(struct qeth_card *card)
2617 2618 2619 2620 2621 2622 2623 2624 2625 2626
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
2627
			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
2628 2629 2630 2631 2632 2633
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

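/* Fill the QIB parameter area: PCI thresholds, blocking thresholds (blkt)
 * and - unless TX prio-queueing with its implicit default priorities is
 * used - the per-queue priority order for the adapter.
 */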
static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
F
Frank Blaschka 已提交
2678 2679 2680 2681
}

static int qeth_qdio_activate(struct qeth_card *card)
{
2682
	QETH_CARD_TEXT(card, 3, "qdioact");
J
Jan Glauber 已提交
2683
	return qdio_activate(CARD_DDEV(card));
F
Frank Blaschka 已提交
2684 2685 2686 2687 2688 2689
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2690
	QETH_CARD_TEXT(card, 2, "dmact");
F
Frank Blaschka 已提交
2691

2692
	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2693 2694
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2695 2696 2697 2698 2699

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2700
	return qeth_send_control_data(card, iob, NULL, NULL);
F
Frank Blaschka 已提交
2701 2702 2703 2704 2705 2706
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

2707
	QETH_CARD_TEXT(card, 2, "mpcinit");
F
Frank Blaschka 已提交
2708 2709 2710

	rc = qeth_issue_next_read(card);
	if (rc) {
2711
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
F
Frank Blaschka 已提交
2712 2713 2714 2715
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
2716
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2717
		return rc;
F
Frank Blaschka 已提交
2718 2719 2720
	}
	rc = qeth_cm_setup(card);
	if (rc) {
2721
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2722
		return rc;
F
Frank Blaschka 已提交
2723 2724 2725
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
2726
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2727
		return rc;
F
Frank Blaschka 已提交
2728 2729 2730
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
2731
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2732
		return rc;
F
Frank Blaschka 已提交
2733
	}
2734
	rc = qeth_alloc_qdio_queues(card);
F
Frank Blaschka 已提交
2735
	if (rc) {
2736
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2737
		return rc;
F
Frank Blaschka 已提交
2738 2739 2740
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
2741
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2742
		qeth_free_qdio_queues(card);
2743
		return rc;
F
Frank Blaschka 已提交
2744 2745 2746
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
2747
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2748
		return rc;
F
Frank Blaschka 已提交
2749 2750 2751
	}
	rc = qeth_dm_act(card);
	if (rc) {
2752
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2753
		return rc;
F
Frank Blaschka 已提交
2754 2755 2756 2757 2758
	}

	return 0;
}

2759
static void qeth_print_status_message(struct qeth_card *card)
F
Frank Blaschka 已提交
2760 2761
{
	switch (card->info.type) {
2762 2763 2764
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character to indicate
		 * a HiperSockets-like reporting of the level;
		 * OSA sets the first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
2775
		fallthrough;
F
Frank Blaschka 已提交
2776
	case QETH_CARD_TYPE_IQD:
2777
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
F
Frank Blaschka 已提交
2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
2792 2793 2794 2795 2796 2797 2798
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
F
Frank Blaschka 已提交
2799 2800 2801 2802 2803 2804
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

C
Carsten Otte 已提交
2805
	QETH_CARD_TEXT(card, 5, "inwrklst");
F
Frank Blaschka 已提交
2806 2807 2808 2809 2810 2811 2812

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

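/* Pick an RX buffer pool entry whose pages are no longer referenced
 * elsewhere. If none is free, recycle the first entry and replace any
 * still-referenced pages with freshly allocated ones.
 */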
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

2822
	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
F
Frank Blaschka 已提交
2823 2824
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2825
			if (page_count(entry->elements[i]) > 1) {
F
Frank Blaschka 已提交
2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
2837 2838
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
F
Frank Blaschka 已提交
2839
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2840
		if (page_count(entry->elements[i]) > 1) {
2841
			struct page *page = dev_alloc_page();
2842 2843

			if (!page)
F
Frank Blaschka 已提交
2844
				return NULL;
2845 2846 2847 2848

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
F
Frank Blaschka 已提交
2849 2850 2851 2852 2853 2854 2855 2856 2857
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
2858
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
F
Frank Blaschka 已提交
2859 2860
	int i;

2861
	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2862
		buf->rx_skb = netdev_alloc_skb(card->dev,
2863 2864
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
2865
		if (!buf->rx_skb)
2866
			return -ENOMEM;
2867 2868
	}

2869 2870 2871 2872 2873 2874 2875
	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
2885
		buf->buffer->element[i].addr =
2886
			page_to_phys(pool_entry->elements[i]);
F
Frank Blaschka 已提交
2887
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2888
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
F
Frank Blaschka 已提交
2889
		else
2890 2891
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
F
Frank Blaschka 已提交
2892 2893 2894 2895
	}
	return 0;
}

J
Julian Wiedmann 已提交
2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

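/* (Re-)initialize the previously allocated QDIO queues: prime the inbound
 * queue with buffers from the pool, set up the completion queue and reset
 * all outbound queue state.
 */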
static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
2917 2918
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));
2919

F
Frank Blaschka 已提交
	qeth_initialize_working_pool_list(card);
	/* give only as many buffers to hardware as we have buffer pool entries */
	for (i = 0; i < rx_bufs; i++) {
2923 2924 2925 2926 2927
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

2928
	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2929 2930
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
		     NULL);
F
Frank Blaschka 已提交
2931
	if (rc) {
2932
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
F
Frank Blaschka 已提交
2933 2934
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;

F
Frank Blaschka 已提交
2942 2943
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2944 2945 2946 2947 2948 2949
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
2950
		queue->prev_hdr = NULL;
2951
		queue->coalesced_frames = 0;
2952
		queue->bulk_start = 0;
J
Julian Wiedmann 已提交
2953 2954
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2955 2956
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
2957
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
F
Frank Blaschka 已提交
2958 2959 2960 2961
	}
	return 0;
}

2962
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2963
				  struct qeth_cmd_buffer *iob)
2964
{
2965
	qeth_mpc_finalize_cmd(card, iob);
2966 2967

	/* override with IPA-specific values: */
2968
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2969 2970
}

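/* IPA commands are wrapped in an MPC PDU on the write channel: the buffer
 * starts with IPA_PDU_HEADER, carries the ULP connection token and the
 * length fields, and is finalized with a per-card IPA sequence number.
 */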
static void qeth_prepare_ipa_cmd(struct qeth_card *card,
				 struct qeth_cmd_buffer *iob, u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}

2991 2992 2993 2994 2995 2996 2997 2998
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}

2999 3000 3001 3002 3003 3004
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
3005
	struct qeth_ipacmd_hdr *hdr;
3006 3007 3008 3009 3010 3011 3012

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

J
Julian Wiedmann 已提交
3013 3014
	qeth_prepare_ipa_cmd(card, iob, data_length);
	iob->match = qeth_ipa_match_reply;
3015 3016 3017 3018 3019

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
3020
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3021 3022 3023 3024
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
3025 3026 3027 3028
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

3029 3030 3031 3032 3033 3034 3035 3036
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

3037
/*
E
Eugene Crosser 已提交
3038 3039 3040 3041 3042
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */

F
Frank Blaschka 已提交
3043 3044 3045 3046 3047 3048 3049
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

C
Carsten Otte 已提交
3050
	QETH_CARD_TEXT(card, 4, "sendipa");
3051

3052
	if (card->read_or_write_problem) {
3053
		qeth_put_cmd(iob);
3054 3055 3056
		return -EIO;
	}

3057 3058
	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
3059
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3060 3061 3062 3063
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
F
Frank Blaschka 已提交
3064 3065 3066 3067
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078
static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

3079
static int qeth_send_startlan(struct qeth_card *card)
F
Frank Blaschka 已提交
3080
{
3081
	struct qeth_cmd_buffer *iob;
F
Frank Blaschka 已提交
3082

3083
	QETH_CARD_TEXT(card, 2, "strtlan");
F
Frank Blaschka 已提交
3084

3085
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3086 3087
	if (!iob)
		return -ENOMEM;
3088
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
F
Frank Blaschka 已提交
3089 3090
}

3091
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
F
Frank Blaschka 已提交
3092
{
3093
	if (!cmd->hdr.return_code)
F
Frank Blaschka 已提交
3094 3095
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
3096
	return cmd->hdr.return_code;
F
Frank Blaschka 已提交
3097 3098 3099 3100 3101
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3102
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3103
	struct qeth_query_cmds_supp *query_cmd;
F
Frank Blaschka 已提交
3104

C
Carsten Otte 已提交
3105
	QETH_CARD_TEXT(card, 3, "quyadpcb");
3106
	if (qeth_setadpparms_inspect_rc(cmd))
3107
		return -EIO;
F
Frank Blaschka 已提交
3108

3109 3110 3111 3112 3113 3114
	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
3115
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3116
	}
3117 3118

	card->options.adp.supported = query_cmd->supported_cmds;
3119
	return 0;
F
Frank Blaschka 已提交
3120 3121
}

S
Stefan Raspl 已提交
3122
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3123 3124
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
F
Frank Blaschka 已提交
3125
{
3126
	struct qeth_ipacmd_setadpparms_hdr *hdr;
F
Frank Blaschka 已提交
3127 3128
	struct qeth_cmd_buffer *iob;

3129 3130 3131 3132 3133 3134
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;
F
Frank Blaschka 已提交
3135

3136 3137 3138 3139 3140
	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
F
Frank Blaschka 已提交
3141 3142 3143
	return iob;
}

3144
static int qeth_query_setadapterparms(struct qeth_card *card)
F
Frank Blaschka 已提交
3145 3146 3147 3148
{
	int rc;
	struct qeth_cmd_buffer *iob;

C
Carsten Otte 已提交
3149
	QETH_CARD_TEXT(card, 3, "queryadp");
F
Frank Blaschka 已提交
3150
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3151
				   SETADP_DATA_SIZEOF(query_cmds_supp));
3152 3153
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
3154 3155 3156 3157
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

3158 3159 3160 3161 3162
static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

3163
	QETH_CARD_TEXT(card, 2, "qipasscb");
3164 3165

	cmd = (struct qeth_ipa_cmd *) data;
3166 3167

	switch (cmd->hdr.return_code) {
3168 3169
	case IPA_RC_SUCCESS:
		break;
3170 3171
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
3172
		QETH_CARD_TEXT(card, 2, "ipaunsup");
3173 3174
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3175
		return -EOPNOTSUPP;
3176
	default:
3177 3178 3179
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
3180 3181
	}

3182 3183 3184 3185 3186
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
3187 3188
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
3189 3190 3191
	return 0;
}

3192 3193
static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
3194 3195 3196 3197
{
	int rc;
	struct qeth_cmd_buffer *iob;

3198
	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3199
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3200 3201
	if (!iob)
		return -ENOMEM;
3202 3203 3204 3205
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

3206 3207 3208
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
3209
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3210
	struct qeth_query_switch_attributes *attrs;
3211
	struct qeth_switch_info *sw_info;
3212 3213

	QETH_CARD_TEXT(card, 2, "qswiatcb");
3214
	if (qeth_setadpparms_inspect_rc(cmd))
3215
		return -EIO;
3216

3217 3218 3219 3220 3221 3222
	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
3236
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3237 3238
	if (!iob)
		return -ENOMEM;
3239 3240 3241 3242
	return qeth_send_ipa_cmd(card, iob,
				qeth_query_switch_attributes_cb, sw_info);
}

3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

3262 3263 3264
static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3265 3266
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;
3267

3268
	if (rc) {
3269
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3270 3271 3272 3273
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
3274 3275 3276 3277 3278 3279 3280
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

3281
	QETH_CARD_TEXT(card, 2, "qdiagass");
3282
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3283 3284
	if (!iob)
		return -ENOMEM;
3285 3286 3287 3288 3289 3290 3291 3292 3293
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
3294
	int level;
3295 3296 3297 3298 3299 3300 3301

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
3302 3303
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3304
		tid->lparnr = info222->lpar_number;
3305
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3306 3307 3308 3309 3310 3311 3312 3313 3314
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3315 3316
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;
3317

3318
	if (rc) {
3319
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3320 3321
		return -EIO;
	}
3322 3323 3324 3325 3326 3327 3328 3329
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

3330
	QETH_CARD_TEXT(card, 2, "diagtrap");
3331
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3332 3333
	if (!iob)
		return -ENOMEM;
3334
	cmd = __ipa_cmd(iob);
3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

3354 3355 3356 3357
static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
F
Frank Blaschka 已提交
3358
{
J
Jan Glauber 已提交
3359
	if (qdio_error) {
C
Carsten Otte 已提交
3360
		QETH_CARD_TEXT(card, 2, dbftext);
C
Carsten Otte 已提交
3361
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3362
			       buf->element[15].sflags);
C
Carsten Otte 已提交
3363
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3364
			       buf->element[14].sflags);
C
Carsten Otte 已提交
3365
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3366
		if ((buf->element[15].sflags) == 0x12) {
3367
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3368 3369 3370
			return 0;
		} else
			return 1;
F
Frank Blaschka 已提交
3371 3372 3373 3374
	}
	return 0;
}

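/* Re-feed processed RX buffers to the device once at least
 * QETH_IN_BUF_REQUEUE_THRESHOLD of them have accumulated (to save SIGAs).
 * If the buffer pool is exhausted, the reclaim worker is scheduled.
 */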
static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
J
Julian Wiedmann 已提交
3389
				&queue->bufs[QDIO_BUFNR(i)])) {
F
Frank Blaschka 已提交
3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in a memory shortage, so we switch back to
			 * traditional skb allocation and drop packets
			 */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

3405 3406 3407 3408 3409 3410 3411 3412 3413 3414
		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
3415
			return 0;
3416 3417
		}

J
Jan Glauber 已提交
3418
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3419
			     queue->next_buf_to_init, count, NULL);
F
Frank Blaschka 已提交
3420
		if (rc) {
C
Carsten Otte 已提交
3421
			QETH_CARD_TEXT(card, 2, "qinberr");
F
Frank Blaschka 已提交
3422
		}
J
Julian Wiedmann 已提交
3423 3424
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
3425
		return count;
F
Frank Blaschka 已提交
3426
	}
3427 3428

	return 0;
F
Frank Blaschka 已提交
3429
}
3430 3431 3432

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
3433 3434 3435
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);
3436

3437 3438 3439 3440
	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
3441
}
F
Frank Blaschka 已提交
3442

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns the number of buffers that were prepared for flushing (0 or 1).
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switch to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

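/* Hand a range of primed buffers over to the device with one SIGA. On
 * HiperSockets devices with an active completion queue, a QAOB is attached
 * to single-buffer doorbells so the TX completion can arrive asynchronously.
 * On OSA devices a PCI request flag may be set so that a later interrupt
 * lets us flush any partially packed buffer that is still lingering.
 */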
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	struct qeth_card *card = queue->card;
	unsigned int frames, usecs;
	struct qaob *aob = NULL;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
							     GFP_ATOMIC);
			if (buf->aob) {
				struct qeth_qaob_priv1 *priv;

				aob = buf->aob;
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
			}
		}
	} else {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * There is no outstanding PCI any more, so we
				 * have to request a PCI to be sure that the
				 * PCI will wake at some time in the future.
				 * Then we can flush packed buffers that might
				 * still be hanging around, which can happen if
				 * no further send was requested by the stack.
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
		     index, count, aob);

	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}

		break;
	default:
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a PCI flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

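/* Completion-queue interrupt handler: each used element of a CQ buffer
 * carries the address of a QAOB describing an asynchronously completed TX
 * buffer. Process them, scrub the buffers and give them back to the device.
 */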
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     cq->next_buf_to_init, count, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
}

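/* Select one of the four priority queues for an outgoing skb. For
 * precedence-based queueing, "~tos >> 6 & 3" maps the two upper precedence
 * bits to a queue number, so higher precedence ends up on a lower-numbered
 * (higher priority) queue.
 */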
/*
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * the fragmented part of the SKB. Returns zero for a linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}

#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be placed on.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in the same page as
 *	       the HW header.
 * @elements: returns the required number of buffer elements for this skb.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (e.g. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * An error while creating the header is indicated by a return value < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}

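/* Decide whether an skb may be appended to the currently open bulk buffer:
 * all packets in one buffer must go to the same destination, i.e. the same
 * MAC/VLAN for layer 2 or the same next hop/VLAN for layer 3.
 */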
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

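/* A multi-element mapping is framed by SBAL_EFLAGS_FIRST_FRAG on the first
 * element and SBAL_EFLAGS_LAST_FRAG on the last, with MIDDLE_FRAG for
 * everything in between; an skb that fits into a single element is mapped
 * with eflags 0.
 */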
/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			__set_bit(element, buf->from_kmem_cache);
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_phys(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_phys(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}

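/* Fast TX path for IQD devices: packets are aggregated into the currently
 * open bulk buffer and flushed as soon as the buffer is full, the bulk
 * limit is reached or the next packet does not qualify for bulking.
 */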
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}

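/* TX path for OSA devices, called with queue->lock held: fills the next
 * output buffer, optionally packing several skbs into one buffer, and
 * flushes primed buffers to the device.
 */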
static int qeth_do_send_packet(struct qeth_card *card,
			       struct qeth_qdio_out_q *queue,
			       struct sk_buff *skb, struct qeth_hdr *hdr,
			       unsigned int offset, unsigned int hd_len,
			       unsigned int elements_needed)
{
	unsigned int start_index = queue->next_buf_to_fill;
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);

out:
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}

static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, __be16 proto,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  __be16 proto, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, proto, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		spin_lock(&queue->lock);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
		spin_unlock(&queue->lock);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

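/*
 * Typical call pattern (illustrative sketch only, not taken from this file):
 * a discipline's ndo_start_xmit picks a TX queue and passes its own
 * header-fill callback, roughly
 *
 *	queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb)];
 *	rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
 *		       my_fill_header);
 *
 * where my_fill_header stands for the discipline-specific callback.
 */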
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4324
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
F
Frank Blaschka 已提交
4325 4326
	struct qeth_ipacmd_setadpparms *setparms;

C
Carsten Otte 已提交
4327
	QETH_CARD_TEXT(card, 4, "prmadpcb");
F
Frank Blaschka 已提交
4328 4329

	setparms = &(cmd->data.setadapterparms);
4330
	if (qeth_setadpparms_inspect_rc(cmd)) {
4331
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
F
Frank Blaschka 已提交
4332 4333 4334
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
4335
	return (cmd->hdr.return_code) ? -EIO : 0;
F
Frank Blaschka 已提交
4336 4337
}

4338
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
F
Frank Blaschka 已提交
4339
{
4340 4341
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
F
Frank Blaschka 已提交
4342 4343 4344
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

C
Carsten Otte 已提交
4345 4346
	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
F
Frank Blaschka 已提交
4347 4348

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4349
				   SETADP_DATA_SIZEOF(mode));
4350 4351
	if (!iob)
		return;
4352
	cmd = __ipa_cmd(iob);
F
Frank Blaschka 已提交
4353 4354 4355 4356 4357 4358 4359 4360
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4361
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4362
	struct qeth_ipacmd_setadpparms *adp_cmd;
F
Frank Blaschka 已提交
4363

C
Carsten Otte 已提交
4364
	QETH_CARD_TEXT(card, 4, "chgmaccb");
4365
	if (qeth_setadpparms_inspect_rc(cmd))
4366
		return -EIO;
F
Frank Blaschka 已提交
4367

4368
	adp_cmd = &cmd->data.setadapterparms;
4369 4370 4371
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

4372 4373
	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4374
		return -EADDRNOTAVAIL;
4375

4376
	eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
F
Frank Blaschka 已提交
4377 4378 4379 4380 4381 4382 4383 4384 4385
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

C
Carsten Otte 已提交
4386
	QETH_CARD_TEXT(card, 4, "chgmac");
F
Frank Blaschka 已提交
4387 4388

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4389
				   SETADP_DATA_SIZEOF(change_addr));
4390 4391
	if (!iob)
		return -ENOMEM;
4392
	cmd = __ipa_cmd(iob);
F
Frank Blaschka 已提交
4393
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4394 4395 4396
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
F
Frank Blaschka 已提交
4397 4398 4399 4400 4401 4402
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

E
Einar Lueck 已提交
4403 4404 4405
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4406
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
E
Einar Lueck 已提交
4407 4408
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4409
	QETH_CARD_TEXT(card, 4, "setaccb");
E
Einar Lueck 已提交
4410 4411

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4412 4413
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
S
Stefan Raspl 已提交
4414 4415
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
4416 4417 4418
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
4419
	switch (qeth_setadpparms_inspect_rc(cmd)) {
E
Einar Lueck 已提交
4420
	case SET_ACCESS_CTRL_RC_SUCCESS:
4421
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
E
Einar Lueck 已提交
4422 4423
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
4424
		else
E
Einar Lueck 已提交
4425 4426
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
4427
		return 0;
S
Stefan Raspl 已提交
4428
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4429 4430
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
4431
		return 0;
S
Stefan Raspl 已提交
4432
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4433 4434
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
4435
		return 0;
E
Einar Lueck 已提交
4436 4437 4438
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
4439
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4440 4441 4442 4443
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
4444
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4445 4446 4447
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
4448
		return -EPERM;
S
Stefan Raspl 已提交
4449 4450 4451
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
4452
		return -EOPNOTSUPP;
S
Stefan Raspl 已提交
4453 4454 4455
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
					"enabled at the adjacent switch port");
4456
		return -EREMOTEIO;
S
Stefan Raspl 已提交
4457 4458 4459
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
					"at the adjacent switch failed\n");
4460 4461
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
E
Einar Lueck 已提交
4462
	default:
4463
		return -EIO;
E
Einar Lueck 已提交
4464 4465 4466
	}
}

4467 4468
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
E
Einar Lueck 已提交
4469 4470 4471 4472 4473 4474
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4475
	QETH_CARD_TEXT(card, 4, "setacctl");
E
Einar Lueck 已提交
4476

4477 4478 4479 4480 4481 4482
	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

E
Einar Lueck 已提交
4483
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4484
				   SETADP_DATA_SIZEOF(set_access_ctrl));
4485 4486
	if (!iob)
		return -ENOMEM;
4487
	cmd = __ipa_cmd(iob);
E
Einar Lueck 已提交
4488
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4489
	access_ctrl_req->subcmd_code = mode;
E
Einar Lueck 已提交
4490 4491

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4492
			       NULL);
4493
	if (rc) {
4494
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4495 4496
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
E
Einar Lueck 已提交
4497
	}
4498

E
Einar Lueck 已提交
4499 4500 4501
	return rc;
}

void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
4579
				struct qeth_reply *reply, unsigned long data)
F
Frank Blaschka 已提交
4580
{
4581 4582 4583 4584
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
4585
	void *snmp_data;
F
Frank Blaschka 已提交
4586

C
Carsten Otte 已提交
4587
	QETH_CARD_TEXT(card, 3, "snpcmdcb");
F
Frank Blaschka 已提交
4588 4589

	if (cmd->hdr.return_code) {
4590
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4591
		return -EIO;
F
Frank Blaschka 已提交
4592 4593 4594 4595
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
4596
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4597
		return -EIO;
F
Frank Blaschka 已提交
4598
	}
4599 4600 4601 4602 4603

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
4604
	} else {
4605 4606
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
4607
	}
F
Frank Blaschka 已提交
4608 4609 4610

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4611 4612
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
F
Frank Blaschka 已提交
4613
	}
C
Carsten Otte 已提交
4614
	QETH_CARD_TEXT_(card, 4, "snore%i",
4615
			cmd->data.setadapterparms.hdr.used_total);
C
Carsten Otte 已提交
4616
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4617
			cmd->data.setadapterparms.hdr.seq_no);
F
Frank Blaschka 已提交
4618
	/* copy entries to user buffer */
4619
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
F
Frank Blaschka 已提交
4620
	qinfo->udata_offset += data_len;
4621

F
Frank Blaschka 已提交
4622 4623 4624 4625 4626 4627
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

4628
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
F
Frank Blaschka 已提交
4629
{
4630
	struct qeth_snmp_ureq __user *ureq;
F
Frank Blaschka 已提交
4631
	struct qeth_cmd_buffer *iob;
4632
	unsigned int req_len;
F
Frank Blaschka 已提交
4633 4634 4635
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

C
Carsten Otte 已提交
4636
	QETH_CARD_TEXT(card, 3, "snmpcmd");
F
Frank Blaschka 已提交
4637

4638
	if (IS_VM_NIC(card))
F
Frank Blaschka 已提交
4639 4640 4641
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4642
	    IS_LAYER3(card))
F
Frank Blaschka 已提交
4643
		return -EOPNOTSUPP;
4644

4645 4646 4647 4648 4649
	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

4650 4651 4652 4653
	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

4654 4655 4656 4657 4658 4659 4660
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4661
		return -EFAULT;
4662 4663
	}

F
Frank Blaschka 已提交
4664 4665
	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
4666
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4667 4668 4669 4670
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

4671
	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
F
Frank Blaschka 已提交
4672
	if (rc)
4673 4674
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
F
Frank Blaschka 已提交
4675 4676 4677 4678
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}
4679

F
Frank Blaschka 已提交
4680 4681 4682 4683
	kfree(qinfo.udata);
	return rc;
}

4684
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
J
Julian Wiedmann 已提交
4685 4686
					 struct qeth_reply *reply,
					 unsigned long data)
4687
{
4688
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
J
Julian Wiedmann 已提交
4689
	struct qeth_qoat_priv *priv = reply->param;
4690 4691 4692
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
4693
	if (qeth_setadpparms_inspect_rc(cmd))
4694
		return -EIO;
4695 4696 4697

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

4698 4699
	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;
4700

4701 4702
	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
4703 4704 4705 4706 4707 4708 4709 4710
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

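/* QUERY OAT ioctl backend: the adapter may answer in several parts, which
 * the callback above accumulates in priv.buffer before the result is copied
 * back to user space.
 */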
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

J
Julian Wiedmann 已提交
4723 4724
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;
4725

J
Julian Wiedmann 已提交
4726 4727
	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;
4728 4729 4730

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
4731
	priv.buffer = vzalloc(oat_data.buffer_len);
J
Julian Wiedmann 已提交
4732 4733
	if (!priv.buffer)
		return -ENOMEM;
4734 4735

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4736
				   SETADP_DATA_SIZEOF(query_oat));
4737 4738 4739 4740
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
4741
	cmd = __ipa_cmd(iob);
4742 4743 4744
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

J
Julian Wiedmann 已提交
4745
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4746
	if (!rc) {
4747 4748
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
4749 4750
		oat_data.response_len = priv.response_len;

J
Julian Wiedmann 已提交
4751 4752
		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4753
			rc = -EFAULT;
4754
	}
4755 4756

out_free:
4757
	vfree(priv.buffer);
4758 4759 4760
	return rc;
}

static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply->param;
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	card_info = &cmd->data.setadapterparms.data.card_info;
	netdev_dbg(card->dev,
		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
		   card_info->card_type, card_info->port_mode,
		   card_info->port_speed);

	switch (card_info->port_mode) {
	case CARD_INFO_PORTM_FULLDUPLEX:
		link_info->duplex = DUPLEX_FULL;
		break;
	case CARD_INFO_PORTM_HALFDUPLEX:
		link_info->duplex = DUPLEX_HALF;
		break;
	default:
		link_info->duplex = DUPLEX_UNKNOWN;
	}

	switch (card_info->card_type) {
	case CARD_INFO_TYPE_1G_COPPER_A:
	case CARD_INFO_TYPE_1G_COPPER_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_TP;
		break;
	case CARD_INFO_TYPE_1G_FIBRE_A:
	case CARD_INFO_TYPE_1G_FIBRE_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_FIBRE;
		break;
	case CARD_INFO_TYPE_10G_FIBRE_A:
	case CARD_INFO_TYPE_10G_FIBRE_B:
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		break;
	default:
		switch (card_info->port_speed) {
		case CARD_INFO_PORTS_10M:
			link_info->speed = SPEED_10;
			break;
		case CARD_INFO_PORTS_100M:
			link_info->speed = SPEED_100;
			break;
		case CARD_INFO_PORTS_1G:
			link_info->speed = SPEED_1000;
			break;
		case CARD_INFO_PORTS_10G:
			link_info->speed = SPEED_10000;
			break;
		case CARD_INFO_PORTS_25G:
			link_info->speed = SPEED_25000;
			break;
		default:
			link_info->speed = SPEED_UNKNOWN;
		}

		link_info->port = PORT_OTHER;
	}

E
}

int qeth_query_card_info(struct qeth_card *card,
			 struct qeth_link_info *link_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
	if (!iob)
		return -ENOMEM;

	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
}

static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
		break;
	default:
		link_info->port = PORT_OTHER;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	}

	return 0;
}

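/* Initialize the link info with defaults derived from the card and link
 * type, then try to refine speed, duplex, port and link mode with the more
 * accurate data from QUERY OAT.
 */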
static void qeth_init_link_info(struct qeth_card *card)
{
	card->info.link_info.duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		card->info.link_info.speed = SPEED_10000;
		card->info.link_info.port = PORT_FIBRE;
		card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			card->info.link_info.speed = SPEED_100;
			card->info.link_info.port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			card->info.link_info.speed = SPEED_1000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			card->info.link_info.speed = SPEED_10000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			card->info.link_info.speed = SPEED_25000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev, "Unknown link type %x\n",
				 card->info.link_type);
			card->info.link_info.speed = SPEED_UNKNOWN;
			card->info.link_info.port = PORT_OTHER;
		}

		card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
	}

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
			}
		}
	}
}

5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

5015
	QETH_CARD_TEXT(card, 2, "vmreqmac");
5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
5027
	request->devno = card->info.ddev_devno;
5028

5029
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5030
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5031
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5032 5033
	if (rc)
		goto out;
5034
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5035 5036 5037 5038

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
5039 5040 5041
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
5042 5043
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
5044 5045
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5046
	} else {
5047
		eth_hw_addr_set(card->dev, response->mac);
5048 5049 5050 5051 5052 5053 5054 5055 5056
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

5057 5058
static void qeth_determine_capabilities(struct qeth_card *card)
{
5059 5060
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
5061 5062 5063
	int rc;
	int ddev_offline = 0;

5064
	QETH_CARD_TEXT(card, 2, "detcapab");
5065 5066
	if (!ddev->online) {
		ddev_offline = 1;
5067
		rc = qeth_start_channel(channel);
5068
		if (rc) {
5069
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5070 5071 5072 5073
			goto out;
		}
	}

5074
	rc = qeth_read_conf_data(card);
5075
	if (rc) {
5076 5077
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
5078
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5079 5080 5081 5082 5083
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
5084
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5085

5086 5087 5088 5089 5090
	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5091 5092 5093 5094 5095 5096 5097 5098 5099
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

5100 5101
out_offline:
	if (ddev_offline == 1)
5102
		qeth_stop_channel(channel);
5103 5104 5105 5106
out:
	return;
}

5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133
static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

F
Frank Blaschka 已提交
5134 5135
static int qeth_qdio_establish(struct qeth_card *card)
{
5136 5137
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5138
	struct qeth_qib_parms *qib_parms = NULL;
F
Frank Blaschka 已提交
5139
	struct qdio_initialize init_data;
5140
	unsigned int no_input_qs = 1;
5141
	unsigned int i;
F
Frank Blaschka 已提交
5142 5143
	int rc = 0;

5144
	QETH_CARD_TEXT(card, 2, "qdioest");
F
Frank Blaschka 已提交
5145

5146 5147 5148 5149
	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;
F
Frank Blaschka 已提交
5150

5151 5152
		qeth_fill_qib_parms(card, qib_parms);
	}
F
Frank Blaschka 已提交
5153

5154
	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5155
	if (card->options.cq == QETH_CQ_ENABLED) {
5156
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5157 5158
		no_input_qs++;
	}
5159

5160 5161
	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
F
Frank Blaschka 已提交
5162 5163

	memset(&init_data, 0, sizeof(struct qdio_initialize));
5164 5165
	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
F
Frank Blaschka 已提交
5166
	init_data.qib_param_field_format = 0;
5167
	init_data.qib_param_field	 = (void *)qib_parms;
5168
	init_data.no_input_qs		 = no_input_qs;
F
Frank Blaschka 已提交
5169
	init_data.no_output_qs           = card->qdio.no_out_queues;
5170 5171
	init_data.input_handler		 = qeth_qdio_input_handler;
	init_data.output_handler	 = qeth_qdio_output_handler;
5172
	init_data.irq_poll		 = qeth_qdio_poll;
F
Frank Blaschka 已提交
5173
	init_data.int_parm               = (unsigned long) card;
5174 5175
	init_data.input_sbal_addr_array  = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
F
Frank Blaschka 已提交
5176 5177 5178

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5179 5180
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
J
Jan Glauber 已提交
5181 5182 5183 5184
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
5185
		rc = qdio_establish(CARD_DDEV(card), &init_data);
J
Jan Glauber 已提交
5186
		if (rc) {
F
Frank Blaschka 已提交
5187
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
J
Jan Glauber 已提交
5188 5189
			qdio_free(CARD_DDEV(card));
		}
F
Frank Blaschka 已提交
5190
	}
5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}
5202

J
Jan Glauber 已提交
5203
out:
5204
	kfree(qib_parms);
F
Frank Blaschka 已提交
5205 5206 5207 5208 5209
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
5210
	QETH_CARD_TEXT(card, 2, "freecrd");
5211 5212 5213

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
5214
	qeth_put_cmd(card->read_cmd);
5215
	destroy_workqueue(card->event_wq);
5216
	dev_set_drvdata(&card->gdev->dev, NULL);
F
Frank Blaschka 已提交
5217 5218 5219
	kfree(card);
}

5220
static void qeth_trace_features(struct qeth_card *card)
5221 5222
{
	QETH_CARD_TEXT(card, 2, "features");
5223 5224 5225 5226 5227
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
5228 5229
}

F
Frank Blaschka 已提交
5230
static struct ccw_device_id qeth_ids[] = {
5231 5232 5233 5234 5235 5236
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
5237
#ifdef CONFIG_QETH_OSX
5238 5239
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
5240
#endif
F
Frank Blaschka 已提交
5241 5242 5243 5244 5245
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
5246
	.driver = {
S
Sebastian Ott 已提交
5247
		.owner = THIS_MODULE,
5248 5249
		.name = "qeth",
	},
F
Frank Blaschka 已提交
5250 5251 5252 5253 5254
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

5255
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
F
Frank Blaschka 已提交
5256
{
5257
	int retries = 3;
F
Frank Blaschka 已提交
5258 5259
	int rc;

5260
	QETH_CARD_TEXT(card, 2, "hrdsetup");
F
Frank Blaschka 已提交
5261
	atomic_set(&card->force_alloc_skb, 0);
5262 5263 5264
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
F
Frank Blaschka 已提交
5265
retry:
5266
	if (retries < 3)
5267 5268
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
5269
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5270 5271 5272
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
5273
	qdio_free(CARD_DDEV(card));
5274 5275

	rc = qeth_start_channel(&card->read);
5276 5277
	if (rc)
		goto retriable;
5278
	rc = qeth_start_channel(&card->write);
5279 5280
	if (rc)
		goto retriable;
5281
	rc = qeth_start_channel(&card->data);
5282 5283 5284
	if (rc)
		goto retriable;
retriable:
F
Frank Blaschka 已提交
5285
	if (rc == -ERESTARTSYS) {
5286
		QETH_CARD_TEXT(card, 2, "break1");
F
Frank Blaschka 已提交
5287 5288
		return rc;
	} else if (rc) {
5289
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5290
		if (--retries < 0)
F
Frank Blaschka 已提交
5291 5292 5293 5294
			goto out;
		else
			goto retry;
	}
5295

5296
	qeth_determine_capabilities(card);
5297
	qeth_read_ccw_conf_data(card);
5298
	qeth_idx_init(card);
5299 5300 5301

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
5302
		QETH_CARD_TEXT(card, 2, "break2");
F
Frank Blaschka 已提交
5303 5304
		return rc;
	} else if (rc) {
5305
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
F
Frank Blaschka 已提交
5306 5307 5308 5309 5310
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
5311 5312 5313

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
5314
		QETH_CARD_TEXT(card, 2, "break3");
F
Frank Blaschka 已提交
5315 5316
		return rc;
	} else if (rc) {
5317
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
F
Frank Blaschka 已提交
5318 5319 5320 5321 5322
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
5323
	card->read_or_write_problem = 0;
F
Frank Blaschka 已提交
5324 5325
	rc = qeth_mpc_initialize(card);
	if (rc) {
5326
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
F
Frank Blaschka 已提交
5327 5328
		goto out;
	}
5329

5330 5331
	rc = qeth_send_startlan(card);
	if (rc) {
5332
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5333 5334
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
J
Julian Wiedmann 已提交
5335
			*carrier_ok = false;
5336 5337 5338
		} else {
			goto out;
		}
5339
	} else {
J
Julian Wiedmann 已提交
5340 5341 5342
		*carrier_ok = true;
	}

5343 5344 5345
	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
5346
	card->options.sbp.supported_funcs = 0;
5347
	card->info.diagass_support = 0;
5348 5349 5350
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
5351 5352 5353 5354 5355
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
5356 5357 5358
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
5359
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5360 5361 5362 5363 5364
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
5365
		if (rc)
5366
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5367
	}
5368

5369 5370
	qeth_trace_features(card);

5371 5372 5373 5374
	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

5375
	if (card->options.isolation != ISOLATION_MODE_NONE) {
5376 5377
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
5378 5379 5380
		if (rc)
			goto out;
	}
5381

5382 5383
	qeth_init_link_info(card);

5384 5385 5386 5387 5388 5389
	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

F
Frank Blaschka 已提交
5390 5391
	return 0;
out:
5392 5393
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
5394 5395
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
F
Frank Blaschka 已提交
5396 5397 5398
	return rc;
}

5399 5400
static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
5401
{
5402
	bool carrier_ok;
5403 5404 5405 5406 5407
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

5408 5409 5410 5411 5412 5413 5414 5415 5416
	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

5417
	if (card->dev->reg_state != NETREG_REGISTERED)
5418 5419 5420
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

5421
	rc = disc->set_online(card, carrier_ok);
5422 5423 5424 5425 5426
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5427 5428

	mutex_unlock(&card->conf_mutex);
5429
	return 0;
5430

5431 5432
err_online:
err_hardsetup:
5433 5434 5435 5436
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

5437 5438 5439 5440 5441 5442
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
5443 5444 5445
	return rc;
}

5446 5447
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

5459 5460 5461
	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

5462 5463 5464 5465 5466 5467 5468
	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

5469 5470
	cancel_work_sync(&card->rx_mode_work);

5471
	disc->set_offline(card);
5472

5473 5474 5475 5476 5477 5478
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497
	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

static int qeth_do_reset(void *data)
{
5498
	const struct qeth_discipline *disc;
5499 5500 5501
	struct qeth_card *card = data;
	int rc;

5502 5503 5504
	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

5505 5506 5507 5508 5509 5510 5511
	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

5512 5513
	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
5514 5515 5516 5517
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
5518 5519
		qeth_set_offline(card, disc, true);
		ccwgroup_set_offline(card->gdev, false);
5520 5521 5522 5523 5524 5525 5526 5527
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

J
Julian Wiedmann 已提交
5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif

static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5588
			     struct qeth_hdr *hdr, bool uses_frags)
J
Julian Wiedmann 已提交
5589
{
5590
	struct napi_struct *napi = &card->napi;
J
Julian Wiedmann 已提交
5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604
	bool is_cso;

	switch (hdr->hdr.l2.id) {
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
5605 5606 5607
		if (uses_frags)
			napi_free_frags(napi);
		else
5608
			kfree_skb(skb);
J
Julian Wiedmann 已提交
5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

5627 5628 5629 5630 5631 5632
	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
J
Julian Wiedmann 已提交
5633 5634
}

5635
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
F
Frank Blaschka 已提交
5636
{
5637
	struct page *page = virt_to_page(data);
5638
	unsigned int next_frag;
5639

5640
	next_frag = skb_shinfo(skb)->nr_frags;
5641
	get_page(page);
5642 5643
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
F
Frank Blaschka 已提交
5644 5645
}

5646 5647 5648 5649 5650
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}

J
Julian Wiedmann 已提交
5651
static int qeth_extract_skb(struct qeth_card *card,
5652
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
J
Julian Wiedmann 已提交
5653
			    int *__offset)
F
Frank Blaschka 已提交
5654
{
5655
	struct qeth_priv *priv = netdev_priv(card->dev);
5656
	struct qdio_buffer *buffer = qethbuffer->buffer;
5657
	struct napi_struct *napi = &card->napi;
5658
	struct qdio_buffer_element *element;
5659
	unsigned int linear_len = 0;
5660
	bool uses_frags = false;
F
Frank Blaschka 已提交
5661
	int offset = *__offset;
5662
	bool use_rx_sg = false;
5663
	unsigned int headroom;
J
Julian Wiedmann 已提交
5664
	struct qeth_hdr *hdr;
5665
	struct sk_buff *skb;
5666
	int skb_len = 0;
F
Frank Blaschka 已提交
5667

5668 5669
	element = &buffer->element[*element_no];

5670
next_packet:
F
Frank Blaschka 已提交
5671
	/* qeth_hdr must not cross element boundaries */
5672
	while (element->length < offset + sizeof(struct qeth_hdr)) {
F
Frank Blaschka 已提交
5673
		if (qeth_is_last_sbale(element))
J
Julian Wiedmann 已提交
5674
			return -ENODATA;
F
Frank Blaschka 已提交
5675 5676 5677 5678
		element++;
		offset = 0;
	}

5679
	hdr = phys_to_virt(element->addr) + offset;
J
Julian Wiedmann 已提交
5680
	offset += sizeof(*hdr);
5681 5682
	skb = NULL;

J
Julian Wiedmann 已提交
5683
	switch (hdr->hdr.l2.id) {
5684
	case QETH_HEADER_TYPE_LAYER2:
J
Julian Wiedmann 已提交
5685
		skb_len = hdr->hdr.l2.pkt_length;
5686
		linear_len = ETH_HLEN;
5687
		headroom = 0;
5688 5689
		break;
	case QETH_HEADER_TYPE_LAYER3:
J
Julian Wiedmann 已提交
5690
		skb_len = hdr->hdr.l3.length;
5691 5692 5693 5694 5695
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

J
Julian Wiedmann 已提交
5696
		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5697 5698 5699 5700 5701
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

J
Julian Wiedmann 已提交
5702
		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5703 5704 5705
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
5706
		headroom = ETH_HLEN;
5707 5708
		break;
	default:
J
Julian Wiedmann 已提交
5709
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5710 5711 5712 5713
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

5714
		/* Can't determine packet length, drop the whole buffer. */
J
Julian Wiedmann 已提交
5715
		return -EPROTONOSUPPORT;
F
Frank Blaschka 已提交
5716 5717
	}

5718 5719 5720 5721
	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5722

5723
	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5724
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
J
Julian Wiedmann 已提交
5725
		     !atomic_read(&card->force_alloc_skb));
5726

5727
	if (use_rx_sg) {
5728
		/* QETH_CQ_ENABLED only: */
5729 5730
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
F
Frank Blaschka 已提交
5752
	}
5753

5754 5755 5756
	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
5757
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5758 5759
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5760

5761 5762 5763
use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
5764
walk_packet:
F
Frank Blaschka 已提交
5765
	while (skb_len) {
5766
		int data_len = min(skb_len, (int)(element->length - offset));
5767
		char *data = phys_to_virt(element->addr) + offset;
5768 5769 5770

		skb_len -= data_len;
		offset += data_len;
5771

5772
		/* Extract data from current element: */
5773
		if (skb && data_len) {
5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
F
Frank Blaschka 已提交
5788
		}
5789 5790

		/* Step forward to next element: */
F
Frank Blaschka 已提交
5791 5792
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
C
Carsten Otte 已提交
5793
				QETH_CARD_TEXT(card, 4, "unexeob");
C
Carsten Otte 已提交
5794
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5795
				if (skb) {
5796 5797 5798
					if (uses_frags)
						napi_free_frags(napi);
					else
5799
						kfree_skb(skb);
5800 5801 5802
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
J
Julian Wiedmann 已提交
5803
				return -EMSGSIZE;
F
Frank Blaschka 已提交
5804 5805 5806 5807 5808
			}
			element++;
			offset = 0;
		}
	}
5809 5810 5811 5812 5813

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

5814
	*element_no = element - &buffer->element[0];
F
Frank Blaschka 已提交
5815
	*__offset = offset;
J
Julian Wiedmann 已提交
5816

5817
	qeth_receive_skb(card, skb, hdr, uses_frags);
J
Julian Wiedmann 已提交
5818 5819 5820
	return 0;
}

5821 5822
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
J
Julian Wiedmann 已提交
5823
{
5824
	unsigned int work_done = 0;
J
Julian Wiedmann 已提交
5825 5826

	while (budget) {
5827
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
J
Julian Wiedmann 已提交
5828 5829 5830 5831 5832 5833 5834
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
F
Frank Blaschka 已提交
5835
	}
J
Julian Wiedmann 已提交
5836 5837

	return work_done;
F
Frank Blaschka 已提交
5838 5839
}

5840
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5841
{
5842
	struct qeth_rx *ctx = &card->rx;
5843
	unsigned int work_done = 0;
5844

5845
	while (budget > 0) {
5846 5847 5848 5849
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

5850
		/* Fetch completed RX buffers: */
5851 5852
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
5853 5854 5855 5856
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
5857 5858 5859 5860 5861 5862
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

5863
		/* Process one completed RX buffer: */
5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5879
			buffer->pool_entry = NULL;
5880
			card->rx.b_count--;
5881 5882 5883
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);
5884 5885 5886 5887 5888

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
5889 5890 5891
		}
	}

5892 5893 5894
	return work_done;
}

5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

5913 5914 5915 5916 5917 5918 5919
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

5920 5921 5922 5923 5924 5925 5926 5927 5928 5929
	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

5930 5931 5932
	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

5933 5934 5935 5936 5937 5938 5939 5940 5941 5942
	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}
5943

5944
	if (napi_complete_done(napi, work_done) &&
5945
	    qdio_start_irq(CARD_DDEV(card)))
5946
		napi_schedule(napi);
5947

5948 5949 5950 5951
	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

5952
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5953 5954
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
5955 5956 5957 5958
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
5959
	bool error = !!qdio_error;
5960

5961
	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5962
		struct qaob *aob = buffer->aob;
5963
		struct qeth_qaob_priv1 *priv;
5964
		enum iucv_tx_notify notify;
5965 5966 5967 5968 5969 5970 5971 5972

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}
5973

5974 5975
		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

5976 5977 5978
		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5979 5980
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

5981 5982 5983 5984 5985
			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
5986
			}
5987

5988 5989 5990
			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
5991
		}
5992

5993 5994 5995 5996
		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
5997
		memset(aob, 0, sizeof(*aob));
5998
	} else if (card->options.cq == QETH_CQ_ENABLED) {
5999 6000
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
6001 6002
	}

6003
	qeth_clear_output_buffer(queue, buffer, error, budget);
6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

6015 6016 6017 6018
	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);
6019 6020 6021

	while (1) {
		unsigned int start, error, i;
6022 6023
		unsigned int packets = 0;
		unsigned int bytes = 0;
6024 6025
		int completed;

6026
		qeth_tx_complete_pending_bufs(card, queue, false, budget);
6027

6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044
		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
6045 6046 6047
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
6048 6049 6050 6051
			return 0;
		}

		for (i = start; i < start + completed; i++) {
6052
			struct qeth_qdio_out_buffer *buffer;
6053 6054
			unsigned int bidx = QDIO_BUFNR(i);

6055
			buffer = queue->bufs[bidx];
6056
			packets += buffer->frames;
6057 6058 6059
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
6060 6061 6062 6063 6064
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
6065 6066 6067 6068
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
6069 6070 6071 6072
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);
6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

6085 6086 6087 6088 6089 6090 6091
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

6092 6093 6094 6095 6096 6097 6098 6099
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6100
		return -EIO;
6101 6102 6103 6104 6105 6106

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

6107 6108
int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
6109
{
6110
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6111 6112 6113

	QETH_CARD_TEXT(card, 4, "defadpcb");

6114 6115 6116 6117 6118
	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6119
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6120
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6121
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6122 6123
	return 0;
}
6124
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6125

6126 6127
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
6128 6129
						 u16 cmd_code,
						 unsigned int data_length,
6130
						 enum qeth_prot_versions prot)
6131
{
6132 6133
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
6134 6135 6136
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
6137 6138 6139 6140 6141 6142
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;
6143

6144 6145
	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;
6146

6147 6148 6149
	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
6150 6151
	return iob;
}
6152
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6153

6154 6155
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
6156
				      u16 cmd_code, u32 *data,
6157
				      enum qeth_prot_versions prot)
6158
{
6159
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6160 6161
	struct qeth_cmd_buffer *iob;

6162 6163
	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6164 6165
	if (!iob)
		return -ENOMEM;
6166

6167 6168
	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6169
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6170
}
6171
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6172

F
Frank Blaschka 已提交
6173 6174
static void qeth_unregister_dbf_views(void)
{
6175
	int x;
6176

6177 6178 6179 6180
	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
F
Frank Blaschka 已提交
6181 6182
}

C
Carsten Otte 已提交
6183
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
P
Peter Tiedemann 已提交
6184 6185
{
	char dbf_txt_buf[32];
6186
	va_list args;
P
Peter Tiedemann 已提交
6187

6188
	if (!debug_level_enabled(id, level))
P
Peter Tiedemann 已提交
6189
		return;
6190 6191 6192
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
C
Carsten Otte 已提交
6193
	debug_text_event(id, level, dbf_txt_buf);
P
Peter Tiedemann 已提交
6194 6195 6196
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

F
Frank Blaschka 已提交
6197 6198
static int qeth_register_dbf_views(void)
{
6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}
F
Frank Blaschka 已提交
6212

6213 6214 6215 6216 6217 6218
		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}
F
Frank Blaschka 已提交
6219

6220 6221 6222
		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}
F
Frank Blaschka 已提交
6223 6224 6225 6226

	return 0;
}

6227 6228
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

6229 6230
int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
F
Frank Blaschka 已提交
6231
{
6232 6233
	int rc;

6234
	mutex_lock(&qeth_mod_mutex);
F
Frank Blaschka 已提交
6235 6236
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
6237 6238
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
F
Frank Blaschka 已提交
6239 6240
		break;
	case QETH_DISCIPLINE_LAYER2:
6241 6242
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
F
Frank Blaschka 已提交
6243
		break;
6244 6245
	default:
		break;
F
Frank Blaschka 已提交
6246
	}
6247
	mutex_unlock(&qeth_mod_mutex);
6248

6249
	if (!card->discipline) {
6250 6251
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
6252
		return -EINVAL;
F
Frank Blaschka 已提交
6253
	}
6254

6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265
	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

6266
	card->options.layer = discipline;
6267
	return 0;
F
Frank Blaschka 已提交
6268 6269
}

6270
void qeth_remove_discipline(struct qeth_card *card)
F
Frank Blaschka 已提交
6271
{
6272 6273
	card->discipline->remove(card->gdev);

6274
	if (IS_LAYER2(card))
6275
		symbol_put(qeth_l2_discipline);
F
Frank Blaschka 已提交
6276
	else
6277
		symbol_put(qeth_l3_discipline);
6278
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6279
	card->discipline = NULL;
F
Frank Blaschka 已提交
6280 6281
}

6282
static const struct device_type qeth_generic_devtype = {
6283 6284 6285
	.name = "qeth_generic",
};

6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353
#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

6354 6355 6356
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
6357
	struct qeth_priv *priv;
6358 6359 6360

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
6361
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6362
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6363
		break;
6364
	case QETH_CARD_TYPE_OSM:
6365
		dev = alloc_etherdev(sizeof(*priv));
6366
		break;
6367
	default:
6368
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6369 6370 6371 6372 6373
	}

	if (!dev)
		return NULL;

6374 6375
	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
6376
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6377

6378 6379
	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
J
Julian Wiedmann 已提交
6380
	dev->min_mtu = 576;
6381 6382 6383
	 /* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
6384 6385
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);
6386

J
Julian Wiedmann 已提交
6387 6388 6389 6390 6391 6392
	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;
6393

6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407
	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

F
Frank Blaschka 已提交
6408 6409 6410 6411 6412
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
6413
	enum qeth_discipline_id enforced_disc;
6414
	char dbf_name[DBF_NAME_LEN];
F
Frank Blaschka 已提交
6415

6416
	QETH_DBF_TEXT(SETUP, 2, "probedev");
F
Frank Blaschka 已提交
6417 6418 6419 6420 6421

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

6422
	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
F
Frank Blaschka 已提交
6423

6424
	card = qeth_alloc_card(gdev);
F
Frank Blaschka 已提交
6425
	if (!card) {
6426
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
F
Frank Blaschka 已提交
6427 6428 6429
		rc = -ENOMEM;
		goto err_dev;
	}
6430 6431 6432

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
6433
	card->debug = qeth_get_dbf_entry(dbf_name);
6434
	if (!card->debug) {
6435 6436 6437
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
6438 6439
	}

6440
	qeth_setup_card(card);
6441
	card->dev = qeth_alloc_netdev(card);
6442 6443
	if (!card->dev) {
		rc = -ENOMEM;
6444
		goto err_card;
6445
	}
6446

6447 6448 6449
	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

6450 6451 6452 6453
	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
6454

J
Julian Wiedmann 已提交
6455
	gdev->dev.groups = qeth_dev_groups;
6456

6457 6458 6459 6460 6461 6462 6463
	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
6464
		/* It's so early that we don't need the discipline_mutex yet. */
6465
		rc = qeth_setup_discipline(card, enforced_disc);
6466
		if (rc)
6467
			goto err_setup_disc;
6468 6469

		break;
F
Frank Blaschka 已提交
6470 6471 6472 6473
	}

	return 0;

6474
err_setup_disc:
6475
err_chp_desc:
6476
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

6488
	QETH_CARD_TEXT(card, 2, "removedv");
F
Frank Blaschka 已提交
6489

6490
	mutex_lock(&card->discipline_mutex);
6491 6492
	if (card->discipline)
		qeth_remove_discipline(card);
6493
	mutex_unlock(&card->discipline_mutex);
6494

6495 6496
	qeth_free_qdio_queues(card);

6497
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6498 6499 6500 6501 6502 6503 6504 6505
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
6506
	enum qeth_discipline_id def_discipline;
F
Frank Blaschka 已提交
6507

6508
	mutex_lock(&card->discipline_mutex);
6509
	if (!card->discipline) {
6510 6511
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
6512
		rc = qeth_setup_discipline(card, def_discipline);
F
Frank Blaschka 已提交
6513 6514 6515
		if (rc)
			goto err;
	}
6516

6517 6518
	rc = qeth_set_online(card, card->discipline);

F
Frank Blaschka 已提交
6519
err:
6520
	mutex_unlock(&card->discipline_mutex);
F
Frank Blaschka 已提交
6521 6522 6523 6524 6525 6526
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6527
	int rc;
6528

6529 6530 6531 6532 6533
	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
F
Frank Blaschka 已提交
6534 6535 6536 6537 6538
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6539

6540 6541 6542 6543
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
6544
	qeth_drain_output_queues(card);
6545
	qdio_free(CARD_DDEV(card));
F
Frank Blaschka 已提交
6546 6547
}

6548 6549
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
F
Frank Blaschka 已提交
6550 6551 6552
{
	int err;

6553 6554
	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);
6555 6556 6557

	return err ? err : count;
}
6558
static DRIVER_ATTR_WO(group);
F
Frank Blaschka 已提交
6559

6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571
static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

A
Arnd Bergmann 已提交
6586
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6587 6588 6589 6590 6591 6592
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
A
Arnd Bergmann 已提交
6593
		rc = qeth_snmp_command(card, data);
6594 6595
		break;
	case SIOC_QETH_GET_CARD_TYPE:
6596 6597
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
6598
			return 1;
6599
		return 0;
A
Arnd Bergmann 已提交
6600 6601 6602 6603
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
6604
		rc = -EOPNOTSUPP;
A
Arnd Bergmann 已提交
6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
A
Arnd Bergmann 已提交
6632
		return -EOPNOTSUPP;
6633 6634 6635 6636 6637 6638 6639
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

6640 6641
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
6642 6643
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6644
	u32 *features = reply->param;
6645

6646
	if (qeth_setassparms_inspect_rc(cmd))
6647
		return -EIO;
6648

6649
	*features = cmd->data.setassparms.data.flags_32bit;
6650 6651 6652
	return 0;
}

6653 6654
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
6655
{
6656 6657
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
6658 6659
}

6660
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6661
			    enum qeth_prot_versions prot, u8 *lp2lp)
6662
{
6663
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6664 6665 6666
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
6667 6668
	int rc;

6669 6670 6671
	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
6672
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6673

6674 6675 6676 6677 6678 6679 6680
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
6681
		return rc;
6682

6683 6684 6685 6686
	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}
6687

6688 6689
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
6690
				       prot);
6691 6692 6693
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
6694
	}
6695 6696 6697 6698 6699

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6700
	if (rc) {
6701
		qeth_set_csum_off(card, cstype, prot);
6702 6703
		return rc;
	}
6704

6705 6706 6707 6708 6709 6710
	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

6711 6712
	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6713 6714 6715 6716

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

6717 6718 6719
	return 0;
}

6720
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6721
			     enum qeth_prot_versions prot, u8 *lp2lp)
6722
{
6723
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6724
		    qeth_set_csum_off(card, cstype, prot);
6725 6726
}

6727 6728 6729 6730 6731 6732 6733
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6734
		return -EIO;
6735 6736 6737 6738 6739 6740

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

6741 6742
static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
6743
{
6744
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6745
						 IPA_CMD_ASS_STOP, NULL, prot);
6746
}
6747

6748 6749 6750
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
6751 6752 6753 6754 6755 6756 6757 6758 6759 6760
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

6761
	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6762 6763 6764 6765 6766 6767 6768 6769 6770
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6771 6772
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
6773 6774 6775 6776 6777 6778
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
6779 6780 6781
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
6796
}
6797

6798 6799 6800
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
6801
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6802
}
6803

6804 6805 6806 6807 6808 6809 6810
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

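/* Some offloads are restricted for traffic to local next-hops. When such an
 * offload has just been switched off, drop the cached local addresses for
 * the affected IP version.
 */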
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

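/* Mask out offload flags that the card's assists do not support. */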
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

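/* Aggregate RX counters from the card and TX counters from each output
 * queue into the netdev stats.
 */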
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

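/* TX queue selection for IQD devices: multicast traffic always goes to the
 * dedicated mcast queue, unicast traffic is spread across the remaining
 * queues and never lands on the mcast queue.
 */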
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
					    sizeof(struct qaob),
					    sizeof(struct qaob),
					    0, NULL);
	if (!qeth_qaob_cache) {
		rc = -ENOMEM;
		goto qaob_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qaob_cache);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");