// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

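/* Detach all entries from the working pool list and drop the per-buffer
 * pool_entry references of the input queue.
 */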
static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

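/* Grow or shrink the RX buffer pool to @count entries. Before the input
 * queue has been allocated, only the requested size is recorded.
 */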
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127, NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

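/* Allocate the completion queue as a second input queue when CQ mode is
 * enabled; otherwise keep a single input queue.
 */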
static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}

		card->qdio.no_in_queues = 2;
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

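/* Map a buffer's SBALF 15 error code to the af_iucv TX notification that
 * should be raised, distinguishing delayed (CQ) from immediate completion.
 */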
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

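/* Start the next READ ccw on the control channel; the response is delivered
 * to qeth_issue_next_read_cb(). Callers are expected to hold the ccwdev lock
 * of the read channel.
 */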
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

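/* Sort an IPA reply from an unsolicited IPA event. Events are consumed here
 * (returning NULL); replies are handed back for matching against a pending
 * command.
 */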
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				netdev_name(card->dev));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

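/* Check a control buffer for an IDX TERMINATE indication and translate its
 * cause code into an errno.
 */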
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

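/* Inspect channel/device status and sense data of an IRB and decide whether
 * the interrupt reported a fatal condition.
 */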
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

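/* Common interrupt handler for the READ, WRITE and DATA channels. It
 * completes or cancels the channel's active control command and triggers
 * recovery on fatal errors.
 */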
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

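/* Propagate a TX notification to the af_iucv sockets owning the skbs that
 * are attached to this output buffer.
 */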
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

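/* Account a completed TX buffer in the queue statistics and release its
 * skbs back to the stack.
 */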
static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

1361 1362 1363 1364 1365 1366 1367
static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

1385 1386 1387
			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
1388
			qeth_tx_complete_buf(queue, buf, drain, budget);
1389

1390 1391 1392 1393 1394
			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

1395
				if (test_bit(i, buf->from_kmem_cache) && data)
1396 1397 1398 1399
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

1400
			list_del(&buf->list_entry);
1401
			qeth_free_out_buf(buf);
1402 1403 1404 1405
		}
	}
}

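/* Complete all pending and filled buffers of an output queue; with @free
 * also release the buffer descriptors themselves.
 */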
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

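/* Read the channel-path descriptor to derive the function level and, for
 * OSD/OSX devices, whether the CHPID restricts the device to a single
 * TX queue.
 */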
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
C
Carsten Otte 已提交
1773
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
F
Frank Blaschka 已提交
1774 1775 1776
	return rc;
}

1777 1778 1779 1780 1781 1782 1783 1784 1785
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

1786
	QETH_CARD_TEXT(card, 2, "vmlayer");
1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
1829
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
1830 1831 1832
	return disc;
}

1833 1834 1835
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
1836 1837
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

1838
	if (IS_OSM(card) || IS_OSN(card))
1839
		disc = QETH_DISCIPLINE_LAYER2;
1840 1841 1842
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);
1843 1844 1845

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
1846
		QETH_CARD_TEXT(card, 3, "force l2");
1847 1848
		break;
	case QETH_DISCIPLINE_LAYER3:
1849
		QETH_CARD_TEXT(card, 3, "force l3");
1850 1851
		break;
	default:
1852
		QETH_CARD_TEXT(card, 3, "force no");
1853 1854
	}

1855
	return disc;
1856 1857
}

1858
static void qeth_set_blkt_defaults(struct qeth_card *card)
1859
{
1860
	QETH_CARD_TEXT(card, 2, "cfgblkt");
1861

1862
	if (card->info.use_v1_blkt) {
1863 1864 1865
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
1866 1867 1868 1869
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
1870
	}
F
Frank Blaschka 已提交
1871 1872
}

1873
static void qeth_idx_init(struct qeth_card *card)
F
Frank Blaschka 已提交
1874
{
1875 1876
	memset(&card->seqno, 0, sizeof(card->seqno));

F
Frank Blaschka 已提交
1877 1878 1879 1880 1881 1882
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

1883 1884
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
1885
		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
1886 1887
		break;
	case QETH_CARD_TYPE_OSD:
1888
	case QETH_CARD_TYPE_OSN:
1889 1890 1891 1892
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
F
Frank Blaschka 已提交
1893 1894 1895
	}
}

1896
static void qeth_idx_finalize_cmd(struct qeth_card *card,
1897
				  struct qeth_cmd_buffer *iob)
1898 1899 1900 1901 1902 1903 1904
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

F
Frank Blaschka 已提交
1905 1906 1907 1908 1909 1910 1911 1912 1913
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

1914
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1915
				  struct qeth_cmd_buffer *iob)
F
Frank Blaschka 已提交
1916
{
1917
	qeth_idx_finalize_cmd(card, iob);
F
Frank Blaschka 已提交
1918 1919 1920 1921 1922 1923

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1924 1925

	iob->callback = qeth_release_buffer_cb;
F
Frank Blaschka 已提交
1926 1927
}

1928 1929 1930 1931 1932 1933 1934
static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

1935
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1936
						  const void *data,
1937
						  unsigned int data_length)
1938 1939 1940
{
	struct qeth_cmd_buffer *iob;

1941 1942 1943 1944 1945 1946 1947 1948
	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
1949
	iob->match = qeth_mpc_match_reply;
1950 1951 1952
	return iob;
}

E
Eugene Crosser 已提交
1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966
/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
1967 1968 1969 1970 1971
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
E
Eugene Crosser 已提交
1972 1973 1974
 * field 'param' of the structure qeth_reply.
 */

1975
static int qeth_send_control_data(struct qeth_card *card,
1976 1977 1978 1979 1980
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
F
Frank Blaschka 已提交
1981
{
1982
	struct qeth_channel *channel = iob->channel;
1983
	struct qeth_reply *reply = &iob->reply;
1984
	long timeout = iob->timeout;
F
Frank Blaschka 已提交
1985 1986
	int rc;

C
Carsten Otte 已提交
1987
	QETH_CARD_TEXT(card, 2, "sendctl");
F
Frank Blaschka 已提交
1988 1989 1990

	reply->callback = reply_cb;
	reply->param = reply_param;
1991

1992
	timeout = wait_event_interruptible_timeout(card->wait_q,
1993
						   qeth_trylock_channel(channel, iob),
1994 1995
						   timeout);
	if (timeout <= 0) {
1996
		qeth_put_cmd(iob);
1997 1998
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}
F
Frank Blaschka 已提交
1999

2000
	if (iob->finalize)
2001 2002
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
2003

2004
	qeth_enqueue_cmd(card, iob);
2005

2006 2007 2008
	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

C
Carsten Otte 已提交
2009
	QETH_CARD_TEXT(card, 6, "noirqpnd");
J
Julian Wiedmann 已提交
2010
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2011
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2012
				      (addr_t) iob, 0, 0, timeout);
J
Julian Wiedmann 已提交
2013
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
F
Frank Blaschka 已提交
2014
	if (rc) {
2015 2016
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
C
Carsten Otte 已提交
2017
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
2018
		qeth_dequeue_cmd(card, iob);
2019
		qeth_put_cmd(iob);
2020
		qeth_unlock_channel(card, channel);
2021
		goto out;
F
Frank Blaschka 已提交
2022
	}
2023

2024
	timeout = wait_for_completion_interruptible_timeout(&iob->done,
2025 2026 2027
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2028

2029
	qeth_dequeue_cmd(card, iob);
2030 2031 2032

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
2033
		spin_lock_irq(&iob->lock);
2034 2035
		if (rc)
			/* Zap any callback that's still pending: */
2036 2037
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
2038 2039
	}

2040
	if (!rc)
2041
		rc = iob->rc;
2042 2043 2044

out:
	qeth_put_cmd(iob);
2045
	return rc;
F
Frank Blaschka 已提交
2046 2047
}

2048 2049 2050 2051 2052 2053
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

2054
static void qeth_read_conf_data_cb(struct qeth_card *card,
2055 2056
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
2057
{
2058
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2059
	int rc = 0;
2060
	u8 *tag;
2061 2062

	QETH_CARD_TEXT(card, 2, "cfgunit");
2063 2064 2065 2066 2067 2068

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081
	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;
2082

2083
out:
2084
	qeth_notify_cmd(iob, rc);
2085
	qeth_put_cmd(iob);
2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
2098 2099
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;
2100 2101 2102 2103 2104 2105 2106 2107 2108

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

2109
	return qeth_send_control_data(card, iob, NULL, NULL);
2110 2111
}

2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
2126 2127
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

2146
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2147 2148
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
2149
{
2150
	struct qeth_channel *channel = iob->channel;
2151 2152 2153
	u16 peer_level;
	int rc;

2154
	QETH_CARD_TEXT(card, 2, "idxrdcb");
2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
2176
	qeth_notify_cmd(iob, rc);
2177
	qeth_put_cmd(iob);
2178 2179
}

2180
static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2181 2182
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
2183
{
2184
	struct qeth_channel *channel = iob->channel;
2185 2186 2187
	u16 peer_level;
	int rc;

2188
	QETH_CARD_TEXT(card, 2, "idxwrcb");
2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
2204
	qeth_notify_cmd(iob, rc);
2205
	qeth_put_cmd(iob);
2206 2207 2208 2209 2210 2211 2212
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
2213
	struct ccw1 *ccw = __ccw_from_cmd(iob);
2214

2215 2216 2217
	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2218 2219
	iob->finalize = qeth_idx_finalize_cmd;

2220
	port |= QETH_IDX_ACT_INVAL_FRAME;
2221 2222 2223 2224 2225
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
2226
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2227 2228 2229 2230 2231 2232 2233 2234 2235
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

2236
	QETH_CARD_TEXT(card, 2, "idxread");
2237

2238
	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2239 2240 2241 2242 2243
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
2244
	iob->callback = qeth_idx_activate_read_channel_cb;
2245

2246
	rc = qeth_send_control_data(card, iob, NULL, NULL);
2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

2260
	QETH_CARD_TEXT(card, 2, "idxwrite");
2261

2262
	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2263 2264 2265 2266 2267
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
2268
	iob->callback = qeth_idx_activate_write_channel_cb;
2269

2270
	rc = qeth_send_control_data(card, iob, NULL, NULL);
2271 2272 2273 2274 2275 2276 2277
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

F
Frank Blaschka 已提交
2278 2279 2280 2281 2282
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2283
	QETH_CARD_TEXT(card, 2, "cmenblcb");
F
Frank Blaschka 已提交
2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2296
	QETH_CARD_TEXT(card, 2, "cmenable");
F
Frank Blaschka 已提交
2297

2298
	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2299 2300
	if (!iob)
		return -ENOMEM;
2301

F
Frank Blaschka 已提交
2302 2303 2304 2305 2306
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

2307
	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
F
Frank Blaschka 已提交
2308 2309 2310 2311 2312 2313 2314
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2315
	QETH_CARD_TEXT(card, 2, "cmsetpcb");
F
Frank Blaschka 已提交
2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2328
	QETH_CARD_TEXT(card, 2, "cmsetup");
F
Frank Blaschka 已提交
2329

2330
	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2331 2332
	if (!iob)
		return -ENOMEM;
2333

F
Frank Blaschka 已提交
2334 2335 2336 2337 2338 2339
	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2340
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
F
Frank Blaschka 已提交
2341 2342
}

2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353
static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

2354
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
F
Frank Blaschka 已提交
2355
{
2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
2375
			qeth_free_qdio_queues(card);
2376 2377 2378 2379
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
2380
		else if (IS_LAYER2(card))
2381 2382 2383
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
F
Frank Blaschka 已提交
2384
	}
2385 2386 2387 2388 2389

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
F
Frank Blaschka 已提交
2390 2391
}

J
Julian Wiedmann 已提交
2392
static int qeth_get_mtu_outof_framesize(int framesize)
F
Frank Blaschka 已提交
2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
2414
	u8 link_type = 0;
F
Frank Blaschka 已提交
2415

2416
	QETH_CARD_TEXT(card, 2, "ulpenacb");
F
Frank Blaschka 已提交
2417 2418 2419 2420 2421

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
2422
	if (IS_IQD(card)) {
F
Frank Blaschka 已提交
2423 2424 2425
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
2426
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
F
Frank Blaschka 已提交
2427
	}
2428
	*(u16 *)reply->param = mtu;
F
Frank Blaschka 已提交
2429 2430 2431 2432 2433

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2434 2435 2436 2437 2438
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
2439
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
F
Frank Blaschka 已提交
2440 2441 2442
	return 0;
}

2443 2444 2445 2446
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	if (IS_OSN(card))
		return QETH_PROT_OSN2;
2447
	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2448 2449
}

F
Frank Blaschka 已提交
2450 2451
static int qeth_ulp_enable(struct qeth_card *card)
{
2452
	u8 prot_type = qeth_mpc_select_prot_type(card);
F
Frank Blaschka 已提交
2453
	struct qeth_cmd_buffer *iob;
2454
	u16 max_mtu;
2455
	int rc;
F
Frank Blaschka 已提交
2456

2457
	QETH_CARD_TEXT(card, 2, "ulpenabl");
F
Frank Blaschka 已提交
2458

2459
	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2460 2461
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2462

2463
	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
F
Frank Blaschka 已提交
2464 2465 2466 2467 2468
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2469
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2470 2471 2472
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
F
Frank Blaschka 已提交
2473 2474 2475 2476 2477 2478 2479
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

2480
	QETH_CARD_TEXT(card, 2, "ulpstpcb");
F
Frank Blaschka 已提交
2481 2482 2483 2484 2485

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
2486 2487
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
2488
		QETH_CARD_TEXT(card, 2, "olmlimit");
2489 2490
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
2491
		return -EMLINK;
2492
	}
S
Stefan Raspl 已提交
2493
	return 0;
F
Frank Blaschka 已提交
2494 2495 2496 2497 2498 2499 2500
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

2501
	QETH_CARD_TEXT(card, 2, "ulpsetup");
F
Frank Blaschka 已提交
2502

2503
	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2504 2505
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2506 2507 2508 2509 2510 2511 2512 2513

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

2514
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
F
Frank Blaschka 已提交
2515 2516
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2517
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
F
Frank Blaschka 已提交
2518 2519
}

2520 2521
static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
2522 2523 2524
{
	struct qeth_qdio_out_buffer *newbuf;

2525
	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2526 2527 2528
	if (!newbuf)
		return -ENOMEM;

2529
	newbuf->buffer = q->qdio_bufs[bidx];
2530 2531 2532 2533
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
2534
	return 0;
2535 2536
}

2537
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2538 2539 2540 2541
{
	if (!q)
		return;

2542
	qeth_drain_output_queue(q, true);
2543 2544 2545 2546
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

2547
static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2548 2549
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2550
	unsigned int i;
2551 2552 2553 2554

	if (!q)
		return NULL;

2555 2556 2557 2558
	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2559
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2560
			goto err_out_bufs;
2561
	}
2562

2563
	return q;
2564 2565 2566

err_out_bufs:
	while (i > 0)
2567
		qeth_free_out_buf(q->bufs[--i]);
2568 2569 2570 2571
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
2572
}
2573

2574 2575 2576 2577 2578 2579 2580 2581
static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

2582
static int qeth_alloc_qdio_queues(struct qeth_card *card)
F
Frank Blaschka 已提交
2583
{
2584
	unsigned int i;
F
Frank Blaschka 已提交
2585

2586
	QETH_CARD_TEXT(card, 2, "allcqdbf");
F
Frank Blaschka 已提交
2587 2588 2589 2590 2591

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

2592
	QETH_CARD_TEXT(card, 2, "inq");
S
Sebastian Ott 已提交
2593
	card->qdio.in_q = qeth_alloc_qdio_queue();
F
Frank Blaschka 已提交
2594 2595
	if (!card->qdio.in_q)
		goto out_nomem;
S
Sebastian Ott 已提交
2596

F
Frank Blaschka 已提交
2597 2598 2599
	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;
2600

F
Frank Blaschka 已提交
2601 2602
	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2603 2604 2605 2606
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
F
Frank Blaschka 已提交
2607
			goto out_freeoutq;
2608
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2609 2610 2611 2612
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
2613
		INIT_LIST_HEAD(&queue->pending_bufs);
2614
		spin_lock_init(&queue->lock);
2615
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2616 2617 2618 2619 2620 2621 2622 2623 2624
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
2625
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
F
Frank Blaschka 已提交
2626
	}
2627 2628 2629 2630 2631

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

F
Frank Blaschka 已提交
2632 2633 2634
	return 0;

out_freeoutq:
2635
	while (i > 0) {
2636
		qeth_free_output_queue(card->qdio.out_qs[--i]);
2637 2638
		card->qdio.out_qs[i] = NULL;
	}
F
Frank Blaschka 已提交
2639 2640
	qeth_free_buffer_pool(card);
out_freeinq:
S
Sebastian Ott 已提交
2641
	qeth_free_qdio_queue(card->qdio.in_q);
F
Frank Blaschka 已提交
2642 2643 2644 2645 2646 2647
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

2648
static void qeth_free_qdio_queues(struct qeth_card *card)
2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
2666 2667 2668
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
2669 2670 2671
	}
}

2672 2673 2674
static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
2675 2676 2677
	struct qeth_qdio_out_q *queue;
	unsigned int i;

2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694
	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
F
Frank Blaschka 已提交
2710 2711 2712 2713
}

static int qeth_qdio_activate(struct qeth_card *card)
{
2714
	QETH_CARD_TEXT(card, 3, "qdioact");
J
Jan Glauber 已提交
2715
	return qdio_activate(CARD_DDEV(card));
F
Frank Blaschka 已提交
2716 2717 2718 2719 2720 2721
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

2722
	QETH_CARD_TEXT(card, 2, "dmact");
F
Frank Blaschka 已提交
2723

2724
	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2725 2726
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
2727 2728 2729 2730 2731

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2732
	return qeth_send_control_data(card, iob, NULL, NULL);
F
Frank Blaschka 已提交
2733 2734 2735 2736 2737 2738
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

2739
	QETH_CARD_TEXT(card, 2, "mpcinit");
F
Frank Blaschka 已提交
2740 2741 2742

	rc = qeth_issue_next_read(card);
	if (rc) {
2743
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
F
Frank Blaschka 已提交
2744 2745 2746 2747
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
2748
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2749
		return rc;
F
Frank Blaschka 已提交
2750 2751 2752
	}
	rc = qeth_cm_setup(card);
	if (rc) {
2753
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2754
		return rc;
F
Frank Blaschka 已提交
2755 2756 2757
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
2758
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2759
		return rc;
F
Frank Blaschka 已提交
2760 2761 2762
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
2763
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2764
		return rc;
F
Frank Blaschka 已提交
2765
	}
2766
	rc = qeth_alloc_qdio_queues(card);
F
Frank Blaschka 已提交
2767
	if (rc) {
2768
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2769
		return rc;
F
Frank Blaschka 已提交
2770 2771 2772
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
2773
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2774
		qeth_free_qdio_queues(card);
2775
		return rc;
F
Frank Blaschka 已提交
2776 2777 2778
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
2779
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2780
		return rc;
F
Frank Blaschka 已提交
2781 2782 2783
	}
	rc = qeth_dm_act(card);
	if (rc) {
2784
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2785
		return rc;
F
Frank Blaschka 已提交
2786 2787 2788 2789 2790
	}

	return 0;
}

2791
static void qeth_print_status_message(struct qeth_card *card)
F
Frank Blaschka 已提交
2792 2793
{
	switch (card->info.type) {
2794 2795 2796
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
F
Frank Blaschka 已提交
2797 2798 2799 2800 2801 2802 2803 2804 2805 2806
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
2807
		fallthrough;
F
Frank Blaschka 已提交
2808
	case QETH_CARD_TYPE_IQD:
2809
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
F
Frank Blaschka 已提交
2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
2824 2825 2826 2827 2828 2829 2830
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
F
Frank Blaschka 已提交
2831 2832 2833 2834 2835 2836
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

C
Carsten Otte 已提交
2837
	QETH_CARD_TEXT(card, 5, "inwrklst");
F
Frank Blaschka 已提交
2838 2839 2840 2841 2842 2843 2844

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

J
Julian Wiedmann 已提交
2845 2846
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
F
Frank Blaschka 已提交
2847 2848 2849 2850 2851 2852 2853
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

2854
	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
F
Frank Blaschka 已提交
2855 2856
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2857
			if (page_count(entry->elements[i]) > 1) {
F
Frank Blaschka 已提交
2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
2869 2870
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
F
Frank Blaschka 已提交
2871
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2872
		if (page_count(entry->elements[i]) > 1) {
2873
			struct page *page = dev_alloc_page();
2874 2875

			if (!page)
F
Frank Blaschka 已提交
2876
				return NULL;
2877 2878 2879 2880

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
F
Frank Blaschka 已提交
2881 2882 2883 2884 2885 2886 2887 2888 2889
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
2890
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
F
Frank Blaschka 已提交
2891 2892
	int i;

2893
	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2894
		buf->rx_skb = netdev_alloc_skb(card->dev,
2895 2896
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
2897
		if (!buf->rx_skb)
2898
			return -ENOMEM;
2899 2900
	}

2901 2902 2903 2904 2905 2906 2907
	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}
F
Frank Blaschka 已提交
2908 2909 2910 2911 2912 2913 2914 2915 2916

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
2917
		buf->buffer->element[i].addr =
2918
			page_to_phys(pool_entry->elements[i]);
F
Frank Blaschka 已提交
2919
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2920
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
F
Frank Blaschka 已提交
2921
		else
2922 2923
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
F
Frank Blaschka 已提交
2924 2925 2926 2927
	}
	return 0;
}

J
Julian Wiedmann 已提交
2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

2940
static int qeth_init_qdio_queues(struct qeth_card *card)
F
Frank Blaschka 已提交
2941
{
2942
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2943
	unsigned int i;
F
Frank Blaschka 已提交
2944 2945
	int rc;

2946
	QETH_CARD_TEXT(card, 2, "initqdqs");
F
Frank Blaschka 已提交
2947 2948

	/* inbound queue */
2949 2950
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));
2951

F
Frank Blaschka 已提交
2952 2953
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
2954
	for (i = 0; i < rx_bufs; i++) {
2955 2956 2957 2958 2959
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

2960
	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2961 2962
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
		     NULL);
F
Frank Blaschka 已提交
2963
	if (rc) {
2964
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
F
Frank Blaschka 已提交
2965 2966
		return rc;
	}
2967 2968 2969 2970 2971 2972 2973

	/* completion */
	rc = qeth_cq_init(card);
	if (rc) {
		return rc;
	}

F
Frank Blaschka 已提交
2974 2975
	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2976 2977 2978 2979 2980 2981
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
2982
		queue->prev_hdr = NULL;
2983
		queue->coalesced_frames = 0;
2984
		queue->bulk_start = 0;
J
Julian Wiedmann 已提交
2985 2986
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2987 2988
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
2989
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
F
Frank Blaschka 已提交
2990 2991 2992 2993
	}
	return 0;
}

2994
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2995
				  struct qeth_cmd_buffer *iob)
2996
{
2997
	qeth_mpc_finalize_cmd(card, iob);
2998 2999

	/* override with IPA-specific values: */
3000
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3001 3002
}

3003
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3004 3005 3006
			  u16 cmd_length,
			  bool (*match)(struct qeth_cmd_buffer *iob,
					struct qeth_cmd_buffer *reply))
3007 3008
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
3009
	u16 total_length = iob->length;
3010

3011 3012
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
3013
	iob->finalize = qeth_ipa_finalize_cmd;
3014
	iob->match = match;
3015

3016
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3017
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3018
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3019 3020
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3021 3022
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3023
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3024 3025 3026
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);

3027 3028 3029 3030 3031 3032 3033 3034
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}

3035 3036 3037 3038 3039 3040
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
3041
	struct qeth_ipacmd_hdr *hdr;
3042 3043 3044 3045 3046 3047 3048

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

3049
	qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3050 3051 3052 3053 3054

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
3055
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3056 3057 3058 3059
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
3060 3061 3062 3063
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

3064 3065 3066 3067 3068 3069 3070 3071
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

E
Eugene Crosser 已提交
3072 3073 3074 3075 3076 3077
/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */

F
Frank Blaschka 已提交
3078 3079 3080 3081 3082 3083 3084
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

C
Carsten Otte 已提交
3085
	QETH_CARD_TEXT(card, 4, "sendipa");
3086

3087
	if (card->read_or_write_problem) {
3088
		qeth_put_cmd(iob);
3089 3090 3091
		return -EIO;
	}

3092 3093
	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
3094
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3095 3096 3097 3098
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
F
Frank Blaschka 已提交
3099 3100 3101 3102
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113
static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

3114
static int qeth_send_startlan(struct qeth_card *card)
F
Frank Blaschka 已提交
3115
{
3116
	struct qeth_cmd_buffer *iob;
F
Frank Blaschka 已提交
3117

3118
	QETH_CARD_TEXT(card, 2, "strtlan");
F
Frank Blaschka 已提交
3119

3120
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3121 3122
	if (!iob)
		return -ENOMEM;
3123
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
F
Frank Blaschka 已提交
3124 3125
}

3126
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
F
Frank Blaschka 已提交
3127
{
3128
	if (!cmd->hdr.return_code)
F
Frank Blaschka 已提交
3129 3130
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
3131
	return cmd->hdr.return_code;
F
Frank Blaschka 已提交
3132 3133 3134 3135 3136
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3137
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3138
	struct qeth_query_cmds_supp *query_cmd;
F
Frank Blaschka 已提交
3139

C
Carsten Otte 已提交
3140
	QETH_CARD_TEXT(card, 3, "quyadpcb");
3141
	if (qeth_setadpparms_inspect_rc(cmd))
3142
		return -EIO;
F
Frank Blaschka 已提交
3143

3144 3145 3146 3147 3148 3149
	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
3150
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3151
	}
3152 3153

	card->options.adp.supported = query_cmd->supported_cmds;
3154
	return 0;
F
Frank Blaschka 已提交
3155 3156
}

S
Stefan Raspl 已提交
3157
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3158 3159
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
F
Frank Blaschka 已提交
3160
{
3161
	struct qeth_ipacmd_setadpparms_hdr *hdr;
F
Frank Blaschka 已提交
3162 3163
	struct qeth_cmd_buffer *iob;

3164 3165 3166 3167 3168 3169
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;
F
Frank Blaschka 已提交
3170

3171 3172 3173 3174 3175
	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
F
Frank Blaschka 已提交
3176 3177 3178
	return iob;
}

3179
static int qeth_query_setadapterparms(struct qeth_card *card)
F
Frank Blaschka 已提交
3180 3181 3182 3183
{
	int rc;
	struct qeth_cmd_buffer *iob;

C
Carsten Otte 已提交
3184
	QETH_CARD_TEXT(card, 3, "queryadp");
F
Frank Blaschka 已提交
3185
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3186
				   SETADP_DATA_SIZEOF(query_cmds_supp));
3187 3188
	if (!iob)
		return -ENOMEM;
F
Frank Blaschka 已提交
3189 3190 3191 3192
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

3193 3194 3195 3196 3197
static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

3198
	QETH_CARD_TEXT(card, 2, "qipasscb");
3199 3200

	cmd = (struct qeth_ipa_cmd *) data;
3201 3202

	switch (cmd->hdr.return_code) {
3203 3204
	case IPA_RC_SUCCESS:
		break;
3205 3206
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
3207
		QETH_CARD_TEXT(card, 2, "ipaunsup");
3208 3209
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3210
		return -EOPNOTSUPP;
3211
	default:
3212 3213 3214
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
3215 3216
	}

3217 3218 3219 3220 3221
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
3222 3223
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
3224 3225 3226
	return 0;
}

3227 3228
static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
3229 3230 3231 3232
{
	int rc;
	struct qeth_cmd_buffer *iob;

3233
	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3234
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3235 3236
	if (!iob)
		return -ENOMEM;
3237 3238 3239 3240
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

3241 3242 3243
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
3244
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3245
	struct qeth_query_switch_attributes *attrs;
3246
	struct qeth_switch_info *sw_info;
3247 3248

	QETH_CARD_TEXT(card, 2, "qswiatcb");
3249
	if (qeth_setadpparms_inspect_rc(cmd))
3250
		return -EIO;
3251

3252 3253 3254 3255 3256 3257
	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
3271
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3272 3273
	if (!iob)
		return -ENOMEM;
3274 3275 3276 3277
	return qeth_send_ipa_cmd(card, iob,
				qeth_query_switch_attributes_cb, sw_info);
}

3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

3297 3298 3299
static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3300 3301
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;
3302

3303
	if (rc) {
3304
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3305 3306 3307 3308
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
3309 3310 3311 3312 3313 3314 3315
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

3316
	QETH_CARD_TEXT(card, 2, "qdiagass");
3317
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3318 3319
	if (!iob)
		return -ENOMEM;
3320 3321 3322 3323 3324 3325 3326 3327 3328
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
3329
	int level;
3330 3331 3332 3333 3334 3335 3336

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
3337 3338
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3339
		tid->lparnr = info222->lpar_number;
3340
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3341 3342 3343 3344 3345 3346 3347 3348 3349
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
3350 3351
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;
3352

3353
	if (rc) {
3354
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3355 3356
		return -EIO;
	}
3357 3358 3359 3360 3361 3362 3363 3364
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

3365
	QETH_CARD_TEXT(card, 2, "diagtrap");
3366
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3367 3368
	if (!iob)
		return -ENOMEM;
3369
	cmd = __ipa_cmd(iob);
3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

3389 3390 3391 3392
static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
F
Frank Blaschka 已提交
3393
{
J
Jan Glauber 已提交
3394
	if (qdio_error) {
C
Carsten Otte 已提交
3395
		QETH_CARD_TEXT(card, 2, dbftext);
C
Carsten Otte 已提交
3396
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3397
			       buf->element[15].sflags);
C
Carsten Otte 已提交
3398
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3399
			       buf->element[14].sflags);
C
Carsten Otte 已提交
3400
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3401
		if ((buf->element[15].sflags) == 0x12) {
3402
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3403 3404 3405
			return 0;
		} else
			return 1;
F
Frank Blaschka 已提交
3406 3407 3408 3409
	}
	return 0;
}

static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are short on memory, so we switch back to
			 * traditional skb allocation and drop packets
			 */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count, NULL);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

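/* Inspect the completion status of a TX buffer: SBALF15 codes 15..31 are
 * ignored, anything else is traced as a link failure.
 */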
static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
		       (u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switch to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

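/* Hand the buffers [index, index + count) of @queue to the device with a
 * single SIGA. On IQD a single-buffer flush may be tagged with a QAOB for
 * asynchronous completion; on OSA a PCI request flag is set when a TX
 * completion interrupt will be needed to make progress.
 */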
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	struct qeth_card *card = queue->card;
	unsigned int frames, usecs;
	struct qaob *aob = NULL;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = qdio_allocate_aob();
			if (buf->aob) {
				struct qeth_qaob_priv1 *priv;

				aob = buf->aob;
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
			}
		}
	} else {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * There is no outstanding PCI request, so we
				 * have to request one to be sure that the PCI
				 * will wake us at some time in the future, so
				 * that we can flush packed buffers that might
				 * still be hanging around, which can happen if
				 * no further send was requested by the stack.
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
		     index, count, aob);

	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}

		break;
	default:
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}

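/* Flush the current bulk of buffers and start a new one. */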
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

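/* Select whether a completion queue is used. The QDIO queues are freed so
 * that they can be reallocated with the new setting later on.
 */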
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;

}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

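/* An async TX completion (QAOB) arrived: mark it DONE and, if its buffer
 * is still waiting for it, kick the owning queue's TX NAPI instance.
 */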
static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

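/* Process completed buffers on the completion queue (CQ): each buffer
 * element carries the address of a QAOB that signals an asynchronous TX
 * completion. The buffers are then scrubbed and returned to the device.
 */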
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     cq->next_buf_to_init, count, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;

	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
}

/**
 * qeth_get_priority_queue() - select the outbound queue for an skb
 * @card: card that the skb is transmitted on
 * @skb: skb to classify
 *
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
EXPORT_SYMBOL_GPL(qeth_count_elements);

#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}

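/* Check whether @curr_skb may share an IQD buffer with the previously
 * queued packet: all packets in one buffer must have the same target
 * (destination MAC and VLAN for L2, next hop and VLAN for L3).
 */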
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			__set_bit(element, buf->from_kmem_cache);
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_phys(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_phys(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}

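/* IQD TX path: append the skb to the current bulk buffer or open a new
 * one, stop the txq when the queue runs full, and flush according to the
 * bulk limit and the xmit_more hint.
 */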
static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}

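/* OSA TX path with optional packing: several small skbs may share one
 * buffer. Buffers are flushed when they are full, when packing mode ends,
 * or when the txq had to be stopped.
 */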
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			struct sk_buff *skb, struct qeth_hdr *hdr,
			unsigned int offset, unsigned int hd_len,
			int elements_needed)
{
	unsigned int start_index = queue->next_buf_to_fill;
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);

out:
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);

static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

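/* Common TX entry point for the sub-drivers: build the HW header, then
 * hand the skb to the IQD bulk path or the OSA packing path.
 */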
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, __be16 proto,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  __be16 proto, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, proto, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		spin_lock(&queue->lock);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
		spin_unlock(&queue->lock);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	setparms = &(cmd->data.setadapterparms);
	if (qeth_setadpparms_inspect_rc(cmd)) {
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return (cmd->hdr.return_code) ? -EIO : 0;
}

void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
				   SETADP_DATA_SIZEOF(mode));
	if (!iob)
		return;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *adp_cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	adp_cmd = &cmd->data.setadapterparms;
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
		return -EADDRNOTAVAIL;

	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmac");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
				   SETADP_DATA_SIZEOF(change_addr));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

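/* Callback for SET_ACCESS_CONTROL (QDIO data connection isolation):
 * translate the adapter's return code into an errno and log the outcome.
 */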
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
4438
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
E
Einar Lueck 已提交
4439 4440
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4441
	QETH_CARD_TEXT(card, 4, "setaccb");
E
Einar Lueck 已提交
4442 4443

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4444 4445
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
S
Stefan Raspl 已提交
4446 4447
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
4448 4449 4450
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
4451
	switch (qeth_setadpparms_inspect_rc(cmd)) {
E
Einar Lueck 已提交
4452
	case SET_ACCESS_CTRL_RC_SUCCESS:
4453
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
E
Einar Lueck 已提交
4454 4455
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
4456
		else
E
Einar Lueck 已提交
4457 4458
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
4459
		return 0;
S
Stefan Raspl 已提交
4460
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4461 4462
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
4463
		return 0;
S
Stefan Raspl 已提交
4464
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4465 4466
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
4467
		return 0;
E
Einar Lueck 已提交
4468 4469 4470
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
4471
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4472 4473 4474 4475
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
4476
		return -EOPNOTSUPP;
E
Einar Lueck 已提交
4477 4478 4479
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
4480
		return -EPERM;
S
Stefan Raspl 已提交
4481 4482 4483
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
4484
		return -EOPNOTSUPP;
S
Stefan Raspl 已提交
4485 4486 4487
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
					"enabled at the adjacent switch port");
4488
		return -EREMOTEIO;
S
Stefan Raspl 已提交
4489 4490 4491
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
					"at the adjacent switch failed\n");
4492 4493
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
E
Einar Lueck 已提交
4494
	default:
4495
		return -EIO;
E
Einar Lueck 已提交
4496 4497 4498
	}
}

4499 4500
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
E
Einar Lueck 已提交
4501 4502 4503 4504 4505 4506
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

C
Carsten Otte 已提交
4507
	QETH_CARD_TEXT(card, 4, "setacctl");
E
Einar Lueck 已提交
4508

4509 4510 4511 4512 4513 4514
	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

E
Einar Lueck 已提交
4515
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4516
				   SETADP_DATA_SIZEOF(set_access_ctrl));
4517 4518
	if (!iob)
		return -ENOMEM;
4519
	cmd = __ipa_cmd(iob);
E
Einar Lueck 已提交
4520
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4521
	access_ctrl_req->subcmd_code = mode;
E
Einar Lueck 已提交
4522 4523

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4524
			       NULL);
4525
	if (rc) {
4526
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4527 4528
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
E
Einar Lueck 已提交
4529
	}
4530

E
Einar Lueck 已提交
4531 4532 4533
	return rc;
}

void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
4612
				struct qeth_reply *reply, unsigned long data)
F
Frank Blaschka 已提交
4613
{
4614 4615 4616 4617
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
4618
	void *snmp_data;
F
Frank Blaschka 已提交
4619

C
Carsten Otte 已提交
4620
	QETH_CARD_TEXT(card, 3, "snpcmdcb");
F
Frank Blaschka 已提交
4621 4622

	if (cmd->hdr.return_code) {
4623
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4624
		return -EIO;
F
Frank Blaschka 已提交
4625 4626 4627 4628
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
4629
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4630
		return -EIO;
F
Frank Blaschka 已提交
4631
	}
4632 4633 4634 4635 4636

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
4637
	} else {
4638 4639
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
4640
	}
F
Frank Blaschka 已提交
4641 4642 4643

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4644 4645
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
F
Frank Blaschka 已提交
4646
	}
C
Carsten Otte 已提交
4647
	QETH_CARD_TEXT_(card, 4, "snore%i",
4648
			cmd->data.setadapterparms.hdr.used_total);
C
Carsten Otte 已提交
4649
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4650
			cmd->data.setadapterparms.hdr.seq_no);
F
Frank Blaschka 已提交
4651
	/*copy entries to user buffer*/
4652
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
F
Frank Blaschka 已提交
4653
	qinfo->udata_offset += data_len;
4654

F
Frank Blaschka 已提交
4655 4656 4657 4658 4659 4660
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

4661
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
F
Frank Blaschka 已提交
4662
{
4663
	struct qeth_snmp_ureq __user *ureq;
F
Frank Blaschka 已提交
4664
	struct qeth_cmd_buffer *iob;
4665
	unsigned int req_len;
F
Frank Blaschka 已提交
4666 4667 4668
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

C
Carsten Otte 已提交
4669
	QETH_CARD_TEXT(card, 3, "snmpcmd");
F
Frank Blaschka 已提交
4670

4671
	if (IS_VM_NIC(card))
F
Frank Blaschka 已提交
4672 4673 4674
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4675
	    IS_LAYER3(card))
F
Frank Blaschka 已提交
4676
		return -EOPNOTSUPP;
4677

4678 4679 4680 4681 4682
	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

4683 4684 4685 4686
	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

4687 4688 4689 4690 4691 4692 4693
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4694
		return -EFAULT;
4695 4696
	}

F
Frank Blaschka 已提交
4697 4698
	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
4699
		qeth_put_cmd(iob);
F
Frank Blaschka 已提交
4700 4701 4702 4703
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

4704
	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
F
Frank Blaschka 已提交
4705
	if (rc)
4706 4707
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
F
Frank Blaschka 已提交
4708 4709 4710 4711
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}
4712

F
Frank Blaschka 已提交
4713 4714 4715 4716
	kfree(qinfo.udata);
	return rc;
}

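/* Callback for the QUERY OAT adapter command: append each reply part to
 * the caller's buffer until all parts have been received.
 */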
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
4720
{
4721
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
J
Julian Wiedmann 已提交
4722
	struct qeth_qoat_priv *priv = reply->param;
4723 4724 4725
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
4726
	if (qeth_setadpparms_inspect_rc(cmd))
4727
		return -EIO;
4728 4729 4730

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

4731 4732
	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;
4733

4734 4735
	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
4736 4737 4738 4739 4740 4741 4742 4743
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

4744
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

J
Julian Wiedmann 已提交
4756 4757
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;
4758

J
Julian Wiedmann 已提交
4759 4760
	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;
4761 4762 4763

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
4764
	priv.buffer = vzalloc(oat_data.buffer_len);
J
Julian Wiedmann 已提交
4765 4766
	if (!priv.buffer)
		return -ENOMEM;
4767 4768

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4769
				   SETADP_DATA_SIZEOF(query_oat));
4770 4771 4772 4773
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
4774
	cmd = __ipa_cmd(iob);
4775 4776 4777
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

J
Julian Wiedmann 已提交
4778
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4779
	if (!rc) {
4780 4781
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
4782 4783
		oat_data.response_len = priv.response_len;

J
Julian Wiedmann 已提交
4784 4785
		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4786
			rc = -EFAULT;
4787
	}
4788 4789

out_free:
4790
	vfree(priv.buffer);
4791 4792 4793
	return rc;
}

4794 4795
static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
E
Eugene Crosser 已提交
4796
{
4797
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4798
	struct qeth_link_info *link_info = reply->param;
E
Eugene Crosser 已提交
4799 4800 4801
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
4802
	if (qeth_setadpparms_inspect_rc(cmd))
4803
		return -EIO;
E
Eugene Crosser 已提交
4804

4805
	card_info = &cmd->data.setadapterparms.data.card_info;
4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861
	netdev_dbg(card->dev,
		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
		   card_info->card_type, card_info->port_mode,
		   card_info->port_speed);

	switch (card_info->port_mode) {
	case CARD_INFO_PORTM_FULLDUPLEX:
		link_info->duplex = DUPLEX_FULL;
		break;
	case CARD_INFO_PORTM_HALFDUPLEX:
		link_info->duplex = DUPLEX_HALF;
		break;
	default:
		link_info->duplex = DUPLEX_UNKNOWN;
	}

	switch (card_info->card_type) {
	case CARD_INFO_TYPE_1G_COPPER_A:
	case CARD_INFO_TYPE_1G_COPPER_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_TP;
		break;
	case CARD_INFO_TYPE_1G_FIBRE_A:
	case CARD_INFO_TYPE_1G_FIBRE_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_FIBRE;
		break;
	case CARD_INFO_TYPE_10G_FIBRE_A:
	case CARD_INFO_TYPE_10G_FIBRE_B:
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		break;
	default:
		switch (card_info->port_speed) {
		case CARD_INFO_PORTS_10M:
			link_info->speed = SPEED_10;
			break;
		case CARD_INFO_PORTS_100M:
			link_info->speed = SPEED_100;
			break;
		case CARD_INFO_PORTS_1G:
			link_info->speed = SPEED_1000;
			break;
		case CARD_INFO_PORTS_10G:
			link_info->speed = SPEED_10000;
			break;
		case CARD_INFO_PORTS_25G:
			link_info->speed = SPEED_25000;
			break;
		default:
			link_info->speed = SPEED_UNKNOWN;
		}

		link_info->port = PORT_OTHER;
	}

E
Eugene Crosser 已提交
4862 4863 4864
	return 0;
}

4865
int qeth_query_card_info(struct qeth_card *card,
4866
			 struct qeth_link_info *link_info)
E
Eugene Crosser 已提交
4867 4868 4869 4870 4871 4872
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
4873
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4874 4875
	if (!iob)
		return -ENOMEM;
4876 4877

	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
E
Eugene Crosser 已提交
4878 4879
}

4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
4947
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4948 4949
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4950 4951 4952
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
4953 4954
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
4955
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4956 4957 4958
		break;
	default:
		link_info->port = PORT_OTHER;
4959
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4960 4961 4962 4963 4964 4965
		break;
	}

	return 0;
}

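/* Seed the card's link_info from the card/link type, then try to refine
 * speed, duplex and port type via a QUERY OAT command if supported.
 */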
static void qeth_init_link_info(struct qeth_card *card)
{
	card->info.link_info.duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		card->info.link_info.speed = SPEED_10000;
		card->info.link_info.port = PORT_FIBRE;
4973
		card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			card->info.link_info.speed = SPEED_100;
			card->info.link_info.port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			card->info.link_info.speed = SPEED_1000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			card->info.link_info.speed = SPEED_10000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			card->info.link_info.speed = SPEED_25000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev, "Unknown link type %x\n",
				 card->info.link_type);
			card->info.link_info.speed = SPEED_UNKNOWN;
			card->info.link_info.port = PORT_OTHER;
		}
5000 5001

		card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
5002
	}
5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
			}
		}
	}
}

/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		ether_addr_copy(card->dev->dev_addr, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

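/* Probe the data channel: start it temporarily if it is offline, read the
 * device's configuration data and SSQD descriptor, and record whether
 * completion queueing (CQ) is available.
 */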
static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}

static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

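/* Build the qdio_initialize descriptor from the card's input/output queues
 * (plus the completion queue when enabled) and hand it to the qdio layer
 * via qdio_allocate() and qdio_establish().
 */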
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED)
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field	 = (void *)qib_parms;
	init_data.no_input_qs            = card->qdio.no_in_queues;
	init_data.no_output_qs           = card->qdio.no_out_queues;
	init_data.input_handler		 = qeth_qdio_input_handler;
	init_data.output_handler	 = qeth_qdio_output_handler;
	init_data.irq_poll		 = qeth_qdio_poll;
	init_data.int_parm               = (unsigned long) card;
	init_data.input_sbal_addr_array  = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}

out:
	kfree(qib_parms);
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}

static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}

static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
#ifdef CONFIG_QETH_OSN
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
#endif
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

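/* Bring up the read/write/data channels and run IDX activation (with a
 * bounded number of retries), then initialize MPC/IPA state and query the
 * adapter's capabilities before handing over to the discipline code.
 */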
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	qeth_init_link_info(card);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}

static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED)
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

	rc = disc->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	return rc;
}

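/* Counterpart to qeth_set_online(): detach the netdevice, let the discipline
 * tear down its state, drain the QDIO queues and stop all three channels.
 */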
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	disc->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

static int qeth_do_reset(void *data)
{
	const struct qeth_discipline *disc;
	struct qeth_card *card = data;
	int rc;

	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

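/* Layer 3 frames carry addressing info in the qeth header rather than an
 * Ethernet header; rebuild a (possibly faked) L2 header and VLAN tag so the
 * stack can process the skb normally.
 */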
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif

static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr, bool uses_frags)
{
	struct napi_struct *napi = &card->napi;
	bool is_cso;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_OSN:
		skb_push(skb, sizeof(*hdr));
		skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
		QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
		QETH_CARD_STAT_INC(card, rx_packets);

		card->osn_info.data_cb(skb);
		return;
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
		if (uses_frags)
			napi_free_frags(napi);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
}

static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}

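/* Walk the SBAL elements of one RX buffer and assemble the next packet:
 * at least linear_len bytes are copied into the skb head, the remainder is
 * attached as page fragments when the scatter-gather path is used.
 */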
static int qeth_extract_skb(struct qeth_card *card,
5693
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
J
Julian Wiedmann 已提交
5694
			    int *__offset)
F
Frank Blaschka 已提交
5695
{
5696
	struct qeth_priv *priv = netdev_priv(card->dev);
5697
	struct qdio_buffer *buffer = qethbuffer->buffer;
5698
	struct napi_struct *napi = &card->napi;
5699
	struct qdio_buffer_element *element;
5700
	unsigned int linear_len = 0;
5701
	bool uses_frags = false;
F
Frank Blaschka 已提交
5702
	int offset = *__offset;
5703
	bool use_rx_sg = false;
5704
	unsigned int headroom;
J
Julian Wiedmann 已提交
5705
	struct qeth_hdr *hdr;
5706
	struct sk_buff *skb;
5707
	int skb_len = 0;
F
Frank Blaschka 已提交
5708

5709 5710
	element = &buffer->element[*element_no];

5711
next_packet:
F
Frank Blaschka 已提交
5712
	/* qeth_hdr must not cross element boundaries */
5713
	while (element->length < offset + sizeof(struct qeth_hdr)) {
F
Frank Blaschka 已提交
5714
		if (qeth_is_last_sbale(element))
J
Julian Wiedmann 已提交
5715
			return -ENODATA;
F
Frank Blaschka 已提交
5716 5717 5718 5719
		element++;
		offset = 0;
	}

5720
	hdr = phys_to_virt(element->addr) + offset;
J
Julian Wiedmann 已提交
5721
	offset += sizeof(*hdr);
5722 5723
	skb = NULL;

J
Julian Wiedmann 已提交
5724
	switch (hdr->hdr.l2.id) {
5725
	case QETH_HEADER_TYPE_LAYER2:
J
Julian Wiedmann 已提交
5726
		skb_len = hdr->hdr.l2.pkt_length;
5727
		linear_len = ETH_HLEN;
5728
		headroom = 0;
5729 5730
		break;
	case QETH_HEADER_TYPE_LAYER3:
J
Julian Wiedmann 已提交
5731
		skb_len = hdr->hdr.l3.length;
5732 5733 5734 5735 5736
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

J
Julian Wiedmann 已提交
5737
		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5738 5739 5740 5741 5742
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

J
Julian Wiedmann 已提交
5743
		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5744 5745 5746
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
5747
		headroom = ETH_HLEN;
5748 5749
		break;
	case QETH_HEADER_TYPE_OSN:
J
Julian Wiedmann 已提交
5750
		skb_len = hdr->hdr.osn.pdu_length;
5751 5752 5753 5754 5755
		if (!IS_OSN(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

5756
		linear_len = skb_len;
5757 5758 5759
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
J
Julian Wiedmann 已提交
5760
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5761 5762 5763 5764
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

5765
		/* Can't determine packet length, drop the whole buffer. */
J
Julian Wiedmann 已提交
5766
		return -EPROTONOSUPPORT;
F
Frank Blaschka 已提交
5767 5768
	}

5769 5770 5771 5772
	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5773

5774
	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5775
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
5776 5777
		     !atomic_read(&card->force_alloc_skb) &&
		     !IS_OSN(card));
5778

5779
	if (use_rx_sg) {
5780
		/* QETH_CQ_ENABLED only: */
5781 5782
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
F
Frank Blaschka 已提交
5804
	}
5805

5806 5807 5808
	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
5809
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5810 5811
		goto walk_packet;
	}
F
Frank Blaschka 已提交
5812

5813 5814 5815
use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
5816
walk_packet:
F
Frank Blaschka 已提交
5817
	while (skb_len) {
5818
		int data_len = min(skb_len, (int)(element->length - offset));
5819
		char *data = phys_to_virt(element->addr) + offset;
5820 5821 5822

		skb_len -= data_len;
		offset += data_len;
5823

5824
		/* Extract data from current element: */
5825
		if (skb && data_len) {
5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
F
Frank Blaschka 已提交
5840
		}
5841 5842

		/* Step forward to next element: */
F
Frank Blaschka 已提交
5843 5844
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
C
Carsten Otte 已提交
5845
				QETH_CARD_TEXT(card, 4, "unexeob");
C
Carsten Otte 已提交
5846
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5847
				if (skb) {
5848 5849 5850 5851
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
5852 5853 5854
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
J
Julian Wiedmann 已提交
5855
				return -EMSGSIZE;
F
Frank Blaschka 已提交
5856 5857 5858 5859 5860
			}
			element++;
			offset = 0;
		}
	}
5861 5862 5863 5864 5865

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

5866
	*element_no = element - &buffer->element[0];
F
Frank Blaschka 已提交
5867
	*__offset = offset;
J
Julian Wiedmann 已提交
5868

5869
	qeth_receive_skb(card, skb, hdr, uses_frags);
J
Julian Wiedmann 已提交
5870 5871 5872
	return 0;
}

5873 5874
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
J
Julian Wiedmann 已提交
5875
{
5876
	unsigned int work_done = 0;
J
Julian Wiedmann 已提交
5877 5878

	while (budget) {
5879
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
J
Julian Wiedmann 已提交
5880 5881 5882 5883 5884 5885 5886
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
F
Frank Blaschka 已提交
5887
	}
J
Julian Wiedmann 已提交
5888 5889

	return work_done;
F
Frank Blaschka 已提交
5890 5891
}

5892
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5893
{
5894
	struct qeth_rx *ctx = &card->rx;
5895
	unsigned int work_done = 0;
5896

5897
	while (budget > 0) {
5898 5899 5900 5901
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

5902
		/* Fetch completed RX buffers: */
5903 5904
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
5905 5906 5907 5908
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
5909 5910 5911 5912 5913 5914
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

5915
		/* Process one completed RX buffer: */
5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5931
			buffer->pool_entry = NULL;
5932
			card->rx.b_count--;
5933 5934 5935
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);
5936 5937 5938 5939 5940

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
5941 5942 5943
		}
	}

5944 5945 5946
	return work_done;
}

5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

5965 5966 5967 5968 5969 5970 5971
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

5972 5973 5974 5975 5976 5977 5978 5979 5980 5981
	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

5982 5983 5984
	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

5985 5986 5987 5988 5989 5990 5991 5992 5993 5994
	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}
5995

5996
	if (napi_complete_done(napi, work_done) &&
5997
	    qdio_start_irq(CARD_DDEV(card)))
5998
		napi_schedule(napi);
5999

6000 6001 6002 6003
	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

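/* TX completion for IQD devices: QDIO_ERROR_SLSB_PENDING means the buffer
 * is still owned by the hardware and will complete later via its QAOB; park
 * it on pending_bufs and give the slot a fresh buffer so the queue keeps
 * flowing.
 */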
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
6005 6006
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
6007 6008 6009 6010
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
6011
	bool error = !!qdio_error;
6012

6013
	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
6014
		struct qaob *aob = buffer->aob;
6015
		struct qeth_qaob_priv1 *priv;
6016
		enum iucv_tx_notify notify;
6017 6018 6019 6020 6021 6022 6023 6024

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}
6025

6026 6027
		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

6028 6029 6030
		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
6031 6032
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

6033 6034 6035 6036 6037
			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
6038
			}
6039

6040 6041 6042
			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
6043
		}
6044

6045 6046 6047 6048
		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
6049
		memset(aob, 0, sizeof(*aob));
6050
	} else if (card->options.cq == QETH_CQ_ENABLED) {
6051 6052
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
6053 6054
	}

6055
	qeth_clear_output_buffer(queue, buffer, error, budget);
6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

6067 6068 6069 6070
	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);
6071 6072 6073

	while (1) {
		unsigned int start, error, i;
6074 6075
		unsigned int packets = 0;
		unsigned int bytes = 0;
6076 6077
		int completed;

6078
		qeth_tx_complete_pending_bufs(card, queue, false, budget);
6079

6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096
		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
6097 6098 6099
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
6100 6101 6102 6103
			return 0;
		}

		for (i = start; i < start + completed; i++) {
6104
			struct qeth_qdio_out_buffer *buffer;
6105 6106
			unsigned int bidx = QDIO_BUFNR(i);

6107
			buffer = queue->bufs[bidx];
6108
			packets += buffer->frames;
6109 6110 6111
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
6112 6113 6114 6115 6116
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
6117 6118 6119 6120
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
6121 6122 6123 6124
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);
6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

6137 6138 6139 6140 6141 6142 6143
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

6144 6145 6146 6147 6148 6149 6150 6151
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6152
		return -EIO;
6153 6154 6155 6156 6157 6158

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

6159 6160
int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
6161
{
6162
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6163 6164 6165

	QETH_CARD_TEXT(card, 4, "defadpcb");

6166 6167 6168 6169 6170
	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6171
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6172
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6173
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6174 6175
	return 0;
}
6176
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6177

6178 6179
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
6180 6181
						 u16 cmd_code,
						 unsigned int data_length,
6182
						 enum qeth_prot_versions prot)
6183
{
6184 6185
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
6186 6187 6188
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
6189 6190 6191 6192 6193 6194
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;
6195

6196 6197
	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;
6198

6199 6200 6201
	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
6202 6203
	return iob;
}
6204
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6205

6206 6207
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
6208
				      u16 cmd_code, u32 *data,
6209
				      enum qeth_prot_versions prot)
6210
{
6211
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6212 6213
	struct qeth_cmd_buffer *iob;

6214 6215
	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6216 6217
	if (!iob)
		return -ENOMEM;
6218

6219 6220
	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6221
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6222
}
6223
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6224

F
Frank Blaschka 已提交
6225 6226
static void qeth_unregister_dbf_views(void)
{
6227
	int x;
6228

6229 6230 6231 6232
	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
F
Frank Blaschka 已提交
6233 6234
}

C
Carsten Otte 已提交
6235
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
P
Peter Tiedemann 已提交
6236 6237
{
	char dbf_txt_buf[32];
6238
	va_list args;
P
Peter Tiedemann 已提交
6239

6240
	if (!debug_level_enabled(id, level))
P
Peter Tiedemann 已提交
6241
		return;
6242 6243 6244
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
C
Carsten Otte 已提交
6245
	debug_text_event(id, level, dbf_txt_buf);
P
Peter Tiedemann 已提交
6246 6247 6248
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

F
Frank Blaschka 已提交
6249 6250
static int qeth_register_dbf_views(void)
{
6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}
F
Frank Blaschka 已提交
6264

6265 6266 6267 6268 6269 6270
		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}
F
Frank Blaschka 已提交
6271

6272 6273 6274
		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}
F
Frank Blaschka 已提交
6275 6276 6277 6278

	return 0;
}

6279 6280
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

6281 6282
int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
F
Frank Blaschka 已提交
6283
{
6284 6285
	int rc;

6286
	mutex_lock(&qeth_mod_mutex);
F
Frank Blaschka 已提交
6287 6288
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
6289 6290
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
F
Frank Blaschka 已提交
6291 6292
		break;
	case QETH_DISCIPLINE_LAYER2:
6293 6294
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
F
Frank Blaschka 已提交
6295
		break;
6296 6297
	default:
		break;
F
Frank Blaschka 已提交
6298
	}
6299
	mutex_unlock(&qeth_mod_mutex);
6300

6301
	if (!card->discipline) {
6302 6303
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
6304
		return -EINVAL;
F
Frank Blaschka 已提交
6305
	}
6306

6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317
	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

6318
	card->options.layer = discipline;
6319
	return 0;
F
Frank Blaschka 已提交
6320 6321
}

6322
void qeth_remove_discipline(struct qeth_card *card)
F
Frank Blaschka 已提交
6323
{
6324 6325
	card->discipline->remove(card->gdev);

6326
	if (IS_LAYER2(card))
6327
		symbol_put(qeth_l2_discipline);
F
Frank Blaschka 已提交
6328
	else
6329
		symbol_put(qeth_l3_discipline);
6330
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6331
	card->discipline = NULL;
F
Frank Blaschka 已提交
6332 6333
}

6334
const struct device_type qeth_generic_devtype = {
6335 6336
	.name = "qeth_generic",
};
6337 6338
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

6339 6340 6341 6342
static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
};

6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410
#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

6411 6412 6413
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
6414
	struct qeth_priv *priv;
6415 6416 6417

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
6418
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6419
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6420
		break;
6421
	case QETH_CARD_TYPE_OSM:
6422
		dev = alloc_etherdev(sizeof(*priv));
6423
		break;
6424
	case QETH_CARD_TYPE_OSN:
6425 6426
		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
				   ether_setup);
6427 6428
		break;
	default:
6429
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6430 6431 6432 6433 6434
	}

	if (!dev)
		return NULL;

6435 6436
	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
6437
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6438

6439 6440
	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
6441
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
6442 6443 6444
	 /* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
6445 6446
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);
6447

6448 6449 6450 6451 6452 6453 6454 6455 6456 6457
	if (IS_OSN(card)) {
		dev->ethtool_ops = &qeth_osn_ethtool_ops;
	} else {
		dev->ethtool_ops = &qeth_ethtool_ops;
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
		if (IS_IQD(card))
			dev->features |= NETIF_F_SG;
	}
6458

6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472
	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

F
Frank Blaschka 已提交
6473 6474 6475 6476 6477
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
6478
	enum qeth_discipline_id enforced_disc;
6479
	char dbf_name[DBF_NAME_LEN];
F
Frank Blaschka 已提交
6480

6481
	QETH_DBF_TEXT(SETUP, 2, "probedev");
F
Frank Blaschka 已提交
6482 6483 6484 6485 6486

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

6487
	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
F
Frank Blaschka 已提交
6488

6489
	card = qeth_alloc_card(gdev);
F
Frank Blaschka 已提交
6490
	if (!card) {
6491
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
F
Frank Blaschka 已提交
6492 6493 6494
		rc = -ENOMEM;
		goto err_dev;
	}
6495 6496 6497

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
6498
	card->debug = qeth_get_dbf_entry(dbf_name);
6499
	if (!card->debug) {
6500 6501 6502
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
6503 6504
	}

6505
	qeth_setup_card(card);
6506
	card->dev = qeth_alloc_netdev(card);
6507 6508
	if (!card->dev) {
		rc = -ENOMEM;
6509
		goto err_card;
6510
	}
6511

6512 6513 6514
	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

6515 6516 6517 6518
	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
6519

6520 6521 6522 6523 6524
	if (IS_OSN(card))
		gdev->dev.groups = qeth_osn_dev_groups;
	else
		gdev->dev.groups = qeth_dev_groups;

6525 6526 6527 6528 6529 6530 6531
	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
6532
		/* It's so early that we don't need the discipline_mutex yet. */
6533
		rc = qeth_setup_discipline(card, enforced_disc);
6534
		if (rc)
6535
			goto err_setup_disc;
6536

6537 6538
		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
6539
		break;
F
Frank Blaschka 已提交
6540 6541 6542 6543
	}

	return 0;

6544
err_setup_disc:
6545
err_chp_desc:
6546
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

6558
	QETH_CARD_TEXT(card, 2, "removedv");
F
Frank Blaschka 已提交
6559

6560
	mutex_lock(&card->discipline_mutex);
6561 6562
	if (card->discipline)
		qeth_remove_discipline(card);
6563
	mutex_unlock(&card->discipline_mutex);
6564

6565 6566
	qeth_free_qdio_queues(card);

6567
	free_netdev(card->dev);
F
Frank Blaschka 已提交
6568 6569 6570 6571 6572 6573 6574 6575
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
6576
	enum qeth_discipline_id def_discipline;
F
Frank Blaschka 已提交
6577

6578
	mutex_lock(&card->discipline_mutex);
6579
	if (!card->discipline) {
6580 6581
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
6582
		rc = qeth_setup_discipline(card, def_discipline);
F
Frank Blaschka 已提交
6583 6584 6585
		if (rc)
			goto err;
	}
6586

6587 6588
	rc = qeth_set_online(card, card->discipline);

F
Frank Blaschka 已提交
6589
err:
6590
	mutex_unlock(&card->discipline_mutex);
F
Frank Blaschka 已提交
6591 6592 6593 6594 6595 6596
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6597
	int rc;
6598

6599 6600 6601 6602 6603
	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
F
Frank Blaschka 已提交
6604 6605 6606 6607 6608
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6609

6610 6611 6612 6613
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
6614
	qeth_drain_output_queues(card);
6615
	qdio_free(CARD_DDEV(card));
F
Frank Blaschka 已提交
6616 6617
}

6618 6619
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
F
Frank Blaschka 已提交
6620 6621 6622
{
	int err;

6623 6624
	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);
6625 6626 6627

	return err ? err : count;
}
6628
static DRIVER_ATTR_WO(group);
F
Frank Blaschka 已提交
6629

6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641
static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

J
Julian Wiedmann 已提交
6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);

6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
6682 6683
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
6684
			return 1;
6685
		return 0;
6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

6713 6714
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
6715 6716
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6717
	u32 *features = reply->param;
6718

6719
	if (qeth_setassparms_inspect_rc(cmd))
6720
		return -EIO;
6721

6722
	*features = cmd->data.setassparms.data.flags_32bit;
6723 6724 6725
	return 0;
}

6726 6727
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
6728
{
6729 6730
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
6731 6732
}

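/* Checksum offload is negotiated in two steps: ASS_START reports what the
 * adapter supports, ASS_ENABLE then switches on the required TCP/UDP (and,
 * for L3 IPv4 TX, IP header) checksum bits and verifies they took effect.
 */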
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6734
			    enum qeth_prot_versions prot, u8 *lp2lp)
6735
{
6736
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6737 6738 6739
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
6740 6741
	int rc;

6742 6743 6744
	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
6745
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6746

6747 6748 6749 6750 6751 6752 6753
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
6754
		return rc;
6755

6756 6757 6758 6759
	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}
6760

6761 6762
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
6763
				       prot);
6764 6765 6766
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
6767
	}
6768 6769 6770 6771 6772

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6773
	if (rc) {
6774
		qeth_set_csum_off(card, cstype, prot);
6775 6776
		return rc;
	}
6777

6778 6779 6780 6781 6782 6783
	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

6784 6785
	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6786 6787 6788 6789

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

6790 6791 6792
	return 0;
}

6793
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6794
			     enum qeth_prot_versions prot, u8 *lp2lp)
6795
{
6796
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6797
		    qeth_set_csum_off(card, cstype, prot);
6798 6799
}

6800 6801 6802 6803 6804 6805 6806
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
6807
		return -EIO;
6808 6809 6810 6811 6812 6813

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

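/* Enable the TSO assist: IPA_CMD_ASS_START queries the supported modes and
 * MSS, IPA_CMD_ASS_ENABLE then activates TCP Large Send and is verified
 * against the returned caps. Any failure switches the assist back off.
 */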
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

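/* Toggle the inbound checksum assist for both IP versions. Enabling succeeds
 * if at least one version ends up active, disabling fails if any version
 * stays active.
 */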
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no IPv6 Offload Assist available, so rc_ipv4 decides */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

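/* TSO and (without lp2lp support) TX checksum offload are restricted for
 * traffic to local next hops. Once a restricted feature was toggled and none
 * of them remains active for an IP version, the cached local addresses for
 * that version are flushed.
 */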
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

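/* Toggle each changed offload feature via its IPA assist, dropping failed
 * bits from 'changed'. On partial failure only the successful changes are
 * folded into dev->features and -EIO is returned.
 */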
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

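/* Per-skb feature fixup: mask out offloads that must not be used for traffic
 * with a local next hop, and drop scatter-gather for GSO skbs whose segments
 * fit into an order-0 linear allocation.
 */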
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

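/* Fill in RX stats from the card-wide counters, and accumulate TX stats
 * across all output queues.
 */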
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

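/* IQD devices reserve one TX queue for non-unicast traffic. Unicast traffic
 * is spread over the remaining queues, remapping any pick of the mcast queue
 * to the first unicast queue.
 */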
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

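/* Bring up the interface: mark the data channel as up, start the TX queues
 * and enable the per-queue TX NAPI instances as well as the RX NAPI.
 */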
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

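/* Module init: set up debugfs and the dbf views, register the root device,
 * create the header and outbuf caches and register the ccw and ccwgroup
 * drivers. Each failure unwinds the steps completed so far.
 */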
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");