// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127, NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}

		card->qdio.no_in_queues = 2;
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

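/* Map the SBALF 15 completion code of a CQ buffer to an AF_IUCV TX
 * notification, optionally as its "delayed" variant.
 */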
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
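
/* Fill a channel command word (CCW) with command code, flags, data length
 * and data address.
 */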
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				netdev_name(card->dev));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

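/* Cancel all commands that are still waiting for a reply on this card. */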
static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

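/* Check an inbound buffer for an IDX TERMINATE indication and map it to an
 * errno.
 */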
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

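/* Inspect channel/device status and sense data of an IRB and decide whether
 * the I/O must be treated as failed.
 */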
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

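/* Interrupt handler for the card's read, write and data channels. */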
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

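/* Deliver a TX notification to the AF_IUCV sockets owning the skbs queued on
 * this buffer.
 */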
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

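/* Switch an OSA device between a single TX queue and one queue per netdev TX
 * queue.
 */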
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];
	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;
	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

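/* Query z/VM via DIAG 0x26C for the VNIC's transport protocol, to derive the
 * layer discipline.
 */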
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}
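
/*
 * Illustrative sketch only, not part of the driver: a minimal reply callback
 * as it could be passed to qeth_send_control_data(). The callback name below
 * is made up; qeth_cm_enable_cb() further down is a real instance of this
 * pattern. As described above, the callback returns > 0 while more reply
 * blocks are expected, 0 for the final block, and < 0 on error.
 *
 *	static int example_reply_cb(struct qeth_card *card,
 *				    struct qeth_reply *reply, unsigned long data)
 *	{
 *		struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)data;
 *
 *		// copy what the caller needs out of iob->data; reply->param
 *		// carries the reply_param pointer given to the send call
 *		return 0;
 *	}
 *
 *	rc = qeth_send_control_data(card, iob, example_reply_cb, reply_param);
 */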

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

2089
	return qeth_send_control_data(card, iob, NULL, NULL);
2090 2091
}

2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type = 0;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	unsigned int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
			goto err_out_bufs;
	}

	return q;

err_out_bufs:
	while (i > 0)
		qeth_free_out_buf(q->bufs[--i]);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	unsigned int i;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		INIT_LIST_HEAD(&queue->pending_bufs);
		spin_lock_init(&queue->lock);
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =
			page_to_phys(pool_entry->elements[i]);
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/* give only as many buffers to hardware as we have buffer pool entries */
	for (i = 0; i < rx_bufs; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
		     NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->coalesced_frames = 0;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}

static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}

static void qeth_prepare_ipa_cmd(struct qeth_card *card,
				 struct qeth_cmd_buffer *iob, u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}

static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}

struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length);
	iob->match = qeth_ipa_match_reply;

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */

int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

C
	QETH_CARD_TEXT(card, 4, "sendipa");
3061

3062
	if (card->read_or_write_problem) {
3063
		qeth_put_cmd(iob);
3064 3065 3066
		return -EIO;
	}

3067 3068
	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
3069
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3070 3071 3072 3073
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
F
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}

static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_cmds_supp *query_cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}

	card->options.adp.supported = query_cmd->supported_cmds;
	return 0;
}

static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}

static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}

static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				qeth_query_switch_attributes_cb, sw_info);
}

struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
			       buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
			       buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}

static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in a memory shortage, so switch back to
			   traditional skb allocation and drop packets */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count, NULL);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
		       (u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	struct qeth_card *card = queue->card;
	unsigned int frames, usecs;
	struct qaob *aob = NULL;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
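			/* Use a QAOB so that the TX completion for this
			 * buffer can be reported asynchronously via the CQ:
			 */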
			if (!buf->aob)
				buf->aob = qdio_allocate_aob();
			if (buf->aob) {
				struct qeth_qaob_priv1 *priv;

				aob = buf->aob;
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
			}
		}
	} else {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure that the PCI
				 * will wake at some time in the future, then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
		     index, count, aob);

	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}

		break;
	default:
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we need to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

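		/* Free the queues; they get re-allocated with the new CQ mode: */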
		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;

}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     cq->next_buf_to_init, count, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;
	struct net_device *dev = card->dev;

	QETH_CARD_TEXT(card, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_FATAL) {
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_tx_stop_all_queues(dev);
		qeth_schedule_recovery(card);
	}
}

/* Note: Function assumes that we have 4 outbound queues. */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
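		/* Higher IP precedence maps to a lower (= higher priority) queue: */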
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}

#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be sent on.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 * @elements: returns the number of buffer elements needed for this skb.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Failure to create the header is indicated by a return value < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}

static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			__set_bit(element, buf->from_kmem_cache);
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_phys(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_phys(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

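	/* Close the fragment chain, unless the skb fit into a single element: */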
	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}

static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

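	/* Close the current buffer if the skb can't be bulked into it: */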
	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}

static int qeth_do_send_packet(struct qeth_card *card,
			       struct qeth_qdio_out_q *queue,
			       struct sk_buff *skb, struct qeth_hdr *hdr,
			       unsigned int offset, unsigned int hd_len,
			       unsigned int elements_needed)
{
	unsigned int start_index = queue->next_buf_to_fill;
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
							   flush_count);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);

out:
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}

static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, __be16 proto,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  __be16 proto, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

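	/* Make sure there is enough private headroom for the HW header: */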
	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, proto, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		spin_lock(&queue->lock);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
		spin_unlock(&queue->lock);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	setparms = &(cmd->data.setadapterparms);
	if (qeth_setadpparms_inspect_rc(cmd)) {
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return (cmd->hdr.return_code) ? -EIO : 0;
}

void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
				   SETADP_DATA_SIZEOF(mode));
	if (!iob)
		return;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *adp_cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	adp_cmd = &cmd->data.setadapterparms;
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
		return -EADDRNOTAVAIL;

	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmac");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
				   SETADP_DATA_SIZEOF(change_addr));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setaccb");

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	switch (qeth_setadpparms_inspect_rc(cmd)) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
		else
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		return -EPERM;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
					"enabled at the adjacent switch port");
		return -EREMOTEIO;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
					"at the adjacent switch failed\n");
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
	default:
		return -EIO;
	}
}

int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = mode;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
	}

	return rc;
}

void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

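	/* A seq_no below used_total means more reply parts will follow: */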
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}

static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv = reply->param;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;

	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer)
		return -ENOMEM;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
	if (!rc) {
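		/* Compat tasks pass a 32-bit user pointer: */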
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
		oat_data.response_len = priv.response_len;

		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
	return rc;
}

static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply->param;
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	card_info = &cmd->data.setadapterparms.data.card_info;
	netdev_dbg(card->dev,
		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
		   card_info->card_type, card_info->port_mode,
		   card_info->port_speed);

	switch (card_info->port_mode) {
	case CARD_INFO_PORTM_FULLDUPLEX:
		link_info->duplex = DUPLEX_FULL;
		break;
	case CARD_INFO_PORTM_HALFDUPLEX:
		link_info->duplex = DUPLEX_HALF;
		break;
	default:
		link_info->duplex = DUPLEX_UNKNOWN;
	}

	switch (card_info->card_type) {
	case CARD_INFO_TYPE_1G_COPPER_A:
	case CARD_INFO_TYPE_1G_COPPER_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_TP;
		break;
	case CARD_INFO_TYPE_1G_FIBRE_A:
	case CARD_INFO_TYPE_1G_FIBRE_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_FIBRE;
		break;
	case CARD_INFO_TYPE_10G_FIBRE_A:
	case CARD_INFO_TYPE_10G_FIBRE_B:
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		break;
	default:
		switch (card_info->port_speed) {
		case CARD_INFO_PORTS_10M:
			link_info->speed = SPEED_10;
			break;
		case CARD_INFO_PORTS_100M:
			link_info->speed = SPEED_100;
			break;
		case CARD_INFO_PORTS_1G:
			link_info->speed = SPEED_1000;
			break;
		case CARD_INFO_PORTS_10G:
			link_info->speed = SPEED_10000;
			break;
		case CARD_INFO_PORTS_25G:
			link_info->speed = SPEED_25000;
			break;
		default:
			link_info->speed = SPEED_UNKNOWN;
		}

		link_info->port = PORT_OTHER;
	}

	return 0;
}

int qeth_query_card_info(struct qeth_card *card,
			 struct qeth_link_info *link_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
	if (!iob)
		return -ENOMEM;

	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
}

static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
		break;
	default:
		link_info->port = PORT_OTHER;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	}

	return 0;
}

static void qeth_init_link_info(struct qeth_card *card)
{
	card->info.link_info.duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		card->info.link_info.speed = SPEED_10000;
		card->info.link_info.port = PORT_FIBRE;
		card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			card->info.link_info.speed = SPEED_100;
			card->info.link_info.port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			card->info.link_info.speed = SPEED_1000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			card->info.link_info.speed = SPEED_10000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			card->info.link_info.speed = SPEED_25000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev, "Unknown link type %x\n",
				 card->info.link_type);
			card->info.link_info.speed = SPEED_UNKNOWN;
			card->info.link_info.port = PORT_OTHER;
		}

		card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
	}

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
			}
		}
	}
}

/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		ether_addr_copy(card->dev->dev_addr, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

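/* Read the device's configuration data and SSQD to determine, among other
 * things, whether QDIO Completion Queues are available on this device.
 */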
static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}

static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

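/* Hand the previously allocated input/output buffers to the qdio layer and
 * establish the QDIO queues on the device.
 */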
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED)
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field	 = (void *)qib_parms;
	init_data.no_input_qs            = card->qdio.no_in_queues;
	init_data.no_output_qs           = card->qdio.no_out_queues;
	init_data.input_handler		 = qeth_qdio_input_handler;
	init_data.output_handler	 = qeth_qdio_output_handler;
	init_data.irq_poll		 = qeth_qdio_poll;
	init_data.int_parm               = (unsigned long) card;
	init_data.input_sbal_addr_array  = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}

out:
	kfree(qib_parms);
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}

static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}

static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

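/* Low-level bring-up: clear and restart the CCW channels, run the IDX
 * handshake and MPC initialization, then query IPA assists and adapter
 * parameters. IDX activation is retried a limited number of times.
 */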
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	qeth_init_link_info(card);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}

static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED)
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

	rc = disc->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	return rc;
}

int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	disc->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

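/* Recovery worker: take the card offline and back online with its current
 * discipline; if that fails, leave the ccwgroup device offline.
 */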
static int qeth_do_reset(void *data)
{
	const struct qeth_discipline *disc;
	struct qeth_card *card = data;
	int rc;

	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif

static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *hdr, bool uses_frags)
{
	struct napi_struct *napi = &card->napi;
	bool is_cso;

	switch (hdr->hdr.l2.id) {
#if IS_ENABLED(CONFIG_QETH_L3)
	case QETH_HEADER_TYPE_LAYER3:
		qeth_l3_rebuild_skb(card, skb, hdr);
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
#endif
	case QETH_HEADER_TYPE_LAYER2:
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
		break;
	default:
		/* never happens */
		if (uses_frags)
			napi_free_frags(napi);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
}

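/* Attach one buffer-element chunk to the skb as a page fragment, without
 * copying the data.
 */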
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}

static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}

static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;

	element = &buffer->element[*element_no];

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = phys_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}

use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = phys_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

	qeth_receive_skb(card, skb, hdr, uses_frags);
	return 0;
}

static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}

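/* NAPI RX path: inspect the input queue for completed buffers, extract the
 * contained packets within the given budget, and refill processed buffers.
 */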
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

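/* TX completion for IQD devices, including the QDIO_ERROR_SLSB_PENDING case
 * where the final status arrives later via the buffer's QAOB.
 */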
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
	bool error = !!qdio_error;

	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
		struct qaob *aob = buffer->aob;
		struct qeth_qaob_priv1 *priv;
		enum iucv_tx_notify notify;

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}

			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
		}

		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
		memset(aob, 0, sizeof(*aob));
	} else if (card->options.cq == QETH_CQ_ENABLED) {
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	}

	qeth_clear_output_buffer(queue, buffer, error, budget);
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		qeth_tx_complete_pending_bufs(card, queue, false, budget);

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

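/* Load the requested layer2/layer3 discipline module, bind it to the card and
 * run its setup() hook; the module reference is dropped again on failure.
 */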
int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
{
	int rc;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = 576;
	 /* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	gdev->dev.groups = qeth_dev_groups;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		/* It's so early that we don't need the discipline_mutex yet. */
		rc = qeth_setup_discipline(card, enforced_disc);
		if (rc)
			goto err_setup_disc;

		gdev->dev.type = card->discipline->devtype;
		break;
	}

	return 0;

err_setup_disc:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	mutex_lock(&card->discipline_mutex);
	if (card->discipline)
		qeth_remove_discipline(card);
	mutex_unlock(&card->discipline_mutex);

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	mutex_lock(&card->discipline_mutex);
	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_setup_discipline(card, def_discipline);
		if (rc)
			goto err;
	}

	rc = qeth_set_online(card, card->discipline);

err:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}

static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

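/* Checksum offload is enabled in two steps: ASS_START reports the assist's
 * capabilities, ASS_ENABLE then activates the required TCP/UDP (and, for
 * L3 IPv4 TX, IP header) checksum flags.
 */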
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

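/* TSO offload follows the same two-step pattern: ASS_START reports the
 * supported MSS, ASS_ENABLE then switches on large-send for TCP.
 */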
static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6808
					    QETH_PROT_IPV4, NULL);
6809 6810 6811
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;
6812

6813
	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6814
				    QETH_PROT_IPV6, NULL);
6815 6816 6817 6818 6819 6820 6821 6822 6823

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

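/* If a feature that gets restricted for local next-hops was switched off,
 * flush the corresponding cached local addresses.
 */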
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

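/* ndo_set_features handler: toggle each changed offload via its IPA assist.
 * If some of the requested changes could not be applied, dev->features is
 * left describing the state that was actually reached and -EIO is returned.
 */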
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

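/* ndo_fix_features handler: mask out offloads for which the card reports no
 * IPA assist support.
 */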
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

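/* ndo_features_check handler: restrict offloads that can't be used for this
 * particular skb (e.g. HW checksumming towards a local next-hop), and drop SG
 * for small GSO segments so that each segment fits into a single buffer
 * element.
 */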
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

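/* ndo_get_stats64 handler: RX counters come from the card-wide stats, TX
 * counters are summed up over all output queues.
 */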
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

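/* Adjust the number of active TX queues. For IQD devices the prio-to-TC
 * mapping is updated first (and rolled back on failure) so that the ucast
 * traffic class only spans the currently active ucast queues.
 */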
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

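/* TX queue selection for IQD devices: mcast traffic always goes to its
 * dedicated queue, while ucast traffic is spread over the remaining queues
 * by the stack (never picking the mcast queue).
 */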
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

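/* ndo_open handler: start the TX queues and bring up the RX and per-queue
 * TX NAPI instances.
 */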
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

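/* ndo_stop handler: quiesce NAPI and the TX path in an order that ensures
 * .ndo_start_xmit can no longer touch the per-queue NAPI instances before
 * they are removed.
 */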
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

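/* Module init: set up the debugfs root and debug views, the core root device,
 * the kmem caches and the ccw/ccwgroup drivers; unwind in reverse order on
 * failure.
 */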
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

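/* Module exit: tear the core resources down again, in reverse order of their
 * registration.
 */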
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");